author    Olof Johansson <olof@lixom.net>    2017-05-18 23:54:47 -0700
committer Olof Johansson <olof@lixom.net>    2017-05-18 23:54:47 -0700
commit    5252d73756f318f182f2316acd78a6532041414d (patch)
tree      b082478fca4f00f599bb2ed1547b2652c2bea155 /arch
parent    e84188852a7239d7a144af12f7e5dac8fa88600b (diff)
parent    2ea659a9ef488125eb46da6eb571de5eae5c43f6 (diff)
Merge tag 'v4.12-rc1' into fixes
We've received a few fixes branches with -rc1 as base, but our contents were still at pre-rc1. Merge it in explicitly to make 'git merge --log' clear on what was actually merged.

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'arch')
-rw-r--r-- arch/Kconfig | 3
-rw-r--r-- arch/alpha/include/uapi/asm/Kbuild | 41
-rw-r--r-- arch/alpha/lib/Makefile | 11
-rw-r--r-- arch/arc/Kconfig | 1
-rw-r--r-- arch/arc/Makefile | 4
-rw-r--r-- arch/arc/boot/dts/axs10x_mb.dtsi | 24
-rw-r--r-- arch/arc/include/asm/cache.h | 6
-rw-r--r-- arch/arc/include/asm/mmu.h | 4
-rw-r--r-- arch/arc/include/asm/pgtable.h | 6
-rw-r--r-- arch/arc/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/arc/include/uapi/asm/elf.h | 1
-rw-r--r-- arch/arc/include/uapi/asm/ptrace.h | 5
-rw-r--r-- arch/arc/kernel/ptrace.c | 62
-rw-r--r-- arch/arc/mm/cache.c | 111
-rw-r--r-- arch/arm/Makefile | 8
-rw-r--r-- arch/arm/boot/dts/rk1108.dtsi | 2
-rw-r--r-- arch/arm/include/uapi/asm/Kbuild | 17
-rw-r--r-- arch/arm/include/uapi/asm/kvm.h | 6
-rw-r--r-- arch/arm/kernel/module.c | 11
-rw-r--r-- arch/arm/kvm/Makefile | 7
-rw-r--r-- arch/arm/kvm/arm.c | 1480
-rw-r--r-- arch/arm/kvm/mmio.c | 217
-rw-r--r-- arch/arm/kvm/mmu.c | 1975
-rw-r--r-- arch/arm/kvm/perf.c | 68
-rw-r--r-- arch/arm/kvm/psci.c | 332
-rw-r--r-- arch/arm/kvm/trace.h | 247
-rw-r--r-- arch/arm/mach-omap2/clkt2xxx_dpllcore.c | 3
-rw-r--r-- arch/arm/mach-omap2/clock.c | 35
-rw-r--r-- arch/arm/mach-omap2/clock.h | 2
-rw-r--r-- arch/arm/mach-omap2/cm.h | 5
-rw-r--r-- arch/arm/mach-omap2/cm2xxx.c | 9
-rw-r--r-- arch/arm/mach-omap2/cm3xxx.c | 10
-rw-r--r-- arch/arm/mach-omap2/cm_common.c | 2
-rw-r--r-- arch/arm/mm/dma-mapping.c | 9
-rw-r--r-- arch/arm/plat-samsung/devs.c | 1
-rw-r--r-- arch/arm64/Makefile | 6
-rw-r--r-- arch/arm64/include/asm/asm-uaccess.h | 9
-rw-r--r-- arch/arm64/include/asm/atomic_lse.h | 4
-rw-r--r-- arch/arm64/include/asm/barrier.h | 20
-rw-r--r-- arch/arm64/include/asm/cmpxchg.h | 2
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 6
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 13
-rw-r--r-- arch/arm64/include/uapi/asm/Kbuild | 18
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 6
-rw-r--r-- arch/arm64/kernel/armv8_deprecated.c | 3
-rw-r--r-- arch/arm64/kernel/entry.S | 5
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 3
-rw-r--r-- arch/arm64/kernel/module.c | 7
-rw-r--r-- arch/arm64/kernel/traps.c | 4
-rw-r--r-- arch/arm64/kvm/Makefile | 5
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 8
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 143
-rw-r--r-- arch/blackfin/include/uapi/asm/Kbuild | 17
-rw-r--r-- arch/c6x/include/uapi/asm/Kbuild | 8
-rw-r--r-- arch/cris/arch-v32/drivers/Kconfig | 1
-rw-r--r-- arch/cris/include/arch-v10/arch/Kbuild | 1
-rw-r--r-- arch/cris/include/arch-v32/arch/Kbuild | 1
-rw-r--r-- arch/cris/include/uapi/arch-v10/arch/Kbuild | 5
-rw-r--r-- arch/cris/include/uapi/arch-v32/arch/Kbuild | 3
-rw-r--r-- arch/cris/include/uapi/asm/Kbuild | 42
-rw-r--r-- arch/frv/include/uapi/asm/Kbuild | 33
-rw-r--r-- arch/frv/kernel/asm-offsets.c | 19
-rw-r--r-- arch/h8300/include/uapi/asm/Kbuild | 28
-rw-r--r-- arch/h8300/include/uapi/asm/bitsperlong.h (renamed from arch/h8300/include/asm/bitsperlong.h) | 6
-rw-r--r-- arch/hexagon/include/asm/Kbuild | 3
-rw-r--r-- arch/hexagon/include/uapi/asm/Kbuild | 13
-rw-r--r-- arch/ia64/include/uapi/asm/Kbuild | 45
-rw-r--r-- arch/ia64/kernel/Makefile | 26
-rw-r--r-- arch/ia64/kernel/Makefile.gate | 2
-rw-r--r-- arch/m32r/include/uapi/asm/Kbuild | 31
-rw-r--r-- arch/m68k/include/uapi/asm/Kbuild | 24
-rw-r--r-- arch/metag/include/asm/uaccess.h | 58
-rw-r--r-- arch/metag/include/uapi/asm/Kbuild | 8
-rw-r--r-- arch/metag/lib/usercopy.c | 236
-rw-r--r-- arch/metag/mm/mmu-meta1.c | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/Kbuild | 32
-rw-r--r-- arch/mips/Kbuild | 2
-rw-r--r-- arch/mips/Kconfig | 24
-rw-r--r-- arch/mips/Kconfig.debug | 2
-rw-r--r-- arch/mips/cavium-octeon/Kconfig | 9
-rw-r--r-- arch/mips/cavium-octeon/Platform | 4
-rw-r--r-- arch/mips/cavium-octeon/executive/cvmx-l2c.c | 139
-rw-r--r-- arch/mips/cavium-octeon/executive/octeon-model.c | 21
-rw-r--r-- arch/mips/cavium-octeon/octeon-platform.c | 113
-rw-r--r-- arch/mips/cavium-octeon/setup.c | 12
-rw-r--r-- arch/mips/configs/generic_defconfig | 3
-rw-r--r-- arch/mips/include/asm/cache.h | 5
-rw-r--r-- arch/mips/include/asm/cpu-info.h | 3
-rw-r--r-- arch/mips/include/asm/cpufeature.h | 26
-rw-r--r-- arch/mips/include/asm/mach-rm/cpu-feature-overrides.h | 2
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2c-defs.h | 3193
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2c.h | 59
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2d-defs.h | 526
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2t-defs.h | 286
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-pciercx-defs.h | 3225
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-sli-defs.h | 3541
-rw-r--r-- arch/mips/include/asm/octeon/cvmx.h | 3
-rw-r--r-- arch/mips/include/asm/pgalloc.h | 26
-rw-r--r-- arch/mips/include/asm/pgtable-64.h | 88
-rw-r--r-- arch/mips/include/asm/uasm.h | 88
-rw-r--r-- arch/mips/include/uapi/asm/Kbuild | 37
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 7
-rw-r--r-- arch/mips/kernel/mips-r2-to-r6-emul.c | 16
-rw-r--r-- arch/mips/kernel/process.c | 6
-rw-r--r-- arch/mips/kernel/r4k_switch.S | 6
-rw-r--r-- arch/mips/kernel/smp-cps.c | 7
-rw-r--r-- arch/mips/kernel/smp-mt.c | 49
-rw-r--r-- arch/mips/kernel/smp.c | 20
-rw-r--r-- arch/mips/lantiq/irq.c | 52
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 10
-rw-r--r-- arch/mips/mm/fault.c | 16
-rw-r--r-- arch/mips/mm/init.c | 3
-rw-r--r-- arch/mips/mm/pgtable-64.c | 33
-rw-r--r-- arch/mips/mm/tlbex.c | 22
-rw-r--r-- arch/mips/mm/uasm-mips.c | 1
-rw-r--r-- arch/mips/mm/uasm.c | 159
-rw-r--r-- arch/mips/mti-malta/malta-int.c | 83
-rw-r--r-- arch/mips/net/bpf_jit.c | 41
-rw-r--r-- arch/mips/net/bpf_jit_asm.S | 23
-rw-r--r-- arch/mips/pci/pcie-octeon.c | 4
-rw-r--r-- arch/mips/sibyte/bcm1480/setup.c | 1
-rw-r--r-- arch/mips/sibyte/sb1250/setup.c | 1
-rw-r--r-- arch/mn10300/include/uapi/asm/Kbuild | 32
-rw-r--r-- arch/nios2/Kconfig | 2
-rw-r--r-- arch/nios2/Kconfig.debug | 1
-rw-r--r-- arch/nios2/Makefile | 5
-rw-r--r-- arch/nios2/boot/.gitignore | 2
-rw-r--r-- arch/nios2/boot/dts/10m50_devboard.dts | 3
-rw-r--r-- arch/nios2/include/asm/Kbuild | 1
-rw-r--r-- arch/nios2/include/asm/cacheflush.h | 6
-rw-r--r-- arch/nios2/include/asm/cmpxchg.h | 14
-rw-r--r-- arch/nios2/include/asm/cpuinfo.h | 2
-rw-r--r-- arch/nios2/include/asm/prom.h | 22
-rw-r--r-- arch/nios2/include/asm/setup.h | 2
-rw-r--r-- arch/nios2/include/asm/uaccess.h | 7
-rw-r--r-- arch/nios2/include/uapi/asm/Kbuild | 4
-rw-r--r-- arch/nios2/kernel/.gitignore | 1
-rw-r--r-- arch/nios2/kernel/Makefile | 1
-rw-r--r-- arch/nios2/kernel/cpuinfo.c | 18
-rw-r--r-- arch/nios2/kernel/early_printk.c | 118
-rw-r--r-- arch/nios2/kernel/irq.c | 2
-rw-r--r-- arch/nios2/kernel/prom.c | 49
-rw-r--r-- arch/nios2/kernel/setup.c | 6
-rw-r--r-- arch/nios2/mm/uaccess.c | 33
-rw-r--r-- arch/nios2/platform/Kconfig.platform | 26
-rw-r--r-- arch/openrisc/include/asm/Kbuild | 3
-rw-r--r-- arch/openrisc/include/uapi/asm/Kbuild | 8
-rw-r--r-- arch/parisc/include/uapi/asm/Kbuild | 28
-rw-r--r-- arch/powerpc/Kconfig | 17
-rw-r--r-- arch/powerpc/Makefile.postlink | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-64k.h | 6
-rw-r--r-- arch/powerpc/include/asm/cpm1.h | 2
-rw-r--r-- arch/powerpc/include/asm/cpu_has_feature.h | 6
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 2
-rw-r--r-- arch/powerpc/include/asm/dt_cpu_ftrs.h | 26
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_asm.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 28
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 74
-rw-r--r-- arch/powerpc/include/asm/processor.h | 5
-rw-r--r-- arch/powerpc/include/asm/reg.h | 1
-rw-r--r-- arch/powerpc/include/asm/xive.h | 9
-rw-r--r-- arch/powerpc/include/uapi/asm/Kbuild | 45
-rw-r--r-- arch/powerpc/include/uapi/asm/cputable.h | 7
-rw-r--r-- arch/powerpc/kernel/Makefile | 1
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 10
-rw-r--r-- arch/powerpc/kernel/cputable.c | 37
-rw-r--r-- arch/powerpc/kernel/dt_cpu_ftrs.c | 1031
-rw-r--r-- arch/powerpc/kernel/exceptions-64e.S | 12
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 4
-rw-r--r-- arch/powerpc/kernel/prom.c | 29
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 2
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 10
-rw-r--r-- arch/powerpc/kvm/Kconfig | 5
-rw-r--r-- arch/powerpc/kvm/Makefile | 4
-rw-r--r-- arch/powerpc/kvm/book3s.c | 75
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 51
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 103
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_xics.c | 10
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_xive.c | 47
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 62
-rw-r--r-- arch/powerpc/kvm/book3s_rtas.c | 21
-rw-r--r-- arch/powerpc/kvm/book3s_xics.c | 35
-rw-r--r-- arch/powerpc/kvm/book3s_xics.h | 7
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 1894
-rw-r--r-- arch/powerpc/kvm/book3s_xive.h | 256
-rw-r--r-- arch/powerpc/kvm/book3s_xive_template.c | 503
-rw-r--r-- arch/powerpc/kvm/irq.h | 1
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 17
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-powernv.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 1
-rw-r--r-- arch/powerpc/sysdev/cpm1.c | 25
-rw-r--r-- arch/powerpc/sysdev/xive/common.c | 142
-rw-r--r-- arch/powerpc/sysdev/xive/native.c | 86
-rw-r--r-- arch/s390/include/uapi/asm/Kbuild | 46
-rw-r--r-- arch/score/include/asm/Kbuild | 3
-rw-r--r-- arch/score/include/uapi/asm/Kbuild | 32
-rw-r--r-- arch/sh/Makefile | 7
-rw-r--r-- arch/sh/include/uapi/asm/Kbuild | 23
-rw-r--r-- arch/sparc/include/uapi/asm/Kbuild | 48
-rw-r--r-- arch/sparc/kernel/head_64.S | 6
-rw-r--r-- arch/sparc/kernel/led.c | 13
-rw-r--r-- arch/sparc/kernel/setup_32.c | 2
-rw-r--r-- arch/sparc/kernel/setup_64.c | 2
-rw-r--r-- arch/sparc/lib/GENbzero.S | 2
-rw-r--r-- arch/sparc/lib/NGbzero.S | 2
-rw-r--r-- arch/tile/include/arch/Kbuild | 1
-rw-r--r-- arch/tile/include/asm/Kbuild | 3
-rw-r--r-- arch/tile/include/uapi/arch/Kbuild | 17
-rw-r--r-- arch/tile/include/uapi/asm/Kbuild | 17
-rw-r--r-- arch/um/Kconfig.common | 5
-rw-r--r-- arch/um/kernel/initrd.c | 4
-rw-r--r-- arch/um/kernel/sysrq.c | 6
-rw-r--r-- arch/um/kernel/um_arch.c | 6
-rw-r--r-- arch/um/os-Linux/skas/process.c | 4
-rw-r--r-- arch/unicore32/Makefile | 4
-rw-r--r-- arch/unicore32/include/uapi/asm/Kbuild | 6
-rw-r--r-- arch/x86/Makefile | 3
-rw-r--r-- arch/x86/boot/compressed/error.h | 4
-rw-r--r-- arch/x86/boot/compressed/pagetable.c | 2
-rw-r--r-- arch/x86/events/intel/rapl.c | 2
-rw-r--r-- arch/x86/include/asm/asm.h | 1
-rw-r--r-- arch/x86/include/asm/init.h | 3
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 2
-rw-r--r-- arch/x86/include/asm/pmem.h | 2
-rw-r--r-- arch/x86/include/uapi/asm/Kbuild | 59
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 5
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 4
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r-- arch/x86/kernel/cpu/microcode/intel.c | 2
-rw-r--r-- arch/x86/kernel/i8259.c | 1
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 6
-rw-r--r-- arch/x86/kernel/setup.c | 15
-rw-r--r-- arch/x86/kernel/setup_percpu.c | 10
-rw-r--r-- arch/x86/kernel/tboot.c | 3
-rw-r--r-- arch/x86/kvm/mmu.c | 15
-rw-r--r-- arch/x86/kvm/mmu.h | 1
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 4
-rw-r--r-- arch/x86/kvm/vmx.c | 105
-rw-r--r-- arch/x86/lib/csum-copy_64.S | 12
-rw-r--r-- arch/x86/lib/kaslr.c | 3
-rw-r--r-- arch/x86/mm/ident_map.c | 14
-rw-r--r-- arch/x86/mm/init_64.c | 12
-rw-r--r-- arch/x86/mm/numa_32.c | 1
-rw-r--r-- arch/x86/mm/testmmiotrace.c | 2
-rw-r--r-- arch/x86/power/hibernate_64.c | 2
-rw-r--r-- arch/x86/um/ptrace_64.c | 2
-rw-r--r-- arch/x86/um/shared/sysdep/kernel-offsets.h | 9
-rw-r--r-- arch/x86/xen/enlighten_pv.c | 42
-rw-r--r-- arch/x86/xen/mmu_pv.c | 7
-rw-r--r-- arch/x86/xen/time.c | 2
-rw-r--r-- arch/xtensa/include/uapi/asm/Kbuild | 23
251 files changed, 6457 insertions, 16926 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index dcbd462b68b1..6c00e5b00f8b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -324,6 +324,9 @@ config HAVE_CMPXCHG_LOCAL
config HAVE_CMPXCHG_DOUBLE
bool
+config ARCH_WEAK_RELEASE_ACQUIRE
+ bool
+
config ARCH_WANT_IPC_PARSE_VERSION
bool
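
Context for the new ARCH_WEAK_RELEASE_ACQUIRE symbol: it lets an architecture flag that chains of release-acquire operations order more weakly than full barriers. A minimal sketch of the smp_store_release()/smp_load_acquire() pairing the option qualifies (illustration only, not part of this diff):

	/*
	 * Sketch: producer publishes data with a release store; consumer
	 * observes it with an acquire load. On architectures that select
	 * ARCH_WEAK_RELEASE_ACQUIRE, chains of such pairs order more
	 * weakly than a full smp_mb().
	 */
	static int data;
	static int flag;

	static void producer(void)
	{
		data = 42;                   /* plain store */
		smp_store_release(&flag, 1); /* orders the data store before flag */
	}

	static void consumer(void)
	{
		if (smp_load_acquire(&flag)) /* orders the flag load before data */
			WARN_ON(data != 42); /* guaranteed by the pairing */
	}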
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index d96f2ef5b639..b15bf6bc0e94 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -1,43 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += a.out.h
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += compiler.h
-header-y += console.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += fpu.h
-header-y += gentrap.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += pal.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += reg.h
-header-y += regdef.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += sysinfo.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 59660743237c..7083434dd241 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -46,11 +46,6 @@ AFLAGS___remqu.o = -DREM
AFLAGS___divlu.o = -DDIV -DINTSIZE
AFLAGS___remlu.o = -DREM -DINTSIZE
-$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
+$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
+ $(src)/$(ev6-y)divide.S FORCE
+ $(call if_changed_rule,as_o_S)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 5d7fb3e7cb97..a5459698f0ee 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -444,6 +444,7 @@ config ARC_HAS_PAE40
bool "Support for the 40-bit Physical Address Extension"
default n
depends on ISA_ARCV2
+ select HIGHMEM
help
Enable access to physical memory beyond 4G, only supported on
ARC cores with 40 bit Physical Addressing support
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 19cce226d1a8..44ef35d33956 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -123,9 +123,9 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
#default target for make without any arguments.
-KBUILD_IMAGE := bootpImage
+KBUILD_IMAGE := $(boot)/bootpImage
-all: $(KBUILD_IMAGE)
+all: bootpImage
bootpImage: vmlinux
boot_targets += uImage uImage.bin uImage.gz
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index d6c1bbc98ac3..41cfb29b62c1 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -51,7 +51,7 @@
pguclk: pguclk {
#clock-cells = <0>;
compatible = "fixed-clock";
- clock-frequency = <74440000>;
+ clock-frequency = <74250000>;
};
};
@@ -149,12 +149,13 @@
interrupts = <14>;
};
- i2c@0x1e000 {
- compatible = "snps,designware-i2c";
+ i2s: i2s@1e000 {
+ compatible = "snps,designware-i2s";
reg = <0x1e000 0x100>;
- clock-frequency = <400000>;
- clocks = <&i2cclk>;
+ clocks = <&i2sclk 0>;
+ clock-names = "i2sclk";
interrupts = <15>;
+ #sound-dai-cells = <0>;
};
i2c@0x1f000 {
@@ -174,6 +175,7 @@
adi,input-colorspace = "rgb";
adi,input-clock = "1x";
adi,clock-delay = <0x03>;
+ #sound-dai-cells = <0>;
ports {
#address-cells = <1>;
@@ -295,5 +297,17 @@
};
};
};
+
+ sound_playback {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "AXS10x HDMI Audio";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,cpu {
+ sound-dai = <&i2s>;
+ };
+ simple-audio-card,codec {
+ sound-dai = <&adv7511>;
+ };
+ };
};
};
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 5008021fba98..19ebddffb279 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -62,6 +62,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
#define ARC_REG_IC_IVIC 0x10
#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIR 0x16
+#define ARC_REG_IC_ENDR 0x17
#define ARC_REG_IC_IVIL 0x19
#define ARC_REG_IC_PTAG 0x1E
#define ARC_REG_IC_PTAG_HI 0x1F
@@ -76,6 +78,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_DC_IVDL 0x4A
#define ARC_REG_DC_FLSH 0x4B
#define ARC_REG_DC_FLDL 0x4C
+#define ARC_REG_DC_STARTR 0x4D
+#define ARC_REG_DC_ENDR 0x4E
#define ARC_REG_DC_PTAG 0x5C
#define ARC_REG_DC_PTAG_HI 0x5F
@@ -83,6 +87,8 @@ extern unsigned long perip_base, perip_end;
#define DC_CTRL_DIS 0x001
#define DC_CTRL_INV_MODE_FLUSH 0x040
#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_RGN_OP_INV 0x200
+#define DC_CTRL_RGN_OP_MSK 0x200
/*System-level cache (L2 cache) related Auxiliary registers */
#define ARC_REG_SLC_CFG 0x901
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index b144d7ca7d20..db7319e9b506 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -9,6 +9,10 @@
#ifndef _ASM_ARC_MMU_H
#define _ASM_ARC_MMU_H
+#ifndef __ASSEMBLY__
+#include <linux/threads.h> /* NR_CPUS */
+#endif
+
#if defined(CONFIG_ARC_MMU_V1)
#define CONFIG_ARC_MMU_VER 1
#elif defined(CONFIG_ARC_MMU_V2)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index ee22d40afef4..08fe33830d4b 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -35,11 +35,11 @@
#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H
-#include <asm/page.h>
-#include <asm/mmu.h>
+#include <linux/const.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
-#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/mmu.h> /* to propagate CONFIG_ARC_MMU_VER <n> */
/**************************************************************************
* Page Table Flags
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index f50d02df78d5..b15bf6bc0e94 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,5 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += elf.h
-header-y += page.h
-header-y += cachectl.h
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
index 0037a587320d..06d95e611616 100644
--- a/arch/arc/include/uapi/asm/elf.h
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -27,6 +27,7 @@ typedef unsigned long elf_greg_t;
typedef unsigned long elf_fpregset_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_ARCV2REG (sizeof(struct user_regs_arcv2) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 0b3ef63d4a03..dd206e6b482c 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -47,6 +47,11 @@ struct user_regs_struct {
unsigned long efa; /* break pt addr, for break points in delay slots */
unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
};
+
+struct user_regs_arcv2 {
+ unsigned long r30, r58, r59;
+};
+
#endif /* !__ASSEMBLY__ */
#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 31150060d38b..5ee4676f135d 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -184,19 +184,75 @@ static int genregs_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_ISA_ARCV2
+static int arcv2regs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret, copy_sz;
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
+ copy_sz = sizeof(struct user_regs_arcv2);
+ else
+ copy_sz = 4; /* r30 only */
+
+ /*
+ * itemized copy not needed like above as layout of regs (r30,r58,r59)
+ * is exactly same in kernel (pt_regs) and userspace (user_regs_arcv2)
+ */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &regs->r30,
+ 0, copy_sz);
+
+ return ret;
+}
+
+static int arcv2regs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ int ret, copy_sz;
+
+ if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
+ copy_sz = sizeof(struct user_regs_arcv2);
+ else
+ copy_sz = 4; /* r30 only */
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, (void *)&regs->r30,
+ 0, copy_sz);
+
+ return ret;
+}
+
+#endif
+
enum arc_getset {
- REGSET_GENERAL,
+ REGSET_CMN,
+ REGSET_ARCV2,
};
static const struct user_regset arc_regsets[] = {
- [REGSET_GENERAL] = {
+ [REGSET_CMN] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = genregs_get,
.set = genregs_set,
- }
+ },
+#ifdef CONFIG_ISA_ARCV2
+ [REGSET_ARCV2] = {
+ .core_note_type = NT_ARC_V2,
+ .n = ELF_ARCV2REG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = arcv2regs_get,
+ .set = arcv2regs_set,
+ },
+#endif
};
static const struct user_regset_view user_arc_view = {
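
For illustration (not part of the commit), a userspace sketch of reading the new regset with PTRACE_GETREGSET; it assumes the NT_ARC_V2 note type referenced above is exported via linux/elf.h:

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/uio.h>
	#include <linux/elf.h>  /* NT_ARC_V2 */
	#include <asm/ptrace.h> /* struct user_regs_arcv2 from the uapi header above */

	/* Sketch: fetch r30/r58/r59 from a stopped tracee. */
	static int read_arcv2_regs(pid_t pid)
	{
		struct user_regs_arcv2 regs = { 0 };
		struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

		/* With !CONFIG_ARC_HAS_ACCL_REGS only r30 is copied,
		 * mirroring copy_sz in arcv2regs_get() above. */
		if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARC_V2, &iov) == -1)
			return -1;

		printf("r30=%lx r58=%lx r59=%lx\n", regs.r30, regs.r58, regs.r59);
		return 0;
	}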
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 928562967f3c..a867575a758b 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -21,6 +21,10 @@
#include <asm/cachectl.h>
#include <asm/setup.h>
+#ifdef CONFIG_ISA_ARCV2
+#define USE_RGN_FLSH 1
+#endif
+
static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
@@ -28,7 +32,7 @@ unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int cacheop);
+ unsigned long sz, const int op, const int full_page);
void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
@@ -233,11 +237,10 @@ slc_chk:
static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int op)
+ unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd;
int num_lines;
- const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
@@ -279,11 +282,10 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int op)
+ unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd, aux_tag;
int num_lines;
- const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
@@ -334,6 +336,8 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
}
}
+#ifndef USE_RGN_FLSH
+
/*
* In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
* Here's how cache ops are implemented
@@ -349,17 +353,16 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
*/
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int cacheop)
+ unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd;
int num_lines;
- const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
- if (cacheop == OP_INV_IC) {
+ if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
} else {
/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
- aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
+ aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
}
/* Ensure we properly floor/ceil the non-line aligned/sized requests
@@ -368,7 +371,7 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
* -@paddr will be cache-line aligned already (being page aligned)
* -@sz will be integral multiple of line size (being page sized).
*/
- if (!full_page_op) {
+ if (!full_page) {
sz += paddr & ~CACHE_LINE_MASK;
paddr &= CACHE_LINE_MASK;
}
@@ -381,7 +384,7 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
* - (and needs to be written before the lower 32 bits)
*/
if (is_pae40_enabled()) {
- if (cacheop == OP_INV_IC)
+ if (op == OP_INV_IC)
/*
* Non aliasing I-cache in HS38,
* aliasing I-cache handled in __cache_line_loop_v3()
@@ -397,6 +400,55 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
}
}
+#else
+
+/*
+ * optimized flush operation which takes a region as opposed to iterating per line
+ */
+static inline
+void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
+ unsigned long sz, const int op, const int full_page)
+{
+ unsigned int s, e;
+
+ /* Only for Non aliasing I-cache in HS38 */
+ if (op == OP_INV_IC) {
+ s = ARC_REG_IC_IVIR;
+ e = ARC_REG_IC_ENDR;
+ } else {
+ s = ARC_REG_DC_STARTR;
+ e = ARC_REG_DC_ENDR;
+ }
+
+ if (!full_page) {
+ /* for any leading gap between @paddr and start of cache line */
+ sz += paddr & ~CACHE_LINE_MASK;
+ paddr &= CACHE_LINE_MASK;
+
+ /*
+ * account for any trailing gap to end of cache line
+ * this is equivalent to DIV_ROUND_UP() in line ops above
+ */
+ sz += L1_CACHE_BYTES - 1;
+ }
+
+ if (is_pae40_enabled()) {
+ /* TBD: check if crossing 4TB boundary */
+ if (op == OP_INV_IC)
+ write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
+ else
+ write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
+ }
+
+ /* ENDR needs to be set ahead of START */
+ write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
+ write_aux_reg(s, paddr);
+
+ /* caller waits on DC_CTRL.FS */
+}
+
+#endif
+
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop __cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
@@ -411,6 +463,11 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
* Machine specific helpers for Entire D-Cache or Per Line ops
*/
+#ifndef USE_RGN_FLSH
+/*
+ * this version avoids extra read/write of DC_CTRL for flush or invalid ops
+ * in the non region flush regime (such as for ARCompact)
+ */
static inline void __before_dc_op(const int op)
{
if (op == OP_FLUSH_N_INV) {
@@ -424,6 +481,32 @@ static inline void __before_dc_op(const int op)
}
}
+#else
+
+static inline void __before_dc_op(const int op)
+{
+ const unsigned int ctl = ARC_REG_DC_CTRL;
+ unsigned int val = read_aux_reg(ctl);
+
+ if (op == OP_FLUSH_N_INV) {
+ val |= DC_CTRL_INV_MODE_FLUSH;
+ }
+
+ if (op != OP_INV_IC) {
+ /*
+ * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
+ * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
+ */
+ val &= ~DC_CTRL_RGN_OP_MSK;
+ if (op & OP_INV)
+ val |= DC_CTRL_RGN_OP_INV;
+ }
+ write_aux_reg(ctl, val);
+}
+
+#endif
+
+
static inline void __after_dc_op(const int op)
{
if (op & OP_FLUSH) {
@@ -486,13 +569,14 @@ static void __dc_enable(void)
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
unsigned long flags;
local_irq_save(flags);
__before_dc_op(op);
- __cache_line_loop(paddr, vaddr, sz, op);
+ __cache_line_loop(paddr, vaddr, sz, op, full_page);
__after_dc_op(op);
@@ -521,10 +605,11 @@ static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
+ const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
unsigned long flags;
local_irq_save(flags);
- (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
+ (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
local_irq_restore(flags);
}
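
A worked sketch of the floor/ceil arithmetic used above before the STARTR/ENDR pair is programmed (illustration only; assumes 64-byte cache lines, with EX_-prefixed stand-ins for the kernel macros):

	/* Fold any leading gap into sz, floor paddr to a line boundary,
	 * then round sz up, equivalent to the DIV_ROUND_UP() in the
	 * per-line loops. */
	#define EX_LINE_BYTES 64UL
	#define EX_LINE_MASK  (~(EX_LINE_BYTES - 1))

	static void align_region(unsigned long *paddr, unsigned long *sz)
	{
		/* example: *paddr = 0x1010, *sz = 0x60 */
		*sz += *paddr & ~EX_LINE_MASK; /* leading gap folded in: sz = 0x70 */
		*paddr &= EX_LINE_MASK;        /* floor: paddr = 0x1000 */
		*sz += EX_LINE_BYTES - 1;      /* ceil: sz = 0xaf */
	}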
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ab30cc634d02..65f4e2a4eb94 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -297,10 +297,11 @@ drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
libs-y := arch/arm/lib/ $(libs-y)
# Default target when executing plain make
+boot := arch/arm/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
-KBUILD_IMAGE := xipImage
+KBUILD_IMAGE := $(boot)/xipImage
else
-KBUILD_IMAGE := zImage
+KBUILD_IMAGE := $(boot)/zImage
endif
# Build the DT binary blobs if we have OF configured
@@ -308,9 +309,8 @@ ifeq ($(CONFIG_USE_OF),y)
KBUILD_DTBS := dtbs
endif
-all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
+all: $(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
-boot := arch/arm/boot
archheaders:
$(Q)$(MAKE) $(build)=arch/arm/tools uapi
diff --git a/arch/arm/boot/dts/rk1108.dtsi b/arch/arm/boot/dts/rk1108.dtsi
index 6c8fc19d0ecd..1297924db6ad 100644
--- a/arch/arm/boot/dts/rk1108.dtsi
+++ b/arch/arm/boot/dts/rk1108.dtsi
@@ -41,7 +41,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/clock/rk1108-cru.h>
+#include <dt-bindings/clock/rv1108-cru.h>
#include <dt-bindings/pinctrl/rockchip.h>
/ {
#address-cells = <1>;
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 46a76cd6acb6..607f702c2d62 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,23 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += auxvec.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += ioctls.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += perf_regs.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += unistd.h
genhdr-y += unistd-common.h
genhdr-y += unistd-oabi.h
genhdr-y += unistd-eabi.h
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a88726359e5f..5e3c673fa3f4 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -196,13 +196,17 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
+#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
+#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 80254b47dc34..3ff571c2c71c 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -40,8 +40,15 @@
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ gfp_t gfp_mask = GFP_KERNEL;
+ void *p;
+
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
+ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
return p;
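
The module_alloc() change above is an instance of the optimistic-allocation idiom: try the tight module region quietly with __GFP_NOWARN, then fall back. A self-contained sketch of the same idiom (illustrative; the real ARM fallback retries in the vmalloc area when CONFIG_ARM_MODULE_PLTS is enabled):

	/* Sketch: the first attempt stays quiet on failure, the fallback warns. */
	static void *alloc_pref_then_fallback(unsigned long size)
	{
		void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
					       GFP_KERNEL | __GFP_NOWARN,
					       PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
					       __builtin_return_address(0));
		if (p)
			return p;

		/* module region exhausted: retry in the larger vmalloc range */
		return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
					    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
					    NUMA_NO_NODE, __builtin_return_address(0));
	}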
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 7b3670c2ae7b..d9beee652d36 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -18,9 +18,12 @@ KVM := ../../../virt/kvm
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
+
obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o vgic-v3-coproc.o
+obj-y += handle_exit.o guest.o emulate.o reset.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
+obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
+obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
deleted file mode 100644
index 8a31906bdc9b..000000000000
--- a/arch/arm/kvm/arm.c
+++ /dev/null
@@ -1,1480 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/cpu_pm.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/mman.h>
-#include <linux/sched.h>
-#include <linux/kvm.h>
-#include <trace/events/kvm.h>
-#include <kvm/arm_pmu.h>
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/mman.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/virt.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
-#include <asm/sections.h>
-
-#ifdef REQUIRES_VIRT
-__asm__(".arch_extension virt");
-#endif
-
-static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
-
-/* Per-CPU variable containing the currently running vcpu. */
-static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
-
-/* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u32 kvm_next_vmid;
-static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
-
-static bool vgic_present;
-
-static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
-
-static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
-{
- BUG_ON(preemptible());
- __this_cpu_write(kvm_arm_running_vcpu, vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
- * Must be called from non-preemptible context
- */
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
-{
- BUG_ON(preemptible());
- return __this_cpu_read(kvm_arm_running_vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
- */
-struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
-{
- return &kvm_arm_running_vcpu;
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
-}
-
-int kvm_arch_hardware_setup(void)
-{
- return 0;
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
- *(int *)rtn = 0;
-}
-
-
-/**
- * kvm_arch_init_vm - initializes a VM data structure
- * @kvm: pointer to the KVM struct
- */
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
- int ret, cpu;
-
- if (type)
- return -EINVAL;
-
- kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
- if (!kvm->arch.last_vcpu_ran)
- return -ENOMEM;
-
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
- ret = kvm_alloc_stage2_pgd(kvm);
- if (ret)
- goto out_fail_alloc;
-
- ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
- if (ret)
- goto out_free_stage2_pgd;
-
- kvm_vgic_early_init(kvm);
-
- /* Mark the initial VMID generation invalid */
- kvm->arch.vmid_gen = 0;
-
- /* The maximum number of VCPUs is limited by the host's GIC model */
- kvm->arch.max_vcpus = vgic_present ?
- kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
-
- return ret;
-out_free_stage2_pgd:
- kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
- free_percpu(kvm->arch.last_vcpu_ran);
- kvm->arch.last_vcpu_ran = NULL;
- return ret;
-}
-
-bool kvm_arch_has_vcpu_debugfs(void)
-{
- return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-
-
-/**
- * kvm_arch_destroy_vm - destroy the VM data structure
- * @kvm: pointer to the KVM struct
- */
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
- int i;
-
- free_percpu(kvm->arch.last_vcpu_ran);
- kvm->arch.last_vcpu_ran = NULL;
-
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
-
- kvm_vgic_destroy(kvm);
-}
-
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
- int r;
- switch (ext) {
- case KVM_CAP_IRQCHIP:
- r = vgic_present;
- break;
- case KVM_CAP_IOEVENTFD:
- case KVM_CAP_DEVICE_CTRL:
- case KVM_CAP_USER_MEMORY:
- case KVM_CAP_SYNC_MMU:
- case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
- case KVM_CAP_ONE_REG:
- case KVM_CAP_ARM_PSCI:
- case KVM_CAP_ARM_PSCI_0_2:
- case KVM_CAP_READONLY_MEM:
- case KVM_CAP_MP_STATE:
- case KVM_CAP_IMMEDIATE_EXIT:
- r = 1;
- break;
- case KVM_CAP_ARM_SET_DEVICE_ADDR:
- r = 1;
- break;
- case KVM_CAP_NR_VCPUS:
- r = num_online_cpus();
- break;
- case KVM_CAP_MAX_VCPUS:
- r = KVM_MAX_VCPUS;
- break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
- case KVM_CAP_MSI_DEVID:
- if (!kvm)
- r = -EINVAL;
- else
- r = kvm->arch.vgic.msis_require_devid;
- break;
- case KVM_CAP_ARM_USER_IRQ:
- /*
- * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
- * (bump this number if adding more devices)
- */
- r = 1;
- break;
- default:
- r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
- break;
- }
- return r;
-}
-
-long kvm_arch_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- return -EINVAL;
-}
-
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- int err;
- struct kvm_vcpu *vcpu;
-
- if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
- err = -EBUSY;
- goto out;
- }
-
- if (id >= kvm->arch.max_vcpus) {
- err = -EINVAL;
- goto out;
- }
-
- vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- if (!vcpu) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_vcpu;
-
- err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
- if (err)
- goto vcpu_uninit;
-
- return vcpu;
-vcpu_uninit:
- kvm_vcpu_uninit(vcpu);
-free_vcpu:
- kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
- return ERR_PTR(err);
-}
-
-void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
- kvm_vgic_vcpu_early_init(vcpu);
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
- kvm_mmu_free_memory_caches(vcpu);
- kvm_timer_vcpu_terminate(vcpu);
- kvm_vgic_vcpu_destroy(vcpu);
- kvm_pmu_vcpu_destroy(vcpu);
- kvm_vcpu_uninit(vcpu);
- kmem_cache_free(kvm_vcpu_cache, vcpu);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_arch_vcpu_free(vcpu);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
- return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
- kvm_timer_should_fire(vcpu_ptimer(vcpu));
-}
-
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
- kvm_timer_schedule(vcpu);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
- kvm_timer_unschedule(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
- /* Force users to call KVM_ARM_VCPU_INIT */
- vcpu->arch.target = -1;
- bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
- /* Set up the timer */
- kvm_timer_vcpu_init(vcpu);
-
- kvm_arm_reset_debug_ptr(vcpu);
-
- return 0;
-}
-
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- int *last_ran;
-
- last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-
- /*
- * We might get preempted before the vCPU actually runs, but
- * over-invalidation doesn't affect correctness.
- */
- if (*last_ran != vcpu->vcpu_id) {
- kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
- *last_ran = vcpu->vcpu_id;
- }
-
- vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
-
- kvm_arm_set_running_vcpu(vcpu);
-
- kvm_vgic_load(vcpu);
-}
-
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
- kvm_vgic_put(vcpu);
-
- vcpu->cpu = -1;
-
- kvm_arm_set_running_vcpu(NULL);
- kvm_timer_vcpu_put(vcpu);
-}
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- if (vcpu->arch.power_off)
- mp_state->mp_state = KVM_MP_STATE_STOPPED;
- else
- mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
-
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- switch (mp_state->mp_state) {
- case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.power_off = false;
- break;
- case KVM_MP_STATE_STOPPED:
- vcpu->arch.power_off = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
- * @v: The VCPU pointer
- *
- * If the guest CPU is not waiting for interrupts or an interrupt line is
- * asserted, the CPU is by definition runnable.
- */
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
-{
- return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
- && !v->arch.power_off && !v->arch.pause);
-}
-
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
-{
-}
-
-void force_vm_exit(const cpumask_t *mask)
-{
- preempt_disable();
- smp_call_function_many(mask, exit_vm_noop, NULL, true);
- preempt_enable();
-}
-
-/**
- * need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to check
- *
- * return true if there is a new generation of VMIDs being used
- *
- * The hardware supports only 256 values with the value zero reserved for the
- * host, so we check if an assigned value belongs to a previous generation,
- * which requires us to assign a new value. If we're the first to use a
- * VMID for the new generation, we must flush necessary caches and TLBs on all
- * CPUs.
- */
-static bool need_new_vmid_gen(struct kvm *kvm)
-{
- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
-}
-
-/**
- * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
- * @kvm The guest that we are about to run
- *
- * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
- * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
- * caches and TLBs.
- */
-static void update_vttbr(struct kvm *kvm)
-{
- phys_addr_t pgd_phys;
- u64 vmid;
-
- if (!need_new_vmid_gen(kvm))
- return;
-
- spin_lock(&kvm_vmid_lock);
-
- /*
- * We need to re-check the vmid_gen here to ensure that if another vcpu
- * already allocated a valid vmid for this vm, then this vcpu should
- * use the same vmid.
- */
- if (!need_new_vmid_gen(kvm)) {
- spin_unlock(&kvm_vmid_lock);
- return;
- }
-
- /* First user of a new VMID generation? */
- if (unlikely(kvm_next_vmid == 0)) {
- atomic64_inc(&kvm_vmid_gen);
- kvm_next_vmid = 1;
-
- /*
- * On SMP we know no other CPUs can use this CPU's or each
- * other's VMID after force_vm_exit returns since the
- * kvm_vmid_lock blocks them from reentry to the guest.
- */
- force_vm_exit(cpu_all_mask);
- /*
- * Now broadcast TLB + ICACHE invalidation over the inner
- * shareable domain to make sure all data structures are
- * clean.
- */
- kvm_call_hyp(__kvm_flush_vm_context);
- }
-
- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
- kvm->arch.vmid = kvm_next_vmid;
- kvm_next_vmid++;
- kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
-
- /* update vttbr to be used with the new vmid */
- pgd_phys = virt_to_phys(kvm->arch.pgd);
- BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
- vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
-
- spin_unlock(&kvm_vmid_lock);
-}
-
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- int ret = 0;
-
- if (likely(vcpu->arch.has_run_once))
- return 0;
-
- vcpu->arch.has_run_once = true;
-
- /*
- * Map the VGIC hardware resources before running a vcpu the first
- * time on this VM.
- */
- if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
- }
-
- ret = kvm_timer_enable(vcpu);
-
- return ret;
-}
-
-bool kvm_arch_intc_initialized(struct kvm *kvm)
-{
- return vgic_initialized(kvm);
-}
-
-void kvm_arm_halt_guest(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- vcpu->arch.pause = true;
- kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
-}
-
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.pause = true;
- kvm_vcpu_kick(vcpu);
-}
-
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
-{
- struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- vcpu->arch.pause = false;
- swake_up(wq);
-}
-
-void kvm_arm_resume_guest(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_arm_resume_vcpu(vcpu);
-}
-
-static void vcpu_sleep(struct kvm_vcpu *vcpu)
-{
- struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
- (!vcpu->arch.pause)));
-}
-
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.target >= 0;
-}
-
-/**
- * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
- * @vcpu: The VCPU pointer
- * @run: The kvm_run structure pointer used for userspace state exchange
- *
- * This function is called through the VCPU_RUN ioctl called from user space. It
- * will execute VM code in a loop until the time slice for the process is used
- * or some emulation is needed from user space in which case the function will
- * return with return value 0 and with the kvm_run structure filled in with the
- * required data for the requested emulation.
- */
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- int ret;
- sigset_t sigsaved;
-
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- ret = kvm_vcpu_first_run_init(vcpu);
- if (ret)
- return ret;
-
- if (run->exit_reason == KVM_EXIT_MMIO) {
- ret = kvm_handle_mmio_return(vcpu, vcpu->run);
- if (ret)
- return ret;
- }
-
- if (run->immediate_exit)
- return -EINTR;
-
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
- ret = 1;
- run->exit_reason = KVM_EXIT_UNKNOWN;
- while (ret > 0) {
- /*
- * Check conditions before entering the guest
- */
- cond_resched();
-
- update_vttbr(vcpu->kvm);
-
- if (vcpu->arch.power_off || vcpu->arch.pause)
- vcpu_sleep(vcpu);
-
- /*
- * Preparing the interrupts to be injected also
- * involves poking the GIC, which must be done in a
- * non-preemptible context.
- */
- preempt_disable();
-
- kvm_pmu_flush_hwstate(vcpu);
-
- kvm_timer_flush_hwstate(vcpu);
- kvm_vgic_flush_hwstate(vcpu);
-
- local_irq_disable();
-
- /*
- * If we have a signal pending, or need to notify a userspace
- * irqchip about timer or PMU level changes, then we exit (and
- * update the timer level state in kvm_timer_update_run
- * below).
- */
- if (signal_pending(current) ||
- kvm_timer_should_notify_user(vcpu) ||
- kvm_pmu_should_notify_user(vcpu)) {
- ret = -EINTR;
- run->exit_reason = KVM_EXIT_INTR;
- }
-
- if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
- vcpu->arch.power_off || vcpu->arch.pause) {
- local_irq_enable();
- kvm_pmu_sync_hwstate(vcpu);
- kvm_timer_sync_hwstate(vcpu);
- kvm_vgic_sync_hwstate(vcpu);
- preempt_enable();
- continue;
- }
-
- kvm_arm_setup_debug(vcpu);
-
- /**************************************************************
- * Enter the guest
- */
- trace_kvm_entry(*vcpu_pc(vcpu));
- guest_enter_irqoff();
- vcpu->mode = IN_GUEST_MODE;
-
- ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
-
- vcpu->mode = OUTSIDE_GUEST_MODE;
- vcpu->stat.exits++;
- /*
- * Back from guest
- *************************************************************/
-
- kvm_arm_clear_debug(vcpu);
-
- /*
- * We may have taken a host interrupt in HYP mode (ie
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
- *
- * We're now back in SVC mode, with interrupts
- * disabled. Enabling the interrupts now will have
- * the effect of taking the interrupt again, in SVC
- * mode this time.
- */
- local_irq_enable();
-
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest we
- * account that tick as being spent in the guest. We enable
- * preemption after calling guest_exit() so that if we get
- * preempted we make sure ticks after that is not counted as
- * guest time.
- */
- guest_exit();
- trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
-
- /*
- * We must sync the PMU and timer state before the vgic state so
- * that the vgic can properly sample the updated state of the
- * interrupt line.
- */
- kvm_pmu_sync_hwstate(vcpu);
- kvm_timer_sync_hwstate(vcpu);
-
- kvm_vgic_sync_hwstate(vcpu);
-
- preempt_enable();
-
- ret = handle_exit(vcpu, run, ret);
- }
-
- /* Tell userspace about in-kernel device output levels */
- if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
- kvm_timer_update_run(vcpu);
- kvm_pmu_update_run(vcpu);
- }
-
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- return ret;
-}
-
-static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
-{
- int bit_index;
- bool set;
- unsigned long *ptr;
-
- if (number == KVM_ARM_IRQ_CPU_IRQ)
- bit_index = __ffs(HCR_VI);
- else /* KVM_ARM_IRQ_CPU_FIQ */
- bit_index = __ffs(HCR_VF);
-
- ptr = (unsigned long *)&vcpu->arch.irq_lines;
- if (level)
- set = test_and_set_bit(bit_index, ptr);
- else
- set = test_and_clear_bit(bit_index, ptr);
-
- /*
- * If we didn't change anything, no need to wake up or kick other CPUs
- */
- if (set == level)
- return 0;
-
- /*
- * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
- * trigger a world-switch round on the running physical CPU to set the
- * virtual IRQ/FIQ fields in the HCR appropriately.
- */
- kvm_vcpu_kick(vcpu);
-
- return 0;
-}
-
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
- bool line_status)
-{
- u32 irq = irq_level->irq;
- unsigned int irq_type, vcpu_idx, irq_num;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- struct kvm_vcpu *vcpu = NULL;
- bool level = irq_level->level;
-
- irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
- vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
- irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
-
- trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
-
- switch (irq_type) {
- case KVM_ARM_IRQ_TYPE_CPU:
- if (irqchip_in_kernel(kvm))
- return -ENXIO;
-
- if (vcpu_idx >= nrcpus)
- return -EINVAL;
-
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
- if (!vcpu)
- return -EINVAL;
-
- if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
- return -EINVAL;
-
- return vcpu_interrupt_line(vcpu, irq_num, level);
- case KVM_ARM_IRQ_TYPE_PPI:
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- if (vcpu_idx >= nrcpus)
- return -EINVAL;
-
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
- if (!vcpu)
- return -EINVAL;
-
- if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
- case KVM_ARM_IRQ_TYPE_SPI:
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- if (irq_num < VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
- }
-
- return -EINVAL;
-}
-
-static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned int i;
- int phys_target = kvm_target_cpu();
-
- if (init->target != phys_target)
- return -EINVAL;
-
- /*
- * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
- * use the same target.
- */
- if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
- return -EINVAL;
-
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- bool set = (init->features[i / 32] & (1 << (i % 32)));
-
- if (set && i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
-
- /*
- * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
- * use the same feature set.
- */
- if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
- test_bit(i, vcpu->arch.features) != set)
- return -EINVAL;
-
- if (set)
- set_bit(i, vcpu->arch.features);
- }
-
- vcpu->arch.target = phys_target;
-
- /* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
-
-
-static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_init *init)
-{
- int ret;
-
- ret = kvm_vcpu_set_target(vcpu, init);
- if (ret)
- return ret;
-
- /*
- * Ensure a rebooted VM will fault in RAM pages and detect if the
- * guest MMU is turned off and flush the caches as needed.
- */
- if (vcpu->arch.has_run_once)
- stage2_unmap_vm(vcpu->kvm);
-
- vcpu_reset_hcr(vcpu);
-
- /*
- * Handle the "start in power-off" case.
- */
- if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- vcpu->arch.power_off = true;
- else
- vcpu->arch.power_off = false;
-
- return 0;
-}
-
-static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret = -ENXIO;
-
- switch (attr->group) {
- default:
- ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
- break;
- }
-
- return ret;
-}
-
-static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret = -ENXIO;
-
- switch (attr->group) {
- default:
- ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
- break;
- }
-
- return ret;
-}
-
-static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret = -ENXIO;
-
- switch (attr->group) {
- default:
- ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
- break;
- }
-
- return ret;
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm_vcpu *vcpu = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct kvm_device_attr attr;
-
- switch (ioctl) {
- case KVM_ARM_VCPU_INIT: {
- struct kvm_vcpu_init init;
-
- if (copy_from_user(&init, argp, sizeof(init)))
- return -EFAULT;
-
- return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
- }
- case KVM_SET_ONE_REG:
- case KVM_GET_ONE_REG: {
- struct kvm_one_reg reg;
-
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- if (copy_from_user(&reg, argp, sizeof(reg)))
- return -EFAULT;
- if (ioctl == KVM_SET_ONE_REG)
- return kvm_arm_set_reg(vcpu, &reg);
- else
- return kvm_arm_get_reg(vcpu, &reg);
- }
- case KVM_GET_REG_LIST: {
- struct kvm_reg_list __user *user_list = argp;
- struct kvm_reg_list reg_list;
- unsigned n;
-
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
- return -EFAULT;
- n = reg_list.n;
- reg_list.n = kvm_arm_num_regs(vcpu);
- if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
- return -EFAULT;
- if (n < reg_list.n)
- return -E2BIG;
- return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
- }
- case KVM_SET_DEVICE_ATTR: {
- if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_set_attr(vcpu, &attr);
- }
- case KVM_GET_DEVICE_ATTR: {
- if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_get_attr(vcpu, &attr);
- }
- case KVM_HAS_DEVICE_ATTR: {
- if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_has_attr(vcpu, &attr);
- }
- default:
- return -EINVAL;
- }
-}
-
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
- * always flush the TLB (step 4) even if previous step failed and the dirty
- * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
- * does not preclude user space subsequent dirty log read. Flushing TLB ensures
- * writes will be marked dirty for next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
- bool is_dirty = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
-
- if (is_dirty)
- kvm_flush_remote_tlbs(kvm);
-
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
- struct kvm_arm_device_addr *dev_addr)
-{
- unsigned long dev_id, type;
-
- dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
- KVM_ARM_DEVICE_ID_SHIFT;
- type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
- KVM_ARM_DEVICE_TYPE_SHIFT;
-
- switch (dev_id) {
- case KVM_ARM_DEVICE_VGIC_V2:
- if (!vgic_present)
- return -ENXIO;
- return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
- default:
- return -ENODEV;
- }
-}
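-
-/*
- * Illustrative sketch, not part of the original file: how user space might
- * compose the 'id' field decoded above, here placing the VGICv2 distributor.
- */
-#if 0 /* userspace example */
-static void place_vgic_dist(int vm_fd, __u64 dist_base)
-{
-	struct kvm_arm_device_addr dev_addr = {
-		.id   = (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
-			(KVM_VGIC_V2_ADDR_TYPE_DIST << KVM_ARM_DEVICE_TYPE_SHIFT),
-		.addr = dist_base,	/* guest physical base of the distributor */
-	};
-
-	ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr);
-}
-#endif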
-
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm *kvm = filp->private_data;
- void __user *argp = (void __user *)arg;
-
- switch (ioctl) {
- case KVM_CREATE_IRQCHIP: {
- int ret;
- if (!vgic_present)
- return -ENXIO;
- mutex_lock(&kvm->lock);
- ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
- mutex_unlock(&kvm->lock);
- return ret;
- }
- case KVM_ARM_SET_DEVICE_ADDR: {
- struct kvm_arm_device_addr dev_addr;
-
- if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
- return -EFAULT;
- return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
- }
- case KVM_ARM_PREFERRED_TARGET: {
- int err;
- struct kvm_vcpu_init init;
-
- err = kvm_vcpu_preferred_target(&init);
- if (err)
- return err;
-
- if (copy_to_user(argp, &init, sizeof(init)))
- return -EFAULT;
-
- return 0;
- }
- default:
- return -EINVAL;
- }
-}
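-
-/*
- * Illustrative sketch, not part of the original file: the usual user space
- * pairing of KVM_ARM_PREFERRED_TARGET (handled above) with KVM_ARM_VCPU_INIT.
- */
-#if 0 /* userspace example */
-static void init_vcpu_to_preferred_target(int vm_fd, int vcpu_fd)
-{
-	struct kvm_vcpu_init init;
-
-	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);	/* fills in init */
-	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);	/* resets the VCPU */
-}
-#endif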
-
-static void cpu_init_hyp_mode(void *dummy)
-{
- phys_addr_t pgd_ptr;
- unsigned long hyp_stack_ptr;
- unsigned long stack_page;
- unsigned long vector_ptr;
-
- /* Switch from the HYP stub to our own HYP init vector */
- __hyp_set_vectors(kvm_get_idmap_vector());
-
- pgd_ptr = kvm_mmu_get_httbr();
- stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
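-	/* The HYP stack grows downwards, so point the SP at the top of the page */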
- hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
-
- __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
- __cpu_init_stage2();
-
- if (is_kernel_in_hyp_mode())
- kvm_timer_init_vhe();
-
- kvm_arm_init_debug();
-}
-
-static void cpu_hyp_reset(void)
-{
- if (!is_kernel_in_hyp_mode())
- __hyp_reset_vectors();
-}
-
-static void cpu_hyp_reinit(void)
-{
- cpu_hyp_reset();
-
- if (is_kernel_in_hyp_mode()) {
- /*
- * __cpu_init_stage2() is safe to call even if the PM
- * event was cancelled before the CPU was reset.
- */
- __cpu_init_stage2();
- } else {
- cpu_init_hyp_mode(NULL);
- }
-
- if (vgic_present)
- kvm_vgic_init_cpu_hardware();
-}
-
-static void _kvm_arch_hardware_enable(void *discard)
-{
- if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
- cpu_hyp_reinit();
- __this_cpu_write(kvm_arm_hardware_enabled, 1);
- }
-}
-
-int kvm_arch_hardware_enable(void)
-{
- _kvm_arch_hardware_enable(NULL);
- return 0;
-}
-
-static void _kvm_arch_hardware_disable(void *discard)
-{
- if (__this_cpu_read(kvm_arm_hardware_enabled)) {
- cpu_hyp_reset();
- __this_cpu_write(kvm_arm_hardware_enabled, 0);
- }
-}
-
-void kvm_arch_hardware_disable(void)
-{
- _kvm_arch_hardware_disable(NULL);
-}
-
-#ifdef CONFIG_CPU_PM
-static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
- unsigned long cmd,
- void *v)
-{
- /*
-	 * kvm_arm_hardware_enabled is left with its old value over
-	 * PM_ENTER->PM_EXIT. It is used to indicate whether PM_EXIT should
- * re-enable hyp.
- */
- switch (cmd) {
- case CPU_PM_ENTER:
- if (__this_cpu_read(kvm_arm_hardware_enabled))
- /*
- * don't update kvm_arm_hardware_enabled here
- * so that the hardware will be re-enabled
- * when we resume. See below.
- */
- cpu_hyp_reset();
-
- return NOTIFY_OK;
- case CPU_PM_EXIT:
- if (__this_cpu_read(kvm_arm_hardware_enabled))
- /* The hardware was enabled before suspend. */
- cpu_hyp_reinit();
-
- return NOTIFY_OK;
-
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block hyp_init_cpu_pm_nb = {
- .notifier_call = hyp_init_cpu_pm_notifier,
-};
-
-static void __init hyp_cpu_pm_init(void)
-{
- cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
-}
-static void __init hyp_cpu_pm_exit(void)
-{
- cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
-}
-#else
-static inline void hyp_cpu_pm_init(void)
-{
-}
-static inline void hyp_cpu_pm_exit(void)
-{
-}
-#endif
-
-static void teardown_common_resources(void)
-{
- free_percpu(kvm_host_cpu_state);
-}
-
-static int init_common_resources(void)
-{
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- kvm_err("Cannot allocate host CPU state\n");
- return -ENOMEM;
- }
-
- /* set size of VMID supported by CPU */
- kvm_vmid_bits = kvm_get_vmid_bits();
- kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
- return 0;
-}
-
-static int init_subsystems(void)
-{
- int err = 0;
-
- /*
- * Enable hardware so that subsystem initialisation can access EL2.
- */
- on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
-
- /*
-	 * Register CPU low-power (CPU PM) notifier
- */
- hyp_cpu_pm_init();
-
- /*
- * Init HYP view of VGIC
- */
- err = kvm_vgic_hyp_init();
- switch (err) {
- case 0:
- vgic_present = true;
- break;
- case -ENODEV:
- case -ENXIO:
- vgic_present = false;
- err = 0;
- break;
- default:
- goto out;
- }
-
- /*
- * Init HYP architected timer support
- */
- err = kvm_timer_hyp_init();
- if (err)
- goto out;
-
- kvm_perf_init();
- kvm_coproc_table_init();
-
-out:
- on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
-
- return err;
-}
-
-static void teardown_hyp_mode(void)
-{
- int cpu;
-
- if (is_kernel_in_hyp_mode())
- return;
-
- free_hyp_pgds();
- for_each_possible_cpu(cpu)
- free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
- hyp_cpu_pm_exit();
-}
-
-static int init_vhe_mode(void)
-{
- kvm_info("VHE mode initialized successfully\n");
- return 0;
-}
-
-/**
- * Initializes Hyp-mode on all online CPUs.
- */
-static int init_hyp_mode(void)
-{
- int cpu;
- int err = 0;
-
- /*
- * Allocate Hyp PGD and setup Hyp identity mapping
- */
- err = kvm_mmu_init();
- if (err)
- goto out_err;
-
- /*
- * Allocate stack pages for Hypervisor-mode
- */
- for_each_possible_cpu(cpu) {
- unsigned long stack_page;
-
- stack_page = __get_free_page(GFP_KERNEL);
- if (!stack_page) {
- err = -ENOMEM;
- goto out_err;
- }
-
- per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
- }
-
- /*
- * Map the Hyp-code called directly from the host
- */
- err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
- kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
- if (err) {
- kvm_err("Cannot map world-switch code\n");
- goto out_err;
- }
-
- err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
- kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
- if (err) {
- kvm_err("Cannot map rodata section\n");
- goto out_err;
- }
-
- err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
- kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
- if (err) {
- kvm_err("Cannot map bss section\n");
- goto out_err;
- }
-
- /*
- * Map the Hyp stack pages
- */
- for_each_possible_cpu(cpu) {
- char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
- err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
- PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map hyp stack\n");
- goto out_err;
- }
- }
-
- for_each_possible_cpu(cpu) {
- kvm_cpu_context_t *cpu_ctxt;
-
- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
- err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map host CPU state: %d\n", err);
- goto out_err;
- }
- }
-
- kvm_info("Hyp mode initialized successfully\n");
-
- return 0;
-
-out_err:
- teardown_hyp_mode();
- kvm_err("error initializing Hyp mode: %d\n", err);
- return err;
-}
-
-static void check_kvm_target_cpu(void *ret)
-{
- *(int *)ret = kvm_target_cpu();
-}
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
-{
- struct kvm_vcpu *vcpu;
- int i;
-
- mpidr &= MPIDR_HWID_BITMASK;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
- return vcpu;
- }
- return NULL;
-}
-
-/**
- * Initialize Hyp-mode and memory mappings on all CPUs.
- */
-int kvm_arch_init(void *opaque)
-{
- int err;
- int ret, cpu;
-
- if (!is_hyp_mode_available()) {
- kvm_err("HYP mode not available\n");
- return -ENODEV;
- }
-
- for_each_online_cpu(cpu) {
- smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
- if (ret < 0) {
- kvm_err("Error, CPU %d not supported!\n", cpu);
- return -ENODEV;
- }
- }
-
- err = init_common_resources();
- if (err)
- return err;
-
- if (is_kernel_in_hyp_mode())
- err = init_vhe_mode();
- else
- err = init_hyp_mode();
- if (err)
- goto out_err;
-
- err = init_subsystems();
- if (err)
- goto out_hyp;
-
- return 0;
-
-out_hyp:
- teardown_hyp_mode();
-out_err:
- teardown_common_resources();
- return err;
-}
-
-/* NOP: Compiling as a module is not supported */
-void kvm_arch_exit(void)
-{
- kvm_perf_teardown();
-}
-
-static int arm_init(void)
-{
- int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
- return rc;
-}
-
-module_init(arm_init);
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
deleted file mode 100644
index b6e715fd3c90..000000000000
--- a/arch/arm/kvm/mmio.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_emulate.h>
-#include <trace/events/kvm.h>
-
-#include "trace.h"
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
-{
- void *datap = NULL;
- union {
- u8 byte;
- u16 hword;
- u32 word;
- u64 dword;
- } tmp;
-
- switch (len) {
- case 1:
- tmp.byte = data;
- datap = &tmp.byte;
- break;
- case 2:
- tmp.hword = data;
- datap = &tmp.hword;
- break;
- case 4:
- tmp.word = data;
- datap = &tmp.word;
- break;
- case 8:
- tmp.dword = data;
- datap = &tmp.dword;
- break;
- }
-
- memcpy(buf, datap, len);
-}
-
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
-{
- unsigned long data = 0;
- union {
- u16 hword;
- u32 word;
- u64 dword;
- } tmp;
-
- switch (len) {
- case 1:
- data = *(u8 *)buf;
- break;
- case 2:
- memcpy(&tmp.hword, buf, len);
- data = tmp.hword;
- break;
- case 4:
- memcpy(&tmp.word, buf, len);
- data = tmp.word;
- break;
- case 8:
- memcpy(&tmp.dword, buf, len);
- data = tmp.dword;
- break;
- }
-
- return data;
-}
-
-/**
- * kvm_handle_mmio_return - Handle MMIO loads after user space emulation
- * or in-kernel IO emulation
- *
- * @vcpu: The VCPU pointer
- * @run: The VCPU run struct containing the mmio data
- */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- unsigned long data;
- unsigned int len;
- int mask;
-
- if (!run->mmio.is_write) {
- len = run->mmio.len;
- if (len > sizeof(unsigned long))
- return -EINVAL;
-
- data = kvm_mmio_read_buf(run->mmio.data, len);
-
- if (vcpu->arch.mmio_decode.sign_extend &&
- len < sizeof(unsigned long)) {
- mask = 1U << ((len * 8) - 1);
- data = (data ^ mask) - mask;
- }
-
- trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
- data);
- data = vcpu_data_host_to_guest(vcpu, data, len);
- vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
- }
-
- return 0;
-}
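-
-/*
- * Worked example, not part of the original file, of the sign-extension trick
- * above: for an N-byte load, 'mask' selects the sign bit, and
- * (data ^ mask) - mask propagates it into the upper bits.
- *
- *   len = 1, data = 0x80: mask = 0x80
- *     0x80 ^ 0x80 = 0x00, 0x00 - 0x80 = 0xffffffffffffff80 (sign-extended)
- *   len = 1, data = 0x7f: mask = 0x80
- *     0x7f ^ 0x80 = 0xff, 0xff - 0x80 = 0x7f (positive value unchanged)
- */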
-
-static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
-{
- unsigned long rt;
- int access_size;
- bool sign_extend;
-
- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
- /* page table accesses IO mem: tell guest to fix its TTBR */
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- return 1;
- }
-
- access_size = kvm_vcpu_dabt_get_as(vcpu);
- if (unlikely(access_size < 0))
- return access_size;
-
- *is_write = kvm_vcpu_dabt_iswrite(vcpu);
- sign_extend = kvm_vcpu_dabt_issext(vcpu);
- rt = kvm_vcpu_dabt_get_rd(vcpu);
-
- *len = access_size;
- vcpu->arch.mmio_decode.sign_extend = sign_extend;
- vcpu->arch.mmio_decode.rt = rt;
-
- /*
- * The MMIO instruction is emulated and should not be re-executed
- * in the guest.
- */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 0;
-}
-
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa)
-{
- unsigned long data;
- unsigned long rt;
- int ret;
- bool is_write;
- int len;
- u8 data_buf[8];
-
- /*
-	 * Prepare the MMIO operation. First decode the syndrome data we get
-	 * from the CPU. Then check whether some in-kernel emulation feels
-	 * responsible; otherwise let user space do its magic.
- */
- if (kvm_vcpu_dabt_isvalid(vcpu)) {
- ret = decode_hsr(vcpu, &is_write, &len);
- if (ret)
- return ret;
- } else {
- kvm_err("load/store instruction decoding not implemented\n");
- return -ENOSYS;
- }
-
- rt = vcpu->arch.mmio_decode.rt;
-
- if (is_write) {
- data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
- len);
-
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
- kvm_mmio_write_buf(data_buf, len, data);
-
- ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
- data_buf);
- } else {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
- fault_ipa, 0);
-
- ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
- data_buf);
- }
-
- /* Now prepare kvm_run for the potential return to userland. */
- run->mmio.is_write = is_write;
- run->mmio.phys_addr = fault_ipa;
- run->mmio.len = len;
-
- if (!ret) {
- /* We handled the access successfully in the kernel. */
- if (!is_write)
- memcpy(run->mmio.data, data_buf, len);
- vcpu->stat.mmio_exit_kernel++;
- kvm_handle_mmio_return(vcpu, run);
- return 1;
- }
-
- if (is_write)
- memcpy(run->mmio.data, data_buf, len);
- vcpu->stat.mmio_exit_user++;
- run->exit_reason = KVM_EXIT_MMIO;
- return 0;
-}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
deleted file mode 100644
index 313ee646480f..000000000000
--- a/arch/arm/kvm/mmu.c
+++ /dev/null
@@ -1,1975 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/mman.h>
-#include <linux/kvm_host.h>
-#include <linux/io.h>
-#include <linux/hugetlb.h>
-#include <trace/events/kvm.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_emulate.h>
-#include <asm/virt.h>
-
-#include "trace.h"
-
-static pgd_t *boot_hyp_pgd;
-static pgd_t *hyp_pgd;
-static pgd_t *merged_hyp_pgd;
-static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-
-static unsigned long hyp_idmap_start;
-static unsigned long hyp_idmap_end;
-static phys_addr_t hyp_idmap_vector;
-
-#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
-#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
-
-#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
-#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
-
-static bool memslot_is_logging(struct kvm_memory_slot *memslot)
-{
- return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
-}
-
-/**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
- * @kvm: pointer to kvm structure.
- *
- * Interface to HYP function to flush all VM TLB entries
- */
-void kvm_flush_remote_tlbs(struct kvm *kvm)
-{
- kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
-{
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
-}
-
-/*
- * D-Cache management functions. They take the page table entries by
- * value, as they are flushing the cache using the kernel mapping (or
- * kmap on 32bit).
- */
-static void kvm_flush_dcache_pte(pte_t pte)
-{
- __kvm_flush_dcache_pte(pte);
-}
-
-static void kvm_flush_dcache_pmd(pmd_t pmd)
-{
- __kvm_flush_dcache_pmd(pmd);
-}
-
-static void kvm_flush_dcache_pud(pud_t pud)
-{
- __kvm_flush_dcache_pud(pud);
-}
-
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
- return !pfn_valid(pfn);
-}
-
-/**
- * stage2_dissolve_pmd() - clear and flush huge PMD entry
- * @kvm: pointer to kvm structure.
- * @addr: IPA
- * @pmd: pmd pointer for IPA
- *
- * Clears the PMD entry and flushes the first and second stage TLBs for @addr.
- * Marks all pages in the range dirty.
- */
-static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
-{
- if (!pmd_thp_or_huge(*pmd))
- return;
-
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- put_page(virt_to_page(pmd));
-}
-
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- int min, int max)
-{
- void *page;
-
- BUG_ON(max > KVM_NR_MEM_OBJS);
- if (cache->nobjs >= min)
- return 0;
- while (cache->nobjs < max) {
- page = (void *)__get_free_page(PGALLOC_GFP);
- if (!page)
- return -ENOMEM;
- cache->objects[cache->nobjs++] = page;
- }
- return 0;
-}
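-
-/*
- * Note, not part of the original file: callers top up this cache before
- * taking kvm->mmu_lock (see kvm_phys_addr_ioremap() and user_mem_abort()
- * below), since __get_free_page(PGALLOC_GFP) may sleep, so that
- * mmu_memory_cache_alloc() can then be called safely with the lock held.
- */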
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
- while (mc->nobjs)
- free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
- void *p;
-
- BUG_ON(!mc || !mc->nobjs);
- p = mc->objects[--mc->nobjs];
- return p;
-}
-
-static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
-{
- pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
- stage2_pgd_clear(pgd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pud_free(pud_table);
- put_page(virt_to_page(pgd));
-}
-
-static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
-{
- pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
- VM_BUG_ON(stage2_pud_huge(*pud));
- stage2_pud_clear(pud);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pmd_free(pmd_table);
- put_page(virt_to_page(pud));
-}
-
-static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
-{
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
- VM_BUG_ON(pmd_thp_or_huge(*pmd));
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- pte_free_kernel(NULL, pte_table);
- put_page(virt_to_page(pmd));
-}
-
-/*
- * Unmapping vs dcache management:
- *
- * If a guest maps certain memory pages as uncached, all writes will
- * bypass the data cache and go directly to RAM. However, the CPUs
- * can still speculate reads (not writes) and fill cache lines with
- * data.
- *
- * Those cache lines will be *clean* cache lines though, so a
- * clean+invalidate operation is equivalent to an invalidate
- * operation, because no cache lines are marked dirty.
- *
- * Those clean cache lines could be filled prior to an uncached write
- * by the guest, and the cache coherent IO subsystem would therefore
- * end up writing old data to disk.
- *
- * This is why right after unmapping a page/section and invalidating
- * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
- * the IO subsystem will never hit in the cache.
- */
-static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
- phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t start_addr = addr;
- pte_t *pte, *start_pte;
-
- start_pte = pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte)) {
- pte_t old_pte = *pte;
-
- kvm_set_pte(pte, __pte(0));
- kvm_tlb_flush_vmid_ipa(kvm, addr);
-
- /* No need to invalidate the cache for device mappings */
- if (!kvm_is_device_pfn(pte_pfn(old_pte)))
- kvm_flush_dcache_pte(old_pte);
-
- put_page(virt_to_page(pte));
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
-
- if (stage2_pte_table_empty(start_pte))
- clear_stage2_pmd_entry(kvm, pmd, start_addr);
-}
-
-static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
- phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next, start_addr = addr;
- pmd_t *pmd, *start_pmd;
-
- start_pmd = pmd = stage2_pmd_offset(pud, addr);
- do {
- next = stage2_pmd_addr_end(addr, end);
- if (!pmd_none(*pmd)) {
- if (pmd_thp_or_huge(*pmd)) {
- pmd_t old_pmd = *pmd;
-
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
-
- kvm_flush_dcache_pmd(old_pmd);
-
- put_page(virt_to_page(pmd));
- } else {
- unmap_stage2_ptes(kvm, pmd, addr, next);
- }
- }
- } while (pmd++, addr = next, addr != end);
-
- if (stage2_pmd_table_empty(start_pmd))
- clear_stage2_pud_entry(kvm, pud, start_addr);
-}
-
-static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
- phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next, start_addr = addr;
- pud_t *pud, *start_pud;
-
- start_pud = pud = stage2_pud_offset(pgd, addr);
- do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud)) {
- pud_t old_pud = *pud;
-
- stage2_pud_clear(pud);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- kvm_flush_dcache_pud(old_pud);
- put_page(virt_to_page(pud));
- } else {
- unmap_stage2_pmds(kvm, pud, addr, next);
- }
- }
- } while (pud++, addr = next, addr != end);
-
- if (stage2_pud_table_empty(start_pud))
- clear_stage2_pgd_entry(kvm, pgd, start_addr);
-}
-
-/**
- * unmap_stage2_range - Clear stage2 page table entries to unmap a range
- * @kvm: The VM pointer
- * @start: The intermediate physical base address of the range to unmap
- * @size: The size of the area to unmap
- *
- * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
- * be called while holding mmu_lock (unless for freeing the stage2 pgd before
- * destroying the VM), otherwise another faulting VCPU may come in and mess
- * with things behind our backs.
- */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
-{
- pgd_t *pgd;
- phys_addr_t addr = start, end = start + size;
- phys_addr_t next;
-
- assert_spin_locked(&kvm->mmu_lock);
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- do {
- next = stage2_pgd_addr_end(addr, end);
- if (!stage2_pgd_none(*pgd))
- unmap_stage2_puds(kvm, pgd, addr, next);
- /*
- * If the range is too large, release the kvm->mmu_lock
- * to prevent starvation and lockup detector warnings.
- */
- if (next != end)
- cond_resched_lock(&kvm->mmu_lock);
- } while (pgd++, addr = next, addr != end);
-}
-
-static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
- phys_addr_t addr, phys_addr_t end)
-{
- pte_t *pte;
-
- pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
- kvm_flush_dcache_pte(*pte);
- } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
- phys_addr_t addr, phys_addr_t end)
-{
- pmd_t *pmd;
- phys_addr_t next;
-
- pmd = stage2_pmd_offset(pud, addr);
- do {
- next = stage2_pmd_addr_end(addr, end);
- if (!pmd_none(*pmd)) {
- if (pmd_thp_or_huge(*pmd))
- kvm_flush_dcache_pmd(*pmd);
- else
- stage2_flush_ptes(kvm, pmd, addr, next);
- }
- } while (pmd++, addr = next, addr != end);
-}
-
-static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
- phys_addr_t addr, phys_addr_t end)
-{
- pud_t *pud;
- phys_addr_t next;
-
- pud = stage2_pud_offset(pgd, addr);
- do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud))
- kvm_flush_dcache_pud(*pud);
- else
- stage2_flush_pmds(kvm, pud, addr, next);
- }
- } while (pud++, addr = next, addr != end);
-}
-
-static void stage2_flush_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
- phys_addr_t next;
- pgd_t *pgd;
-
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- do {
- next = stage2_pgd_addr_end(addr, end);
- stage2_flush_puds(kvm, pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
- * @kvm: The struct kvm pointer
- *
- * Go through the stage 2 page tables and invalidate any cache lines
- * backing memory already mapped to the VM.
- */
-static void stage2_flush_vm(struct kvm *kvm)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int idx;
-
- idx = srcu_read_lock(&kvm->srcu);
- spin_lock(&kvm->mmu_lock);
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots)
- stage2_flush_memslot(kvm, memslot);
-
- spin_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
-}
-
-static void clear_hyp_pgd_entry(pgd_t *pgd)
-{
- pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
- pgd_clear(pgd);
- pud_free(NULL, pud_table);
- put_page(virt_to_page(pgd));
-}
-
-static void clear_hyp_pud_entry(pud_t *pud)
-{
- pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
- VM_BUG_ON(pud_huge(*pud));
- pud_clear(pud);
- pmd_free(NULL, pmd_table);
- put_page(virt_to_page(pud));
-}
-
-static void clear_hyp_pmd_entry(pmd_t *pmd)
-{
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
- VM_BUG_ON(pmd_thp_or_huge(*pmd));
- pmd_clear(pmd);
- pte_free_kernel(NULL, pte_table);
- put_page(virt_to_page(pmd));
-}
-
-static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
-{
- pte_t *pte, *start_pte;
-
- start_pte = pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte)) {
- kvm_set_pte(pte, __pte(0));
- put_page(virt_to_page(pte));
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
-
- if (hyp_pte_table_empty(start_pte))
- clear_hyp_pmd_entry(pmd);
-}
-
-static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next;
- pmd_t *pmd, *start_pmd;
-
- start_pmd = pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- /* Hyp doesn't use huge pmds */
- if (!pmd_none(*pmd))
- unmap_hyp_ptes(pmd, addr, next);
- } while (pmd++, addr = next, addr != end);
-
- if (hyp_pmd_table_empty(start_pmd))
- clear_hyp_pud_entry(pud);
-}
-
-static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next;
- pud_t *pud, *start_pud;
-
- start_pud = pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- /* Hyp doesn't use huge puds */
- if (!pud_none(*pud))
- unmap_hyp_pmds(pud, addr, next);
- } while (pud++, addr = next, addr != end);
-
- if (hyp_pud_table_empty(start_pud))
- clear_hyp_pgd_entry(pgd);
-}
-
-static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
-{
- pgd_t *pgd;
- phys_addr_t addr = start, end = start + size;
- phys_addr_t next;
-
- /*
- * We don't unmap anything from HYP, except at the hyp tear down.
- * Hence, we don't have to invalidate the TLBs here.
- */
- pgd = pgdp + pgd_index(addr);
- do {
- next = pgd_addr_end(addr, end);
- if (!pgd_none(*pgd))
- unmap_hyp_puds(pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * free_hyp_pgds - free Hyp-mode page tables
- *
- * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
- * therefore contains either mappings in the kernel memory area (above
- * PAGE_OFFSET), or device mappings in the vmalloc range (from
- * VMALLOC_START to VMALLOC_END).
- *
- * boot_hyp_pgd should only map two pages for the init code.
- */
-void free_hyp_pgds(void)
-{
- unsigned long addr;
-
- mutex_lock(&kvm_hyp_pgd_mutex);
-
- if (boot_hyp_pgd) {
- unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
- boot_hyp_pgd = NULL;
- }
-
- if (hyp_pgd) {
- unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
- for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-
- free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
- hyp_pgd = NULL;
- }
- if (merged_hyp_pgd) {
- clear_page(merged_hyp_pgd);
- free_page((unsigned long)merged_hyp_pgd);
- merged_hyp_pgd = NULL;
- }
-
- mutex_unlock(&kvm_hyp_pgd_mutex);
-}
-
-static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
- unsigned long end, unsigned long pfn,
- pgprot_t prot)
-{
- pte_t *pte;
- unsigned long addr;
-
- addr = start;
- do {
- pte = pte_offset_kernel(pmd, addr);
- kvm_set_pte(pte, pfn_pte(pfn, prot));
- get_page(virt_to_page(pte));
- kvm_flush_dcache_to_poc(pte, sizeof(*pte));
- pfn++;
- } while (addr += PAGE_SIZE, addr != end);
-}
-
-static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
- unsigned long end, unsigned long pfn,
- pgprot_t prot)
-{
- pmd_t *pmd;
- pte_t *pte;
- unsigned long addr, next;
-
- addr = start;
- do {
- pmd = pmd_offset(pud, addr);
-
- BUG_ON(pmd_sect(*pmd));
-
- if (pmd_none(*pmd)) {
- pte = pte_alloc_one_kernel(NULL, addr);
- if (!pte) {
- kvm_err("Cannot allocate Hyp pte\n");
- return -ENOMEM;
- }
- pmd_populate_kernel(NULL, pmd, pte);
- get_page(virt_to_page(pmd));
- kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
- }
-
- next = pmd_addr_end(addr, end);
-
- create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
- pfn += (next - addr) >> PAGE_SHIFT;
- } while (addr = next, addr != end);
-
- return 0;
-}
-
-static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
- unsigned long end, unsigned long pfn,
- pgprot_t prot)
-{
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr, next;
- int ret;
-
- addr = start;
- do {
- pud = pud_offset(pgd, addr);
-
- if (pud_none_or_clear_bad(pud)) {
- pmd = pmd_alloc_one(NULL, addr);
- if (!pmd) {
- kvm_err("Cannot allocate Hyp pmd\n");
- return -ENOMEM;
- }
- pud_populate(NULL, pud, pmd);
- get_page(virt_to_page(pud));
- kvm_flush_dcache_to_poc(pud, sizeof(*pud));
- }
-
- next = pud_addr_end(addr, end);
- ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
- if (ret)
- return ret;
- pfn += (next - addr) >> PAGE_SHIFT;
- } while (addr = next, addr != end);
-
- return 0;
-}
-
-static int __create_hyp_mappings(pgd_t *pgdp,
- unsigned long start, unsigned long end,
- unsigned long pfn, pgprot_t prot)
-{
- pgd_t *pgd;
- pud_t *pud;
- unsigned long addr, next;
- int err = 0;
-
- mutex_lock(&kvm_hyp_pgd_mutex);
- addr = start & PAGE_MASK;
- end = PAGE_ALIGN(end);
- do {
- pgd = pgdp + pgd_index(addr);
-
- if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
- if (!pud) {
- kvm_err("Cannot allocate Hyp pud\n");
- err = -ENOMEM;
- goto out;
- }
- pgd_populate(NULL, pgd, pud);
- get_page(virt_to_page(pgd));
- kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
- }
-
- next = pgd_addr_end(addr, end);
- err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
- if (err)
- goto out;
- pfn += (next - addr) >> PAGE_SHIFT;
- } while (addr = next, addr != end);
-out:
- mutex_unlock(&kvm_hyp_pgd_mutex);
- return err;
-}
-
-static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
-{
- if (!is_vmalloc_addr(kaddr)) {
- BUG_ON(!virt_addr_valid(kaddr));
- return __pa(kaddr);
- } else {
- return page_to_phys(vmalloc_to_page(kaddr)) +
- offset_in_page(kaddr);
- }
-}
-
-/**
- * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
- * @from: The virtual kernel start address of the range
- * @to: The virtual kernel end address of the range (exclusive)
- * @prot: The protection to be applied to this range
- *
- * The same virtual address as the kernel virtual address is also used
- * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
- * physical pages.
- */
-int create_hyp_mappings(void *from, void *to, pgprot_t prot)
-{
- phys_addr_t phys_addr;
- unsigned long virt_addr;
- unsigned long start = kern_hyp_va((unsigned long)from);
- unsigned long end = kern_hyp_va((unsigned long)to);
-
- if (is_kernel_in_hyp_mode())
- return 0;
-
- start = start & PAGE_MASK;
- end = PAGE_ALIGN(end);
-
- for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
- int err;
-
- phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
- err = __create_hyp_mappings(hyp_pgd, virt_addr,
- virt_addr + PAGE_SIZE,
- __phys_to_pfn(phys_addr),
- prot);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
- * @from: The kernel start VA of the range
- * @to: The kernel end VA of the range (exclusive)
- * @phys_addr: The physical start address which gets mapped
- *
- * The resulting HYP VA is the same as the kernel VA, modulo
- * HYP_PAGE_OFFSET.
- */
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
-{
- unsigned long start = kern_hyp_va((unsigned long)from);
- unsigned long end = kern_hyp_va((unsigned long)to);
-
- if (is_kernel_in_hyp_mode())
- return 0;
-
- /* Check for a valid kernel IO mapping */
- if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
- return -EINVAL;
-
- return __create_hyp_mappings(hyp_pgd, start, end,
- __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
-}
-
-/**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm: The KVM struct pointer for the VM.
- *
- * Allocates only the stage-2 HW PGD level table(s), which can support either
- * the full 40-bit input address range or be limited to 32-bit input
- * addresses. Clears the allocated pages.
- *
- * Note we don't need locking here as this is only called when the VM is
- * created, which can only be done once.
- */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
-{
- pgd_t *pgd;
-
- if (kvm->arch.pgd != NULL) {
- kvm_err("kvm_arch already initialized?\n");
- return -EINVAL;
- }
-
- /* Allocate the HW PGD, making sure that each page gets its own refcount */
- pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
- if (!pgd)
- return -ENOMEM;
-
- kvm->arch.pgd = pgd;
- return 0;
-}
-
-static void stage2_unmap_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- hva_t hva = memslot->userspace_addr;
- phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t size = PAGE_SIZE * memslot->npages;
- hva_t reg_end = hva + size;
-
- /*
- * A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we should
- * unmap any of them.
- *
- * +--------------------------------------------+
- * +---------------+----------------+ +----------------+
- * | : VMA 1 | VMA 2 | | VMA 3 : |
- * +---------------+----------------+ +----------------+
- * | memory region |
- * +--------------------------------------------+
- */
- do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
- hva_t vm_start, vm_end;
-
- if (!vma || vma->vm_start >= reg_end)
- break;
-
- /*
- * Take the intersection of this VMA with the memory region
- */
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
-
- if (!(vma->vm_flags & VM_PFNMAP)) {
- gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
- unmap_stage2_range(kvm, gpa, vm_end - vm_start);
- }
- hva = vm_end;
- } while (hva < reg_end);
-}
-
-/**
- * stage2_unmap_vm - Unmap Stage-2 RAM mappings
- * @kvm: The struct kvm pointer
- *
- * Go through the memregions and unmap any regular RAM
- * backing memory already mapped to the VM.
- */
-void stage2_unmap_vm(struct kvm *kvm)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int idx;
-
- idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
- spin_lock(&kvm->mmu_lock);
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots)
- stage2_unmap_memslot(kvm, memslot);
-
- spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
- srcu_read_unlock(&kvm->srcu, idx);
-}
-
-/**
- * kvm_free_stage2_pgd - free all stage-2 tables
- * @kvm: The KVM struct pointer for the VM.
- *
- * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
- * underlying level-2 and level-3 tables before freeing the actual level-1 table
- * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
- */
-void kvm_free_stage2_pgd(struct kvm *kvm)
-{
- if (kvm->arch.pgd == NULL)
- return;
-
- spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
- spin_unlock(&kvm->mmu_lock);
-
- /* Free the HW pgd, one page at a time */
- free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
- kvm->arch.pgd = NULL;
-}
-
-static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr)
-{
- pgd_t *pgd;
- pud_t *pud;
-
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- if (WARN_ON(stage2_pgd_none(*pgd))) {
- if (!cache)
- return NULL;
- pud = mmu_memory_cache_alloc(cache);
- stage2_pgd_populate(pgd, pud);
- get_page(virt_to_page(pgd));
- }
-
- return stage2_pud_offset(pgd, addr);
-}
-
-static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr)
-{
- pud_t *pud;
- pmd_t *pmd;
-
- pud = stage2_get_pud(kvm, cache, addr);
- if (stage2_pud_none(*pud)) {
- if (!cache)
- return NULL;
- pmd = mmu_memory_cache_alloc(cache);
- stage2_pud_populate(pud, pmd);
- get_page(virt_to_page(pud));
- }
-
- return stage2_pmd_offset(pud, addr);
-}
-
-static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
- *cache, phys_addr_t addr, const pmd_t *new_pmd)
-{
- pmd_t *pmd, old_pmd;
-
- pmd = stage2_get_pmd(kvm, cache, addr);
- VM_BUG_ON(!pmd);
-
- /*
- * Mapping in huge pages should only happen through a fault. If a
- * page is merged into a transparent huge page, the individual
- * subpages of that huge page should be unmapped through MMU
- * notifiers before we get here.
- *
-	 * Merging of CompoundPages is not supported; they should instead be
-	 * split first, unmapped, merged, and mapped back in on demand.
- */
- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
- old_pmd = *pmd;
- if (pmd_present(old_pmd)) {
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- } else {
- get_page(virt_to_page(pmd));
- }
-
- kvm_set_pmd(pmd, *new_pmd);
- return 0;
-}
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr, const pte_t *new_pte,
- unsigned long flags)
-{
- pmd_t *pmd;
- pte_t *pte, old_pte;
- bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
- bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
-
- VM_BUG_ON(logging_active && !cache);
-
- /* Create stage-2 page table mapping - Levels 0 and 1 */
- pmd = stage2_get_pmd(kvm, cache, addr);
- if (!pmd) {
- /*
- * Ignore calls from kvm_set_spte_hva for unallocated
- * address ranges.
- */
- return 0;
- }
-
- /*
-	 * While dirty page logging is active, dissolve any huge PMD, then
-	 * continue on to allocate a page.
- */
- if (logging_active)
- stage2_dissolve_pmd(kvm, addr, pmd);
-
- /* Create stage-2 page mappings - Level 2 */
- if (pmd_none(*pmd)) {
- if (!cache)
- return 0; /* ignore calls from kvm_set_spte_hva */
- pte = mmu_memory_cache_alloc(cache);
- pmd_populate_kernel(NULL, pmd, pte);
- get_page(virt_to_page(pmd));
- }
-
- pte = pte_offset_kernel(pmd, addr);
-
- if (iomap && pte_present(*pte))
- return -EFAULT;
-
- /* Create 2nd stage page table mapping - Level 3 */
- old_pte = *pte;
- if (pte_present(old_pte)) {
- kvm_set_pte(pte, __pte(0));
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- } else {
- get_page(virt_to_page(pte));
- }
-
- kvm_set_pte(pte, *new_pte);
- return 0;
-}
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static int stage2_ptep_test_and_clear_young(pte_t *pte)
-{
- if (pte_young(*pte)) {
- *pte = pte_mkold(*pte);
- return 1;
- }
- return 0;
-}
-#else
-static int stage2_ptep_test_and_clear_young(pte_t *pte)
-{
- return __ptep_test_and_clear_young(pte);
-}
-#endif
-
-static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
-{
- return stage2_ptep_test_and_clear_young((pte_t *)pmd);
-}
-
-/**
- * kvm_phys_addr_ioremap - map a device range to guest IPA
- *
- * @kvm: The KVM pointer
- * @guest_ipa: The IPA at which to insert the mapping
- * @pa: The physical address of the device
- * @size: The size of the mapping
- * @writable: Whether the mapping should be writable
- */
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size, bool writable)
-{
- phys_addr_t addr, end;
- int ret = 0;
- unsigned long pfn;
- struct kvm_mmu_memory_cache cache = { 0, };
-
- end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
- pfn = __phys_to_pfn(pa);
-
- for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
- pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-
- if (writable)
- pte = kvm_s2pte_mkwrite(pte);
-
- ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
- if (ret)
- goto out;
- spin_lock(&kvm->mmu_lock);
- ret = stage2_set_pte(kvm, &cache, addr, &pte,
- KVM_S2PTE_FLAG_IS_IOMAP);
- spin_unlock(&kvm->mmu_lock);
- if (ret)
- goto out;
-
- pfn++;
- }
-
-out:
- mmu_free_memory_cache(&cache);
- return ret;
-}
-
-static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
-{
- kvm_pfn_t pfn = *pfnp;
- gfn_t gfn = *ipap >> PAGE_SHIFT;
-
- if (PageTransCompoundMap(pfn_to_page(pfn))) {
- unsigned long mask;
- /*
- * The address we faulted on is backed by a transparent huge
- * page. However, because we map the compound huge page and
- * not the individual tail page, we need to transfer the
- * refcount to the head page. We have to be careful that the
- * THP doesn't start to split while we are adjusting the
- * refcounts.
- *
- * We are sure this doesn't happen, because mmu_notifier_retry
- * was successful and we are holding the mmu_lock, so if this
- * THP is trying to split, it will be blocked in the mmu
- * notifier before touching any of the pages, specifically
- * before being able to call __split_huge_page_refcount().
- *
- * We can therefore safely transfer the refcount from PG_tail
- * to PG_head and switch the pfn from a tail page to the head
- * page accordingly.
- */
- mask = PTRS_PER_PMD - 1;
- VM_BUG_ON((gfn & mask) != (pfn & mask));
- if (pfn & mask) {
- *ipap &= PMD_MASK;
- kvm_release_pfn_clean(pfn);
- pfn &= ~mask;
- kvm_get_pfn(pfn);
- *pfnp = pfn;
- }
-
- return true;
- }
-
- return false;
-}
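-
-/*
- * Worked example, not part of the original file, assuming 4K pages with
- * PTRS_PER_PMD = 512 (mask = 511): a fault on tail pfn 0x1234 of a THP
- * yields pfn & ~mask = 0x1200 (the head page), and *ipap is rounded down by
- * PMD_MASK to the matching 2M boundary, so the whole huge page can be mapped
- * with a single stage-2 block descriptor.
- */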
-
-static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-/**
- * stage2_wp_ptes - write protect PMD range
- * @pmd: pointer to pmd entry
- * @addr: range start address
- * @end: range end address
- */
-static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
-{
- pte_t *pte;
-
- pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte)) {
- if (!kvm_s2pte_readonly(pte))
- kvm_set_s2pte_readonly(pte);
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-/**
- * stage2_wp_pmds - write protect PUD range
- * @pud: pointer to pud entry
- * @addr: range start address
- * @end: range end address
- */
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
-{
- pmd_t *pmd;
- phys_addr_t next;
-
- pmd = stage2_pmd_offset(pud, addr);
-
- do {
- next = stage2_pmd_addr_end(addr, end);
- if (!pmd_none(*pmd)) {
- if (pmd_thp_or_huge(*pmd)) {
- if (!kvm_s2pmd_readonly(pmd))
- kvm_set_s2pmd_readonly(pmd);
- } else {
- stage2_wp_ptes(pmd, addr, next);
- }
- }
- } while (pmd++, addr = next, addr != end);
-}
-
-/**
- * stage2_wp_puds - write protect PGD range
- * @pgd: pointer to pgd entry
- * @addr: range start address
- * @end: range end address
- *
- * Process PUD entries; a huge PUD is not supported and causes a panic.
- */
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
-{
- pud_t *pud;
- phys_addr_t next;
-
- pud = stage2_pud_offset(pgd, addr);
- do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- /* TODO:PUD not supported, revisit later if supported */
- BUG_ON(stage2_pud_huge(*pud));
- stage2_wp_pmds(pud, addr, next);
- }
- } while (pud++, addr = next, addr != end);
-}
-
-/**
- * stage2_wp_range() - write protect stage2 memory region range
- * @kvm: The KVM pointer
- * @addr: Start address of range
- * @end: End address of range
- */
-static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
- pgd_t *pgd;
- phys_addr_t next;
-
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- do {
- /*
- * Release kvm_mmu_lock periodically if the memory region is
- * large. Otherwise, we may see kernel panics with
- * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
- * CONFIG_LOCKDEP. Additionally, holding the lock too long
- * will also starve other vCPUs.
- */
- if (need_resched() || spin_needbreak(&kvm->mmu_lock))
- cond_resched_lock(&kvm->mmu_lock);
-
- next = stage2_pgd_addr_end(addr, end);
- if (stage2_pgd_present(*pgd))
- stage2_wp_puds(pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
- * @kvm: The KVM pointer
- * @slot: The memory slot to write protect
- *
- * Called to start logging dirty pages after memory region
- * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
- * all present PMD and PTEs are write protected in the memory region.
- * Afterwards read of dirty page log can be called.
- *
- * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
- * serializing operations for VM memory regions.
- */
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
-{
- struct kvm_memslots *slots = kvm_memslots(kvm);
- struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
- phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
-
- spin_lock(&kvm->mmu_lock);
- stage2_wp_range(kvm, start, end);
- spin_unlock(&kvm->mmu_lock);
- kvm_flush_remote_tlbs(kvm);
-}
-
-/**
- * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
- * @kvm: The KVM pointer
- * @slot: The memory slot associated with mask
- * @gfn_offset: The gfn offset in memory slot
- * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
- * slot to be write protected
- *
- * Walks the bits set in @mask and write-protects the associated PTEs. The
- * caller must hold kvm_mmu_lock.
- */
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask)
-{
- phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
- phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
- phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
-
- stage2_wp_range(kvm, start, end);
-}
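-
-/*
- * Worked example, not part of the original file: with base_gfn = 0x100 and
- * mask = 0x24 (bits 2 and 5 set), __ffs(mask) = 2 and __fls(mask) = 5, so
- * the function above write-protects gfns 0x102..0x105 inclusive. The range
- * also covers the clear bits 3 and 4 in between.
- */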
-
-/*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * dirty pages.
- *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
- */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask)
-{
- kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
-}
-
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
- unsigned long size)
-{
- __coherent_cache_guest_page(vcpu, pfn, size);
-}
-
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_memory_slot *memslot, unsigned long hva,
- unsigned long fault_status)
-{
- int ret;
- bool write_fault, writable, hugetlb = false, force_pte = false;
- unsigned long mmu_seq;
- gfn_t gfn = fault_ipa >> PAGE_SHIFT;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
- struct vm_area_struct *vma;
- kvm_pfn_t pfn;
- pgprot_t mem_type = PAGE_S2;
- bool logging_active = memslot_is_logging(memslot);
- unsigned long flags = 0;
-
- write_fault = kvm_is_write_fault(vcpu);
- if (fault_status == FSC_PERM && !write_fault) {
- kvm_err("Unexpected L2 read permission error\n");
- return -EFAULT;
- }
-
- /* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
- vma = find_vma_intersection(current->mm, hva, hva + 1);
- if (unlikely(!vma)) {
- kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
- return -EFAULT;
- }
-
- if (is_vm_hugetlb_page(vma) && !logging_active) {
- hugetlb = true;
- gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
- } else {
- /*
- * Pages belonging to memslots that don't have the same
- * alignment for userspace and IPA cannot be mapped using
- * block descriptors even if the pages belong to a THP for
- * the process, because the stage-2 block descriptor will
-		 * cover more than a single THP and we lose atomicity for
- * unmapping, updates, and splits of the THP or other pages
- * in the stage-2 block range.
- */
- if ((memslot->userspace_addr & ~PMD_MASK) !=
- ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
- force_pte = true;
- }
- up_read(&current->mm->mmap_sem);
-
- /* We need minimum second+third level pages */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
- if (ret)
- return ret;
-
- mmu_seq = vcpu->kvm->mmu_notifier_seq;
- /*
- * Ensure the read of mmu_notifier_seq happens before we call
-	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
-	 * the page we just got a reference to getting unmapped before we have a
-	 * chance to grab the mmu_lock, which ensures that if the page gets
- * unmapped afterwards, the call to kvm_unmap_hva will take it away
- * from us again properly. This smp_rmb() interacts with the smp_wmb()
- * in kvm_mmu_notifier_invalidate_<page|range_end>.
- */
- smp_rmb();
-
- pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
- if (is_error_noslot_pfn(pfn))
- return -EFAULT;
-
- if (kvm_is_device_pfn(pfn)) {
- mem_type = PAGE_S2_DEVICE;
- flags |= KVM_S2PTE_FLAG_IS_IOMAP;
- } else if (logging_active) {
- /*
- * Faults on pages in a memslot with logging enabled
- * should not be mapped with huge pages (it introduces churn
- * and performance degradation), so force a pte mapping.
- */
- force_pte = true;
- flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
-
- /*
- * Only actually map the page as writable if this was a write
- * fault.
- */
- if (!write_fault)
- writable = false;
- }
-
- spin_lock(&kvm->mmu_lock);
- if (mmu_notifier_retry(kvm, mmu_seq))
- goto out_unlock;
-
- if (!hugetlb && !force_pte)
- hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
-
- if (hugetlb) {
- pmd_t new_pmd = pfn_pmd(pfn, mem_type);
- new_pmd = pmd_mkhuge(new_pmd);
- if (writable) {
- new_pmd = kvm_s2pmd_mkwrite(new_pmd);
- kvm_set_pfn_dirty(pfn);
- }
- coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
- ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
- } else {
- pte_t new_pte = pfn_pte(pfn, mem_type);
-
- if (writable) {
- new_pte = kvm_s2pte_mkwrite(new_pte);
- kvm_set_pfn_dirty(pfn);
- mark_page_dirty(kvm, gfn);
- }
- coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
- ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
- }
-
-out_unlock:
- spin_unlock(&kvm->mmu_lock);
- kvm_set_pfn_accessed(pfn);
- kvm_release_pfn_clean(pfn);
- return ret;
-}
-
-/*
- * Resolve the access fault by making the page young again.
- * Note that because the faulting entry is guaranteed not to be
- * cached in the TLB, we don't need to invalidate anything.
- * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
- * so there is no need for atomic (pte|pmd)_mkyoung operations.
- */
-static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
-{
- pmd_t *pmd;
- pte_t *pte;
- kvm_pfn_t pfn;
- bool pfn_valid = false;
-
- trace_kvm_access_fault(fault_ipa);
-
- spin_lock(&vcpu->kvm->mmu_lock);
-
- pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
- goto out;
-
- if (pmd_thp_or_huge(*pmd)) { /* THP, HugeTLB */
- *pmd = pmd_mkyoung(*pmd);
- pfn = pmd_pfn(*pmd);
- pfn_valid = true;
- goto out;
- }
-
- pte = pte_offset_kernel(pmd, fault_ipa);
- if (pte_none(*pte)) /* Nothing there either */
- goto out;
-
- *pte = pte_mkyoung(*pte); /* Just a page... */
- pfn = pte_pfn(*pte);
- pfn_valid = true;
-out:
- spin_unlock(&vcpu->kvm->mmu_lock);
- if (pfn_valid)
- kvm_set_pfn_accessed(pfn);
-}
-
-/**
- * kvm_handle_guest_abort - handles all 2nd stage aborts
- * @vcpu: the VCPU pointer
- * @run: the kvm_run structure
- *
- * Any abort that gets to the host is almost guaranteed to be caused by a
- * missing second stage translation table entry, which can mean either that
- * the guest simply needs more memory and we must allocate an appropriate
- * page, or that the guest tried to access I/O memory, which is emulated by
- * user space. The distinction is based on the IPA causing the fault and
- * whether this memory region has been registered as standard RAM by user
- * space.
- */
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- unsigned long fault_status;
- phys_addr_t fault_ipa;
- struct kvm_memory_slot *memslot;
- unsigned long hva;
- bool is_iabt, write_fault, writable;
- gfn_t gfn;
- int ret, idx;
-
- is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
- kvm_inject_vabt(vcpu);
- return 1;
- }
-
- fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
-
- trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
- kvm_vcpu_get_hfar(vcpu), fault_ipa);
-
-	/* The stage-2 fault must be a translation, permission or access fault */
- fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
- if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
- fault_status != FSC_ACCESS) {
- kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
- kvm_vcpu_trap_get_class(vcpu),
- (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
- (unsigned long)kvm_vcpu_get_hsr(vcpu));
- return -EFAULT;
- }
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- gfn = fault_ipa >> PAGE_SHIFT;
- memslot = gfn_to_memslot(vcpu->kvm, gfn);
- hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
- write_fault = kvm_is_write_fault(vcpu);
- if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
- if (is_iabt) {
- /* Prefetch Abort on I/O address */
- kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- ret = 1;
- goto out_unlock;
- }
-
- /*
- * Check for a cache maintenance operation. Since we
- * ended-up here, we know it is outside of any memory
- * slot. But we can't find out if that is for a device,
- * or if the guest is just being stupid. The only thing
- * we know for sure is that this range cannot be cached.
- *
- * So let's assume that the guest is just being
- * cautious, and skip the instruction.
- */
- if (kvm_vcpu_dabt_is_cm(vcpu)) {
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- ret = 1;
- goto out_unlock;
- }
-
- /*
- * The IPA is reported as [MAX:12], so we need to
- * complement it with the bottom 12 bits from the
- * faulting VA. This is always 12 bits, irrespective
- * of the page size.
- */
- fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
- ret = io_mem_abort(vcpu, run, fault_ipa);
- goto out_unlock;
- }
-
- /* Userspace should not be able to register out-of-bounds IPAs */
- VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
-
- if (fault_status == FSC_ACCESS) {
- handle_access_fault(vcpu, fault_ipa);
- ret = 1;
- goto out_unlock;
- }
-
- ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
- if (ret == 0)
- ret = 1;
-out_unlock:
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- return ret;
-}
-
-static int handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- int (*handler)(struct kvm *kvm,
- gpa_t gpa, u64 size,
- void *data),
- void *data)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int ret = 0;
-
- slots = kvm_memslots(kvm);
-
- /* we only care about the pages that the guest sees */
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gpa;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
-
- gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
- ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
- }
-
- return ret;
-}
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- unmap_stage2_range(kvm, gpa, size);
- return 0;
-}
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- if (!kvm->arch.pgd)
- return 0;
-
- trace_kvm_unmap_hva(hva);
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
- return 0;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
-{
- if (!kvm->arch.pgd)
- return 0;
-
- trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
- return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- pte_t *pte = (pte_t *)data;
-
- WARN_ON(size != PAGE_SIZE);
- /*
- * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
- * flag clear because MMU notifiers will have unmapped a huge PMD before
- * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
- * therefore stage2_set_pte() never needs to clear out a huge PMD
- * through this calling path.
- */
- stage2_set_pte(kvm, NULL, gpa, pte, 0);
- return 0;
-}
-
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- unsigned long end = hva + PAGE_SIZE;
- pte_t stage2_pte;
-
- if (!kvm->arch.pgd)
- return;
-
- trace_kvm_set_spte_hva(hva);
- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
- handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
-}
-
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- pmd_t *pmd;
- pte_t *pte;
-
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
- pmd = stage2_get_pmd(kvm, NULL, gpa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
- return 0;
-
- if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
- return stage2_pmdp_test_and_clear_young(pmd);
-
- pte = pte_offset_kernel(pmd, gpa);
- if (pte_none(*pte))
- return 0;
-
- return stage2_ptep_test_and_clear_young(pte);
-}
-
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- pmd_t *pmd;
- pte_t *pte;
-
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
- pmd = stage2_get_pmd(kvm, NULL, gpa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
- return 0;
-
- if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
- return pmd_young(*pmd);
-
- pte = pte_offset_kernel(pmd, gpa);
- if (!pte_none(*pte)) /* Just a page... */
- return pte_young(*pte);
-
- return 0;
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- trace_kvm_age_hva(start, end);
- return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- trace_kvm_test_age_hva(hva);
- return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
-{
- mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
-}
-
-phys_addr_t kvm_mmu_get_httbr(void)
-{
- if (__kvm_cpu_uses_extended_idmap())
- return virt_to_phys(merged_hyp_pgd);
- else
- return virt_to_phys(hyp_pgd);
-}
-
-phys_addr_t kvm_get_idmap_vector(void)
-{
- return hyp_idmap_vector;
-}
-
-static int kvm_map_idmap_text(pgd_t *pgd)
-{
- int err;
-
- /* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(pgd,
- hyp_idmap_start, hyp_idmap_end,
- __phys_to_pfn(hyp_idmap_start),
- PAGE_HYP_EXEC);
- if (err)
- kvm_err("Failed to idmap %lx-%lx\n",
- hyp_idmap_start, hyp_idmap_end);
-
- return err;
-}
-
-int kvm_mmu_init(void)
-{
- int err;
-
- hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
- hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
- hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
-
- /*
- * We rely on the linker script to ensure at build time that the HYP
- * init code does not cross a page boundary.
- */
- BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
-
- kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
- kvm_info("HYP VA range: %lx:%lx\n",
- kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
-
- if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
- hyp_idmap_start < kern_hyp_va(~0UL) &&
- hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
- /*
-		 * The idmap page intersects with the HYP VA space;
-		 * it is not safe to continue further.
- */
- kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
- err = -EINVAL;
- goto out;
- }
-
- hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
- if (!hyp_pgd) {
- kvm_err("Hyp mode PGD not allocated\n");
- err = -ENOMEM;
- goto out;
- }
-
- if (__kvm_cpu_uses_extended_idmap()) {
- boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- hyp_pgd_order);
- if (!boot_hyp_pgd) {
- kvm_err("Hyp boot PGD not allocated\n");
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_map_idmap_text(boot_hyp_pgd);
- if (err)
- goto out;
-
- merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (!merged_hyp_pgd) {
- kvm_err("Failed to allocate extra HYP pgd\n");
- goto out;
- }
- __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
- hyp_idmap_start);
- } else {
- err = kvm_map_idmap_text(hyp_pgd);
- if (err)
- goto out;
- }
-
- return 0;
-out:
- free_hyp_pgds();
- return err;
-}
-
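
The BUG_ON in kvm_mmu_init() above relies on a compact trick: XOR-ing the
first and last byte addresses and masking with PAGE_MASK is non-zero
exactly when the two addresses live in different pages. A sketch with
hypothetical 4K-page addresses:

    /* PAGE_MASK == ~0xfffUL
     * same page   : 0x40001100 ^ 0x400017ff = 0x06ff -> & PAGE_MASK == 0
     * crosses page: 0x40001f00 ^ 0x400020ff = 0x3fff -> & PAGE_MASK != 0
     */
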
-void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
-{
- /*
- * At this point memslot has been committed and there is an
-	 * allocated dirty_bitmap[]; dirty pages will be tracked while the
- * memory slot is write protected.
- */
- if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
- kvm_mmu_wp_memory_region(kvm, mem->slot);
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
-{
- hva_t hva = mem->userspace_addr;
- hva_t reg_end = hva + mem->memory_size;
- bool writable = !(mem->flags & KVM_MEM_READONLY);
- int ret = 0;
-
- if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
- change != KVM_MR_FLAGS_ONLY)
- return 0;
-
- /*
-	 * Prevent userspace from creating a memory region outside of the
-	 * IPA space addressable by the KVM guest.
- */
- if (memslot->base_gfn + memslot->npages >=
- (KVM_PHYS_SIZE >> PAGE_SHIFT))
- return -EFAULT;
-
- down_read(&current->mm->mmap_sem);
- /*
- * A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we can map
- * any of them right now.
- *
- * +--------------------------------------------+
- * +---------------+----------------+ +----------------+
- * | : VMA 1 | VMA 2 | | VMA 3 : |
- * +---------------+----------------+ +----------------+
- * | memory region |
- * +--------------------------------------------+
- */
- do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
- hva_t vm_start, vm_end;
-
- if (!vma || vma->vm_start >= reg_end)
- break;
-
- /*
- * Mapping a read-only VMA is only allowed if the
- * memory region is configured as read-only.
- */
- if (writable && !(vma->vm_flags & VM_WRITE)) {
- ret = -EPERM;
- break;
- }
-
- /*
- * Take the intersection of this VMA with the memory region
- */
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
-
- if (vma->vm_flags & VM_PFNMAP) {
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
- phys_addr_t pa;
-
- pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
- pa += vm_start - vma->vm_start;
-
- /* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
- writable);
- if (ret)
- break;
- }
- hva = vm_end;
- } while (hva < reg_end);
-
- if (change == KVM_MR_FLAGS_ONLY)
- goto out;
-
- spin_lock(&kvm->mmu_lock);
- if (ret)
- unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
- else
- stage2_flush_memslot(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
-out:
- up_read(&current->mm->mmap_sem);
- return ret;
-}
-
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
-{
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
- kvm_free_stage2_pgd(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
- phys_addr_t size = slot->npages << PAGE_SHIFT;
-
- spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, gpa, size);
- spin_unlock(&kvm->mmu_lock);
-}
-
-/*
- * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
- *
- * Main problems:
- * - S/W ops are local to a CPU (not broadcast)
- * - We have line migration behind our back (speculation)
- * - System caches don't support S/W at all (damn!)
- *
- * In the face of the above, the best we can do is to try and convert
- * S/W ops to VA ops. Because the guest is not allowed to infer the
- * S/W to PA mapping, it can only use S/W to nuke the whole cache,
- * which is a rather good thing for us.
- *
- * Also, it is only used when turning caches on/off ("The expected
- * usage of the cache maintenance instructions that operate by set/way
- * is associated with the cache maintenance instructions associated
- * with the powerdown and powerup of caches, if this is required by
- * the implementation.").
- *
- * We use the following policy:
- *
- * - If we trap a S/W operation, we enable VM trapping to detect
- * caches being turned on/off, and do a full clean.
- *
- * - We flush the caches whenever the caches are turned on or off.
- *
- * - Once the caches are enabled, we stop trapping VM ops.
- */
-void kvm_set_way_flush(struct kvm_vcpu *vcpu)
-{
- unsigned long hcr = vcpu_get_hcr(vcpu);
-
- /*
- * If this is the first time we do a S/W operation
- * (i.e. HCR_TVM not set) flush the whole memory, and set the
- * VM trapping.
- *
- * Otherwise, rely on the VM trapping to wait for the MMU +
- * Caches to be turned off. At that point, we'll be able to
- * clean the caches again.
- */
- if (!(hcr & HCR_TVM)) {
- trace_kvm_set_way_flush(*vcpu_pc(vcpu),
- vcpu_has_cache_enabled(vcpu));
- stage2_flush_vm(vcpu->kvm);
- vcpu_set_hcr(vcpu, hcr | HCR_TVM);
- }
-}
-
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
-{
- bool now_enabled = vcpu_has_cache_enabled(vcpu);
-
- /*
- * If switching the MMU+caches on, need to invalidate the caches.
- * If switching it off, need to clean the caches.
- * Clean + invalidate does the trick always.
- */
- if (now_enabled != was_enabled)
- stage2_flush_vm(vcpu->kvm);
-
- /* Caches are now on, stop trapping VM ops (until a S/W op) */
- if (now_enabled)
- vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
-
- trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
-}
diff --git a/arch/arm/kvm/perf.c b/arch/arm/kvm/perf.c
deleted file mode 100644
index 1a3849da0b4b..000000000000
--- a/arch/arm/kvm/perf.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Based on the x86 implementation.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/perf_event.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_emulate.h>
-
-static int kvm_is_in_guest(void)
-{
- return kvm_arm_get_running_vcpu() != NULL;
-}
-
-static int kvm_is_user_mode(void)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_arm_get_running_vcpu();
-
- if (vcpu)
- return !vcpu_mode_priv(vcpu);
-
- return 0;
-}
-
-static unsigned long kvm_get_guest_ip(void)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_arm_get_running_vcpu();
-
- if (vcpu)
- return *vcpu_pc(vcpu);
-
- return 0;
-}
-
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .is_in_guest = kvm_is_in_guest,
- .is_user_mode = kvm_is_user_mode,
- .get_guest_ip = kvm_get_guest_ip,
-};
-
-int kvm_perf_init(void)
-{
- return perf_register_guest_info_callbacks(&kvm_guest_cbs);
-}
-
-int kvm_perf_teardown(void)
-{
- return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
-}
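
For context on the callbacks this file registered: the perf core consults
them from interrupt context to decide whether a sample belongs to a guest.
A simplified sketch of the consumer side (names are illustrative, not the
exact perf core code):

    unsigned long perf_sample_ip(struct pt_regs *regs)
    {
            if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                    return perf_guest_cbs->get_guest_ip(); /* guest PC */
            return instruction_pointer(regs);              /* host PC  */
    }
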
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
deleted file mode 100644
index a08d7a93aebb..000000000000
--- a/arch/arm/kvm/psci.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/preempt.h>
-#include <linux/kvm_host.h>
-#include <linux/wait.h>
-
-#include <asm/cputype.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
-#include <asm/kvm_host.h>
-
-#include <uapi/linux/psci.h>
-
-/*
- * This is an implementation of the Power State Coordination Interface
- * as described in ARM document number ARM DEN 0022A.
- */
-
-#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
-
-static unsigned long psci_affinity_mask(unsigned long affinity_level)
-{
- if (affinity_level <= 3)
- return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
-
- return 0;
-}
-
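
AFFINITY_MASK() builds a mask that keeps the affinity levels at or above
the given level; assuming MPIDR_LEVEL_BITS == 8, a quick expansion:

    /* AFFINITY_MASK(0) = ~((1UL << 0)  - 1) = ...ffffffff  (keep all)
     * AFFINITY_MASK(1) = ~((1UL << 8)  - 1) = ...ffffff00  (drop Aff0)
     * AFFINITY_MASK(2) = ~((1UL << 16) - 1) = ...ffff0000  (drop Aff0-1)
     */
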
-static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
-{
- /*
-	 * NOTE: For simplicity, we make VCPU suspend emulation behave
-	 * the same as WFI (Wait-for-interrupt) emulation.
- *
- * This means for KVM the wakeup events are interrupts and
- * this is consistent with intended use of StateID as described
- * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
- *
-	 * Further, we also treat a power-down request the same as a
-	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
- * specification (ARM DEN 0022A). This means all suspend states
- * for KVM will preserve the register state.
- */
- kvm_vcpu_block(vcpu);
-
- return PSCI_RET_SUCCESS;
-}
-
-static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.power_off = true;
-}
-
-static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
-{
- struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu = NULL;
- struct swait_queue_head *wq;
- unsigned long cpu_id;
- unsigned long context_id;
- phys_addr_t target_pc;
-
- cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
- if (vcpu_mode_is_32bit(source_vcpu))
- cpu_id &= ~((u32) 0);
-
- vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
-
- /*
- * Make sure the caller requested a valid CPU and that the CPU is
- * turned off.
- */
- if (!vcpu)
- return PSCI_RET_INVALID_PARAMS;
- if (!vcpu->arch.power_off) {
- if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
- return PSCI_RET_ALREADY_ON;
- else
- return PSCI_RET_INVALID_PARAMS;
- }
-
- target_pc = vcpu_get_reg(source_vcpu, 2);
- context_id = vcpu_get_reg(source_vcpu, 3);
-
- kvm_reset_vcpu(vcpu);
-
- /* Gracefully handle Thumb2 entry point */
- if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
- target_pc &= ~((phys_addr_t) 1);
- vcpu_set_thumb(vcpu);
- }
-
- /* Propagate caller endianness */
- if (kvm_vcpu_is_be(source_vcpu))
- kvm_vcpu_set_be(vcpu);
-
- *vcpu_pc(vcpu) = target_pc;
- /*
- * NOTE: We always update r0 (or x0) because for PSCI v0.1
-	 * the general purpose registers are undefined upon CPU_ON.
- */
- vcpu_set_reg(vcpu, 0, context_id);
- vcpu->arch.power_off = false;
- smp_mb(); /* Make sure the above is visible */
-
- wq = kvm_arch_vcpu_wq(vcpu);
- swake_up(wq);
-
- return PSCI_RET_SUCCESS;
-}
-
-static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
-{
- int i, matching_cpus = 0;
- unsigned long mpidr;
- unsigned long target_affinity;
- unsigned long target_affinity_mask;
- unsigned long lowest_affinity_level;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_vcpu *tmp;
-
- target_affinity = vcpu_get_reg(vcpu, 1);
- lowest_affinity_level = vcpu_get_reg(vcpu, 2);
-
- /* Determine target affinity mask */
- target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
- if (!target_affinity_mask)
- return PSCI_RET_INVALID_PARAMS;
-
- /* Ignore other bits of target affinity */
- target_affinity &= target_affinity_mask;
-
- /*
-	 * If one or more VCPUs matching the target affinity are
-	 * running, report ON; otherwise report OFF.
- */
- kvm_for_each_vcpu(i, tmp, kvm) {
- mpidr = kvm_vcpu_get_mpidr_aff(tmp);
- if ((mpidr & target_affinity_mask) == target_affinity) {
- matching_cpus++;
- if (!tmp->arch.power_off)
- return PSCI_0_2_AFFINITY_LEVEL_ON;
- }
- }
-
- if (!matching_cpus)
- return PSCI_RET_INVALID_PARAMS;
-
- return PSCI_0_2_AFFINITY_LEVEL_OFF;
-}
-
-static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
-{
- int i;
- struct kvm_vcpu *tmp;
-
- /*
- * The KVM ABI specifies that a system event exit may call KVM_RUN
-	 * again and may perform shutdown/reboot at a later time than when the
- * actual request is made. Since we are implementing PSCI and a
- * caller of PSCI reboot and shutdown expects that the system shuts
- * down or reboots immediately, let's make sure that VCPUs are not run
- * after this call is handled and before the VCPUs have been
- * re-initialized.
- */
- kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- tmp->arch.power_off = true;
- kvm_vcpu_kick(tmp);
- }
-
- memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
- vcpu->run->system_event.type = type;
- vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-}
-
-static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
-{
- kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
-}
-
-static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
-{
- kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
-}
-
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
- return KVM_ARM_PSCI_0_2;
-
- return KVM_ARM_PSCI_0_1;
-}
-
-static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
- unsigned long val;
- int ret = 1;
-
- switch (psci_fn) {
- case PSCI_0_2_FN_PSCI_VERSION:
- /*
- * Bits[31:16] = Major Version = 0
- * Bits[15:0] = Minor Version = 2
- */
- val = 2;
- break;
- case PSCI_0_2_FN_CPU_SUSPEND:
- case PSCI_0_2_FN64_CPU_SUSPEND:
- val = kvm_psci_vcpu_suspend(vcpu);
- break;
- case PSCI_0_2_FN_CPU_OFF:
- kvm_psci_vcpu_off(vcpu);
- val = PSCI_RET_SUCCESS;
- break;
- case PSCI_0_2_FN_CPU_ON:
- case PSCI_0_2_FN64_CPU_ON:
- mutex_lock(&kvm->lock);
- val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
- break;
- case PSCI_0_2_FN_AFFINITY_INFO:
- case PSCI_0_2_FN64_AFFINITY_INFO:
- val = kvm_psci_vcpu_affinity_info(vcpu);
- break;
- case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
- /*
-		 * Either the Trusted OS is MP-capable and hence does not
-		 * require migration, or no Trusted OS is present.
- */
- val = PSCI_0_2_TOS_MP;
- break;
- case PSCI_0_2_FN_SYSTEM_OFF:
- kvm_psci_system_off(vcpu);
- /*
-		 * We shouldn't be going back to the guest VCPU after
-		 * receiving a SYSTEM_OFF request.
-		 *
-		 * If user space accidentally or deliberately resumes
-		 * the guest VCPU after a SYSTEM_OFF request, the guest
-		 * VCPU should see an internal failure in the PSCI
-		 * return value. To achieve this, we preload r0 (or x0)
-		 * with the PSCI return value INTERNAL_FAILURE.
- */
- val = PSCI_RET_INTERNAL_FAILURE;
- ret = 0;
- break;
- case PSCI_0_2_FN_SYSTEM_RESET:
- kvm_psci_system_reset(vcpu);
- /*
- * Same reason as SYSTEM_OFF for preloading r0 (or x0)
- * with PSCI return value INTERNAL_FAILURE.
- */
- val = PSCI_RET_INTERNAL_FAILURE;
- ret = 0;
- break;
- default:
- val = PSCI_RET_NOT_SUPPORTED;
- break;
- }
-
- vcpu_set_reg(vcpu, 0, val);
- return ret;
-}
-
-static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
- unsigned long val;
-
- switch (psci_fn) {
- case KVM_PSCI_FN_CPU_OFF:
- kvm_psci_vcpu_off(vcpu);
- val = PSCI_RET_SUCCESS;
- break;
- case KVM_PSCI_FN_CPU_ON:
- mutex_lock(&kvm->lock);
- val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
- break;
- default:
- val = PSCI_RET_NOT_SUPPORTED;
- break;
- }
-
- vcpu_set_reg(vcpu, 0, val);
- return 1;
-}
-
-/**
- * kvm_psci_call - handle PSCI call if r0 value is in range
- * @vcpu: Pointer to the VCPU struct
- *
- * Handle PSCI calls from guests through traps from HVC instructions.
- * The calling convention is similar to SMC calls to the secure world
- * where the function number is placed in r0.
- *
- * This function returns: > 0 (success), 0 (success but exit to user
- * space), and < 0 (errors)
- *
- * Errors:
- * -EINVAL: Unrecognized PSCI function
- */
-int kvm_psci_call(struct kvm_vcpu *vcpu)
-{
- switch (kvm_psci_version(vcpu)) {
- case KVM_ARM_PSCI_0_2:
- return kvm_psci_0_2_call(vcpu);
- case KVM_ARM_PSCI_0_1:
- return kvm_psci_0_1_call(vcpu);
- default:
- return -EINVAL;
- };
-}
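
To make the calling convention described above concrete, here is a
guest-side sketch (AArch64, PSCI 0.2 CPU_ON; the function ID matches
include/uapi/linux/psci.h, while the MPIDR and entry point are
illustrative):

    register unsigned long x0 asm("x0") = 0xc4000003; /* FN64_CPU_ON  */
    register unsigned long x1 asm("x1") = 1;          /* target MPIDR */
    register unsigned long x2 asm("x2") = 0x80000;    /* entry point  */
    register unsigned long x3 asm("x3") = 0;          /* context ID   */

    asm volatile("hvc #0" : "+r" (x0) : "r" (x1), "r" (x2), "r" (x3));
    /* x0 now holds PSCI_RET_SUCCESS or an error code. */
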
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index c25a88598eb0..fc0943776db2 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -6,133 +6,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-/*
- * Tracepoints for entry/exit to guest
- */
-TRACE_EVENT(kvm_entry,
- TP_PROTO(unsigned long vcpu_pc),
- TP_ARGS(vcpu_pc),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- ),
-
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_exit,
- TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
- TP_ARGS(idx, exit_reason, vcpu_pc),
-
- TP_STRUCT__entry(
- __field( int, idx )
- __field( unsigned int, exit_reason )
- __field( unsigned long, vcpu_pc )
- ),
-
- TP_fast_assign(
- __entry->idx = idx;
- __entry->exit_reason = exit_reason;
- __entry->vcpu_pc = vcpu_pc;
- ),
-
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
- __print_symbolic(__entry->idx, kvm_arm_exception_type),
- __entry->exit_reason,
- __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
- __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_guest_fault,
- TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
- unsigned long hxfar,
- unsigned long long ipa),
- TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( unsigned long, hsr )
- __field( unsigned long, hxfar )
- __field( unsigned long long, ipa )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->hsr = hsr;
- __entry->hxfar = hxfar;
- __entry->ipa = ipa;
- ),
-
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
- __entry->ipa, __entry->hsr,
- __entry->hxfar, __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_access_fault,
- TP_PROTO(unsigned long ipa),
- TP_ARGS(ipa),
-
- TP_STRUCT__entry(
- __field( unsigned long, ipa )
- ),
-
- TP_fast_assign(
- __entry->ipa = ipa;
- ),
-
- TP_printk("IPA: %lx", __entry->ipa)
-);
-
-TRACE_EVENT(kvm_irq_line,
- TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
- TP_ARGS(type, vcpu_idx, irq_num, level),
-
- TP_STRUCT__entry(
- __field( unsigned int, type )
- __field( int, vcpu_idx )
- __field( int, irq_num )
- __field( int, level )
- ),
-
- TP_fast_assign(
- __entry->type = type;
- __entry->vcpu_idx = vcpu_idx;
- __entry->irq_num = irq_num;
- __entry->level = level;
- ),
-
- TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
- (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
- (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
- (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
- __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
-);
-
-TRACE_EVENT(kvm_mmio_emulate,
- TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
- unsigned long cpsr),
- TP_ARGS(vcpu_pc, instr, cpsr),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( unsigned long, instr )
- __field( unsigned long, cpsr )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->instr = instr;
- __entry->cpsr = cpsr;
- ),
-
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
- __entry->vcpu_pc, __entry->instr, __entry->cpsr)
-);
-
/* Architecturally implementation defined CP15 register access */
TRACE_EVENT(kvm_emulate_cp15_imp,
TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
@@ -181,87 +54,6 @@ TRACE_EVENT(kvm_wfx,
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_unmap_hva_range,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_set_spte_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_age_hva,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_test_age_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
-);
-
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
@@ -282,45 +74,6 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
-TRACE_EVENT(kvm_set_way_flush,
- TP_PROTO(unsigned long vcpu_pc, bool cache),
- TP_ARGS(vcpu_pc, cache),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( bool, cache )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->cache = cache;
- ),
-
- TP_printk("S/W flush at 0x%016lx (cache %s)",
- __entry->vcpu_pc, __entry->cache ? "on" : "off")
-);
-
-TRACE_EVENT(kvm_toggle_cache,
- TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
- TP_ARGS(vcpu_pc, was, now),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( bool, was )
- __field( bool, now )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->was = was;
- __entry->now = now;
- ),
-
- TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
- __entry->vcpu_pc, __entry->was ? "on" : "off",
- __entry->now ? "on" : "off")
-);
-
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
index 59cf310bc1e9..e8d417309f33 100644
--- a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
+++ b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
@@ -138,7 +138,8 @@ int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate,
if (!dd)
return -EINVAL;
- tmpset.cm_clksel1_pll = readl_relaxed(dd->mult_div1_reg);
+ tmpset.cm_clksel1_pll =
+ omap_clk_ll_ops.clk_readl(&dd->mult_div1_reg);
tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
dd->div1_mask);
div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 1270afdcacdf..42881f21cede 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -54,9 +54,10 @@ u16 cpu_mask;
#define OMAP3PLUS_DPLL_FINT_MIN 32000
#define OMAP3PLUS_DPLL_FINT_MAX 52000000
-static struct ti_clk_ll_ops omap_clk_ll_ops = {
+struct ti_clk_ll_ops omap_clk_ll_ops = {
.clkdm_clk_enable = clkdm_clk_enable,
.clkdm_clk_disable = clkdm_clk_disable,
+ .clkdm_lookup = clkdm_lookup,
.cm_wait_module_ready = omap_cm_wait_module_ready,
.cm_split_idlest_reg = cm_split_idlest_reg,
};
@@ -78,38 +79,6 @@ int __init omap2_clk_setup_ll_ops(void)
* OMAP2+ specific clock functions
*/
-/* Public functions */
-
-/**
- * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
- * @clk: OMAP clock struct ptr to use
- *
- * Convert a clockdomain name stored in a struct clk 'clk' into a
- * clockdomain pointer, and save it into the struct clk. Intended to be
- * called during clk_register(). No return value.
- */
-void omap2_init_clk_clkdm(struct clk_hw *hw)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- struct clockdomain *clkdm;
- const char *clk_name;
-
- if (!clk->clkdm_name)
- return;
-
- clk_name = __clk_get_name(hw->clk);
-
- clkdm = clkdm_lookup(clk->clkdm_name);
- if (clkdm) {
- pr_debug("clock: associated clk %s to clkdm %s\n",
- clk_name, clk->clkdm_name);
- clk->clkdm = clkdm;
- } else {
- pr_debug("clock: could not associate clk %s to clkdm %s\n",
- clk_name, clk->clkdm_name);
- }
-}
-
/**
* ti_clk_init_features - init clock features struct for the SoC
*
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 4e66295dca25..cf45550197e6 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -64,6 +64,8 @@
#define OMAP4XXX_EN_DPLL_FRBYPASS 0x6
#define OMAP4XXX_EN_DPLL_LOCKED 0x7
+extern struct ti_clk_ll_ops omap_clk_ll_ops;
+
extern u16 cpu_mask;
extern const struct clkops clkops_omap2_dflt_wait;
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index 1fe3e6b833d2..de75cbcdc9d1 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -23,6 +23,7 @@
#define MAX_MODULE_READY_TIME 2000
# ifndef __ASSEMBLER__
+#include <linux/clk/ti.h>
extern void __iomem *cm_base;
extern void __iomem *cm2_base;
extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2);
@@ -50,7 +51,7 @@ extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2);
* @module_disable: ptr to the SoC CM-specific module_disable impl
*/
struct cm_ll_data {
- int (*split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
+ int (*split_idlest_reg)(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id);
int (*wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
u8 idlest_shift);
@@ -60,7 +61,7 @@ struct cm_ll_data {
void (*module_disable)(u8 part, u16 inst, u16 clkctrl_offs);
};
-extern int cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst,
+extern int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id);
int omap_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_reg,
u8 idlest_shift);
diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c
index 3e5fd3587eb1..cd90b4c6a06b 100644
--- a/arch/arm/mach-omap2/cm2xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx.c
@@ -204,7 +204,7 @@ void omap2xxx_cm_apll96_disable(void)
* XXX This function is only needed until absolute register addresses are
* removed from the OMAP struct clk records.
*/
-static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
+static int omap2xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg,
s16 *prcm_inst,
u8 *idlest_reg_id)
{
@@ -212,10 +212,7 @@ static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
u8 idlest_offs;
int i;
- if (idlest_reg < cm_base || idlest_reg > (cm_base + 0x0fff))
- return -EINVAL;
-
- idlest_offs = (unsigned long)idlest_reg & 0xff;
+ idlest_offs = idlest_reg->offset & 0xff;
for (i = 0; i < ARRAY_SIZE(omap2xxx_cm_idlest_offs); i++) {
if (idlest_offs == omap2xxx_cm_idlest_offs[i]) {
*idlest_reg_id = i + 1;
@@ -226,7 +223,7 @@ static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
if (i == ARRAY_SIZE(omap2xxx_cm_idlest_offs))
return -EINVAL;
- offs = idlest_reg - cm_base;
+ offs = idlest_reg->offset;
offs &= 0xff00;
*prcm_inst = offs;
diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c
index d91ae8206d1e..55b046a719dc 100644
--- a/arch/arm/mach-omap2/cm3xxx.c
+++ b/arch/arm/mach-omap2/cm3xxx.c
@@ -118,7 +118,7 @@ static int omap3xxx_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_id,
* XXX This function is only needed until absolute register addresses are
* removed from the OMAP struct clk records.
*/
-static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
+static int omap3xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg,
s16 *prcm_inst,
u8 *idlest_reg_id)
{
@@ -126,11 +126,7 @@ static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
u8 idlest_offs;
int i;
- if (idlest_reg < (cm_base + OMAP3430_IVA2_MOD) ||
- idlest_reg > (cm_base + 0x1ffff))
- return -EINVAL;
-
- idlest_offs = (unsigned long)idlest_reg & 0xff;
+ idlest_offs = idlest_reg->offset & 0xff;
for (i = 0; i < ARRAY_SIZE(omap3xxx_cm_idlest_offs); i++) {
if (idlest_offs == omap3xxx_cm_idlest_offs[i]) {
*idlest_reg_id = i + 1;
@@ -141,7 +137,7 @@ static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
if (i == ARRAY_SIZE(omap3xxx_cm_idlest_offs))
return -EINVAL;
- offs = idlest_reg - cm_base;
+ offs = idlest_reg->offset;
offs &= 0xff00;
*prcm_inst = offs;
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index 23e8bcec34e3..bbe41f4c9dc8 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -65,7 +65,7 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
* or 0 upon success. XXX This function is only needed until absolute
* register addresses are removed from the OMAP struct clk records.
*/
-int cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst,
+int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id)
{
if (!cm_ll_data->split_idlest_reg) {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0268584f1fa0..c742dfd2967b 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2408,6 +2408,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+
+ /*
+	 * Don't override the dma_ops if they have already been set. Ideally
+	 * this should be the only location where dma_ops are set; remove this
+	 * check once all other callers of set_dma_ops have disappeared.
+ */
+ if (dev->dma_ops)
+ return;
+
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
dma_ops = arm_get_iommu_dma_map_ops(coherent);
else
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 03fac123676d..dc269d9143bc 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/amba/pl330.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7dedf2d8494e..f839ecd919f9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -102,12 +102,12 @@ libs-y := arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
-KBUILD_IMAGE := Image.gz
+boot := arch/arm64/boot
+KBUILD_IMAGE := $(boot)/Image.gz
KBUILD_DTBS := dtbs
-all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
+all: Image.gz $(KBUILD_DTBS)
-boot := arch/arm64/boot
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index df411f3e083c..ecd9788cd298 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -62,4 +62,13 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
alternative_else_nop_endif
.endm
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+ .macro clear_address_tag, dst, addr
+ tst \addr, #(1 << 55)
+ bic \dst, \addr, #(0xff << 56)
+ csel \dst, \dst, \addr, eq
+ .endm
+
#endif
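
A C rendering of the new clear_address_tag macro may help; this mirrors
the tst/bic/csel sequence above and is an illustration only:

    static inline unsigned long clear_address_tag_c(unsigned long addr)
    {
            /* bit 55 clear => user (TTBR0) address: strip the tag byte */
            if (!(addr & (1UL << 55)))
                    return addr & ~(0xffUL << 56);
            return addr;    /* kernel address: left untouched */
    }
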
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 7457ce082b5f..99fa69c9c3cf 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -322,7 +322,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
@@ -394,7 +394,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 4e0497f581a0..0fe7e43b7fbc 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -42,25 +42,35 @@
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
-#define __smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u8 *)__u.__c) \
+ : "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u16 *)__u.__c) \
+ : "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u32 *)__u.__c) \
+ : "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u64 *)__u.__c) \
+ : "memory"); \
break; \
} \
} while (0)
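
Judging from the constraint changes alone, the union added above makes the
inline asm consume a value of exactly the width each STLR variant expects
(__u8/__u16/__u32/__u64), so the compiler must materialize a properly
extended register operand instead of one whose upper bits are unspecified
for a narrower typeof(*p). A hedged sketch of the case being guarded:

    /* Hypothetical caller: */
    u8 flag;
    smp_store_release(&flag, computed_int & 0x1);
    /* Old: "r" (v) passed an operand of the caller's type; the register
     * bits above that type's width were unspecified.
     * New: "r" (*(__u8 *)__u.__c) is a genuine u8 read, so the value
     * handed to stlrb is well-defined. */
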
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 91b26d26af8a..ae852add053d 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \
" swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
__nops(3) \
" " #nop_lse) \
- : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
: "r" (x) \
: cl); \
\
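
The constraint widening above addresses a compiler-visible hazard rather
than the instruction sequence itself; a hedged reading of the change:

    /* Old: "+Q" (*(u8 *)ptr) told the compiler the asm touches only one
     * byte at ptr. For the 4- and 8-byte cases that is too narrow, so
     * the compiler could legally cache or reorder accesses to the rest
     * of *ptr around the exchange. Hazarding against unsigned long
     * covers every size this macro template instantiates. */
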
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba70f07..fe39e6841326 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
+static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+ u32 esr = kvm_vcpu_get_hsr(vcpu);
+ return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+}
+
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index ba497172610d..7b8a04789cef 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -69,20 +69,21 @@ static inline void set_fs(mm_segment_t fs)
*/
#define __range_ok(addr, size) \
({ \
+ unsigned long __addr = (unsigned long __force)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
- : "1" (addr), "Ir" (size), \
+ : "1" (__addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \
})
/*
- * When dealing with data aborts or instruction traps we may end up with
- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
- * on to access_ok(), for instance.
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
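
For readers not fluent in condition-code tricks, a plain-C equivalent of
the __range_ok() asm above (a sketch; the real code stays in asm so the
flags can feed cset directly):

    static inline int range_ok_c(unsigned long addr, unsigned long size,
                                 unsigned long limit)
    {
            unsigned long end = addr + size;
            /* adds: detect wraparound; ccmp/cset: end <= limit */
            return end >= addr && end <= limit;
    }
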
@@ -230,7 +231,7 @@ do { \
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
+ __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
@@ -297,7 +298,7 @@ do { \
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
+ __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 825b0fe51c2b..13a97aa2285f 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -2,21 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += kvm_para.h
-header-y += perf_regs.h
-header-y += param.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 869ee480deed..70eea2ecc663 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -216,13 +216,17 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
+#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
+#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 657977e77ec8..f0e6d717885b 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -306,7 +306,8 @@ do { \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
+ "i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \
: "memory"); \
uaccess_disable(); \
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 43512d4d7df2..b738880350f9 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -428,12 +428,13 @@ el1_da:
/*
* Data abort handling
*/
- mrs x0, far_el1
+ mrs x3, far_el1
enable_dbg
// re-enable interrupts if they were enabled in the aborted context
tbnz x23, #7, 1f // PSR_I_BIT
enable_irq
1:
+ clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
@@ -594,7 +595,7 @@ el0_da:
// enable interrupts before calling the main handler
enable_dbg_and_irq
ct_user_exit
- bic x0, x26, #(0xff << 56)
+ clear_address_tag x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 0296e7924240..749f81779420 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
+#include <asm/uaccess.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -721,6 +722,8 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
u64 wp_low, wp_high;
u32 lens, lene;
+ addr = untagged_addr(addr);
+
lens = __ffs(ctrl->len);
lene = __fls(ctrl->len);
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index c9a2ab446dc6..f035ff6fb223 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
+	/* Silence warnings from the initial allocation attempt */
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d4d6ae02cd55..0805b44f986a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -443,7 +443,7 @@ int cpu_enable_cache_maint_trap(void *__unused)
}
#define __user_cache_maint(insn, address, res) \
- if (untagged_addr(address) >= user_addr_max()) { \
+ if (address >= user_addr_max()) { \
res = -EFAULT; \
} else { \
uaccess_ttbr0_enable(); \
@@ -469,7 +469,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
- address = pt_regs_read_reg(regs, rt);
+ address = untagged_addr(pt_regs_read_reg(regs, rt));
switch (crm) {
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index afd51bebb9c5..5d9810086c25 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -7,14 +7,13 @@ CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
KVM=../../../virt/kvm
-ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index efbe9e8e7a78..0fe27024a2e1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1529,8 +1529,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (hsr >> 5) & 0xf;
- int Rt2 = (hsr >> 10) & 0xf;
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
+ int Rt2 = (hsr >> 10) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = false;
@@ -1586,7 +1586,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (hsr >> 5) & 0xf;
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
params.is_aarch32 = true;
params.is_32bit = true;
@@ -1688,7 +1688,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (esr >> 5) & 0x1f;
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
int ret;
trace_kvm_handle_sys_reg(esr);
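
The register-number fix in this file is worth spelling out: the ISS Rt
field is five bits wide (ESR[9:5]), so the old 0xf mask silently truncated
transfer registers x16-x30. A worked example (the field layout follows the
ARM ARM; the scenario is illustrative):

    /* Trapped sysreg access using x17 => ESR[9:5] = 0b10001 (17)
     * old: (hsr >> 5) & 0x0f -> 1   (wrongly decodes as x1)
     * new: (hsr >> 5) & 0x1f -> 17  (correct)
     */
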
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4dac4afc95a5..3216e098c058 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -28,6 +28,7 @@
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/pci.h>
#include <asm/cacheflush.h>
@@ -879,34 +880,26 @@ static const struct dma_map_ops iommu_dma_ops = {
.mapping_error = iommu_dma_mapping_error,
};
-/*
- * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
- * everything it needs to - the device is only partially created and the
- * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
- * need this delayed attachment dance. Once IOMMU probe ordering is sorted
- * to move the arch_setup_dma_ops() call later, all the notifier bits below
- * become unnecessary, and will go away.
- */
-struct iommu_dma_notifier_data {
- struct list_head list;
- struct device *dev;
- const struct iommu_ops *ops;
- u64 dma_base;
- u64 size;
-};
-static LIST_HEAD(iommu_dma_masters);
-static DEFINE_MUTEX(iommu_dma_notifier_lock);
+static int __init __iommu_dma_init(void)
+{
+ return iommu_dma_init();
+}
+arch_initcall(__iommu_dma_init);
-static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
- u64 dma_base, u64 size)
+static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *ops)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain;
+
+ if (!ops)
+ return;
/*
- * If the IOMMU driver has the DMA domain support that we require,
- * then the IOMMU core will have already configured a group for this
- * device, and allocated the default domain for that group.
+ * The IOMMU core code allocates the default DMA domain, which the
+ * underlying IOMMU driver needs to support via the dma-iommu layer.
*/
+ domain = iommu_get_domain_for_dev(dev);
+
if (!domain)
goto out_err;
@@ -917,109 +910,11 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
dev->dma_ops = &iommu_dma_ops;
}
- return true;
+ return;
+
out_err:
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
dev_name(dev));
- return false;
-}
-
-static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
- u64 dma_base, u64 size)
-{
- struct iommu_dma_notifier_data *iommudata;
-
- iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
- if (!iommudata)
- return;
-
- iommudata->dev = dev;
- iommudata->ops = ops;
- iommudata->dma_base = dma_base;
- iommudata->size = size;
-
- mutex_lock(&iommu_dma_notifier_lock);
- list_add(&iommudata->list, &iommu_dma_masters);
- mutex_unlock(&iommu_dma_notifier_lock);
-}
-
-static int __iommu_attach_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct iommu_dma_notifier_data *master, *tmp;
-
- if (action != BUS_NOTIFY_BIND_DRIVER)
- return 0;
-
- mutex_lock(&iommu_dma_notifier_lock);
- list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
- if (data == master->dev && do_iommu_attach(master->dev,
- master->ops, master->dma_base, master->size)) {
- list_del(&master->list);
- kfree(master);
- break;
- }
- }
- mutex_unlock(&iommu_dma_notifier_lock);
- return 0;
-}
-
-static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
-{
- struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
- int ret;
-
- if (!nb)
- return -ENOMEM;
-
- nb->notifier_call = __iommu_attach_notifier;
-
- ret = bus_register_notifier(bus, nb);
- if (ret) {
- pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
- bus->name);
- kfree(nb);
- }
- return ret;
-}
-
-static int __init __iommu_dma_init(void)
-{
- int ret;
-
- ret = iommu_dma_init();
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&platform_bus_type);
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&amba_bustype);
-#ifdef CONFIG_PCI
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&pci_bus_type);
-#endif
- return ret;
-}
-arch_initcall(__iommu_dma_init);
-
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *ops)
-{
- struct iommu_group *group;
-
- if (!ops)
- return;
- /*
- * TODO: As a concession to the future, we're ready to handle being
- * called both early and late (i.e. after bus_add_device). Once all
- * the platform bus code is reworked to call us late and the notifier
- * junk above goes away, move the body of do_iommu_attach here.
- */
- group = iommu_group_get(dev);
- if (group) {
- do_iommu_attach(dev, ops, dma_base, size);
- iommu_group_put(group);
- } else {
- queue_iommu_attach(dev, ops, dma_base, size);
- }
}
void arch_teardown_dma_ops(struct device *dev)
diff --git a/arch/blackfin/include/uapi/asm/Kbuild b/arch/blackfin/include/uapi/asm/Kbuild
index 0bd28f77abc3..b15bf6bc0e94 100644
--- a/arch/blackfin/include/uapi/asm/Kbuild
+++ b/arch/blackfin/include/uapi/asm/Kbuild
@@ -1,19 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += bfin_sport.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += fcntl.h
-header-y += fixed_code.h
-header-y += ioctls.h
-header-y += kvm_para.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index e9bc2b2b8147..13a97aa2285f 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -2,11 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += byteorder.h
-header-y += kvm_para.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 2735eb7671a5..b7cd6b9209a9 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -136,7 +136,6 @@ config ETRAX_NANDFLASH
bool "NAND flash support"
depends on ETRAX_ARCH_V32
select MTD_NAND
- select MTD_NAND_IDS
help
This option enables MTD mapping of NAND flash devices. Needed to use
NAND flash memories. If unsure, say Y.
diff --git a/arch/cris/include/arch-v10/arch/Kbuild b/arch/cris/include/arch-v10/arch/Kbuild
deleted file mode 100644
index 1f0fc7a66f5f..000000000000
--- a/arch/cris/include/arch-v10/arch/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-# CRISv10 arch
diff --git a/arch/cris/include/arch-v32/arch/Kbuild b/arch/cris/include/arch-v32/arch/Kbuild
deleted file mode 100644
index 2fd65c7e15c9..000000000000
--- a/arch/cris/include/arch-v32/arch/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-# CRISv32 arch
diff --git a/arch/cris/include/uapi/arch-v10/arch/Kbuild b/arch/cris/include/uapi/arch-v10/arch/Kbuild
deleted file mode 100644
index 9048c87a782b..000000000000
--- a/arch/cris/include/uapi/arch-v10/arch/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# UAPI Header export list
-header-y += sv_addr.agh
-header-y += sv_addr_ag.h
-header-y += svinto.h
-header-y += user.h
diff --git a/arch/cris/include/uapi/arch-v32/arch/Kbuild b/arch/cris/include/uapi/arch-v32/arch/Kbuild
deleted file mode 100644
index 59efffd16b61..000000000000
--- a/arch/cris/include/uapi/arch-v32/arch/Kbuild
+++ /dev/null
@@ -1,3 +0,0 @@
-# UAPI Header export list
-header-y += cryptocop.h
-header-y += user.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index d5564a0ae66a..b15bf6bc0e94 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,44 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += ../arch-v10/arch/
-header-y += ../arch-v32/arch/
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += elf.h
-header-y += elf_v10.h
-header-y += elf_v32.h
-header-y += errno.h
-header-y += ethernet.h
-header-y += etraxgpio.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += ptrace_v10.h
-header-y += ptrace_v32.h
-header-y += resource.h
-header-y += rs485.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += sync_serial.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/frv/include/uapi/asm/Kbuild b/arch/frv/include/uapi/asm/Kbuild
index 42a2b33461c0..b15bf6bc0e94 100644
--- a/arch/frv/include/uapi/asm/Kbuild
+++ b/arch/frv/include/uapi/asm/Kbuild
@@ -1,35 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += registers.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c
index 8414293f213a..20c5b79b55f9 100644
--- a/arch/frv/kernel/asm-offsets.c
+++ b/arch/frv/kernel/asm-offsets.c
@@ -14,21 +14,10 @@
#include <asm/thread_info.h>
#include <asm/gdb-stub.h>
-#define DEF_PTREG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \
- : : "i" (offsetof(struct pt_regs, reg)))
-
-#define DEF_IREG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct user_context, " #reg ")" \
- : : "i" (offsetof(struct user_context, reg)))
-
-#define DEF_FREG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct user_context, " #reg ")" \
- : : "i" (offsetof(struct user_context, reg)))
-
-#define DEF_0REG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \
- : : "i" (offsetof(struct frv_frame0, reg)))
+#define DEF_PTREG(sym, reg) OFFSET(sym, pt_regs, reg)
+#define DEF_IREG(sym, reg) OFFSET(sym, user_context, reg)
+#define DEF_FREG(sym, reg) OFFSET(sym, user_context, reg)
+#define DEF_0REG(sym, reg) OFFSET(sym, frv_frame0, reg)
void foo(void)
{
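
Note on the conversion above: DEF_PTREG() and friends now expand to the generic OFFSET() helper instead of open-coded asm. A sketch of those helpers as defined in include/linux/kbuild.h around this era of the tree (paraphrased from memory, so treat the exact text as approximate):

    /* Emits "->SYM value" markers that the asm-offsets machinery
     * later turns into #defines. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

The net effect is identical generated output with far less duplicated asm boilerplate per register class.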
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index fb6101a5d4f1..b15bf6bc0e94 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,30 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += siginfo.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/h8300/include/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h
index e140e46729ac..34212608371e 100644
--- a/arch/h8300/include/asm/bitsperlong.h
+++ b/arch/h8300/include/uapi/asm/bitsperlong.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_H8300_BITS_PER_LONG
-#define __ASM_H8300_BITS_PER_LONG
+#ifndef _UAPI__ASM_H8300_BITS_PER_LONG
+#define _UAPI__ASM_H8300_BITS_PER_LONG
#include <asm-generic/bitsperlong.h>
@@ -11,4 +11,4 @@ typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
#endif
-#endif /* __ASM_H8300_BITS_PER_LONG */
+#endif /* _UAPI__ASM_H8300_BITS_PER_LONG */
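
Note: moving the guard to a _UAPI-prefixed name follows the uapi convention; the header installation step strips the _UAPI prefix when exporting the file to userspace, so the installed header keeps the plain __ASM_H8300_BITS_PER_LONG guard (behaviour of scripts/headers_install.sh, stated here from memory).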
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index a2036bfda8af..6b45ef79eb8f 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y += ucontext.h
-
generic-y += auxvec.h
generic-y += barrier.h
generic-y += bug.h
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index c31706c38631..b15bf6bc0e94 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,15 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += kvm_para.h
-header-y += param.h
-header-y += ptrace.h
-header-y += registers.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += swab.h
-header-y += unistd.h
-header-y += user.h
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 891002bbb995..13a97aa2285f 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -2,48 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += break.h
-header-y += byteorder.h
-header-y += cmpxchg.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += fpu.h
-header-y += gcc_intrin.h
-header-y += ia64regs.h
-header-y += intel_intrin.h
-header-y += intrinsics.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += perfmon.h
-header-y += perfmon_default_smpl.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += ptrace_offsets.h
-header-y += resource.h
-header-y += rse.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
-header-y += ustack.h
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 3686d6abafde..9edda5466020 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -50,32 +50,10 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
# The gate DSO image is built using a special linker script.
include $(src)/Makefile.gate
-# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
-define sed-y
- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-quiet_cmd_nr_irqs = GEN $@
-define cmd_nr_irqs
- (set -e; \
- echo "#ifndef __ASM_NR_IRQS_H__"; \
- echo "#define __ASM_NR_IRQS_H__"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y) $<; \
- echo ""; \
- echo "#endif" ) > $@
-endef
-
# We use internal kbuild rules to avoid the "is up to date" message from make
arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
- $(Q)mkdir -p $(dir $@)
- $(call cmd,nr_irqs)
+include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE
+ $(call filechk,offsets,__ASM_NR_IRQS_H__)
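
Note: the open-coded sed pipeline removed above is replaced by the shared filechk,offsets rule, part of the common kbuild machinery at this point in history (the exact location of the rule is stated from memory). It performs the same "->SYM value" to "#define SYM value" translation, wraps the result in the guard passed as the second argument (__ASM_NR_IRQS_H__ here), and, being a filechk rule, only rewrites the target when the generated contents actually change, avoiding needless rebuilds.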
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index ceeffc509764..a32903ada016 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -6,7 +6,7 @@ extra-y += gate.so gate-syms.o gate.lds gate.o
CPPFLAGS_gate.lds := -P -C -U$(ARCH)
-quiet_cmd_gate = GATE $@
+quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index 43937a61d6cf..b15bf6bc0e94 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -1,33 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 6a2d257bdfb2..64368077235a 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -9,27 +9,3 @@ generic-y += socket.h
generic-y += sockios.h
generic-y += termbits.h
generic-y += termios.h
-
-header-y += a.out.h
-header-y += bootinfo.h
-header-y += bootinfo-amiga.h
-header-y += bootinfo-apollo.h
-header-y += bootinfo-atari.h
-header-y += bootinfo-hp300.h
-header-y += bootinfo-mac.h
-header-y += bootinfo-q40.h
-header-y += bootinfo-vme.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += fcntl.h
-header-y += ioctls.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += stat.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 5ebc2850690e..9c8fbf8fb5aa 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -24,24 +24,32 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (uaccess_kernel())
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
- ((addr) > PAGE_OFFSET && \
- (addr) < LINCORE_BASE))
-
static inline int __access_ok(unsigned long addr, unsigned long size)
{
- return __kernel_ok || !__user_bad(addr, size);
+ /*
+ * Allow access to the user mapped memory area, but not the system area
+ * before it. The check extends to the top of the address space when
+	 * kernel access is allowed (there's no real reason to user-copy to the
+ * system area in any case).
+ */
+ if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+ size <= get_fs().seg - addr))
+ return true;
+ /*
+ * Explicitly allow NULL pointers here. Parts of the kernel such
+ * as readv/writev use access_ok to validate pointers, but want
+ * to allow NULL pointers for various reasons. NULL pointers are
+ * safe to allow through because the first page is not mappable on
+ * Meta.
+ */
+ if (!addr)
+ return true;
+ /* Allow access to core code memory area... */
+ if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+ size <= LINCORE_CODE_LIMIT + 1 - addr)
+ return true;
+ /* ... but no other areas. */
+ return false;
}
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
@@ -113,7 +121,8 @@ extern long __get_user_bad(void);
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
+ long __gu_err; \
+ long long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -121,7 +130,8 @@ extern long __get_user_bad(void);
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
+ long long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -132,6 +142,7 @@ extern long __get_user_bad(void);
extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
+extern unsigned long long __get_user_asm_l(const void __user *addr, long *err);
#define __get_user_size(x, ptr, size, retval) \
do { \
@@ -143,6 +154,8 @@ do { \
x = __get_user_asm_w(ptr, &retval); break; \
case 4: \
x = __get_user_asm_d(ptr, &retval); break; \
+ case 8: \
+ x = __get_user_asm_l(ptr, &retval); break; \
default: \
(x) = __get_user_bad(); \
} \
@@ -161,8 +174,13 @@ do { \
extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
long count);
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return -EFAULT;
+ return __strncpy_from_user(dst, src, count);
+}
/*
* Return the size of a string (including the ending 0)
*
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index ab78be2b6eb0..b29731ebd7a9 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -1,14 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += byteorder.h
-header-y += ech.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += swab.h
-header-y += unistd.h
-
generic-y += mman.h
generic-y += resource.h
generic-y += setup.h
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index e8a4ea83cabb..c941abdb8f85 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -246,65 +246,47 @@
#define __asm_copy_user_64bit_rapf_loop( \
to, from, ret, n, id, FIXUP) \
asm volatile ( \
- ".balign 8\n" \
- "MOV RAPF, %1\n" \
- "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
- "MOV D0Ar6, #0\n" \
- "LSR D1Ar5, %3, #6\n" \
- "SUB TXRPT, D1Ar5, #2\n" \
- "MOV RAPF, %1\n" \
+ ".balign 8\n" \
+ " MOV RAPF, %1\n" \
+ " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+ " MOV D0Ar6, #0\n" \
+ " LSR D1Ar5, %3, #6\n" \
+ " SUB TXRPT, D1Ar5, #2\n" \
+ " MOV RAPF, %1\n" \
"$Lloop"id":\n" \
- "ADD RAPF, %1, #64\n" \
- "21:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "22:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "23:\n" \
- "SUB %3, %3, #32\n" \
- "24:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "25:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "26:\n" \
- "SUB %3, %3, #32\n" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "BR $Lloop"id"\n" \
+ " ADD RAPF, %1, #64\n" \
+ "21: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "23: SUB %3, %3, #32\n" \
+ "24: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26: SUB %3, %3, #32\n" \
+ " DCACHE [%1+#-64], D0Ar6\n" \
+ " BR $Lloop"id"\n" \
\
- "MOV RAPF, %1\n" \
- "27:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "29:\n" \
- "SUB %3, %3, #32\n" \
- "30:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "31:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "32:\n" \
- "SUB %0, %0, #8\n" \
- "33:\n" \
- "SETL [%0++], D0.7, D1.7\n" \
- "SUB %3, %3, #32\n" \
- "1:" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
- "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
- "GETL D0.5, D1.5, [A0StP+#-24]\n" \
- "GETL D0.6, D1.6, [A0StP+#-16]\n" \
- "GETL D0.7, D1.7, [A0StP+#-8]\n" \
- "SUB A0StP, A0StP, #40\n" \
+ " MOV RAPF, %1\n" \
+ "27: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "28: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29: SUB %3, %3, #32\n" \
+ "30: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32: SETL [%0+#-8], D0.7, D1.7\n" \
+ " SUB %3, %3, #32\n" \
+ "1: DCACHE [%1+#-64], D0Ar6\n" \
+ " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
+ " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
+ " GETL D0.5, D1.5, [A0StP+#-24]\n" \
+ " GETL D0.6, D1.6, [A0StP+#-16]\n" \
+ " GETL D0.7, D1.7, [A0StP+#-8]\n" \
+ " SUB A0StP, A0StP, #40\n" \
" .section .fixup,\"ax\"\n" \
- "4:\n" \
- " ADD %0, %0, #8\n" \
- "3:\n" \
- " MOV D0Ar2, TXSTATUS\n" \
+ "3: MOV D0Ar2, TXSTATUS\n" \
" MOV D1Ar1, TXSTATUS\n" \
" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
" MOV TXSTATUS, D1Ar1\n" \
FIXUP \
- " MOVT D0Ar2,#HI(1b)\n" \
- " JUMP D0Ar2,#LO(1b)\n" \
+ " MOVT D0Ar2, #HI(1b)\n" \
+ " JUMP D0Ar2, #LO(1b)\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .long 21b,3b\n" \
@@ -319,7 +301,6 @@
" .long 30b,3b\n" \
" .long 31b,3b\n" \
" .long 32b,3b\n" \
- " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
@@ -397,89 +378,59 @@
#define __asm_copy_user_32bit_rapf_loop( \
to, from, ret, n, id, FIXUP) \
asm volatile ( \
- ".balign 8\n" \
- "MOV RAPF, %1\n" \
- "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
- "MOV D0Ar6, #0\n" \
- "LSR D1Ar5, %3, #6\n" \
- "SUB TXRPT, D1Ar5, #2\n" \
- "MOV RAPF, %1\n" \
- "$Lloop"id":\n" \
- "ADD RAPF, %1, #64\n" \
- "21:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "22:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "23:\n" \
- "SUB %3, %3, #16\n" \
- "24:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "25:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "26:\n" \
- "SUB %3, %3, #16\n" \
- "27:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "29:\n" \
- "SUB %3, %3, #16\n" \
- "30:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "31:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "32:\n" \
- "SUB %3, %3, #16\n" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "BR $Lloop"id"\n" \
+ ".balign 8\n" \
+ " MOV RAPF, %1\n" \
+ " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+ " MOV D0Ar6, #0\n" \
+ " LSR D1Ar5, %3, #6\n" \
+ " SUB TXRPT, D1Ar5, #2\n" \
+ " MOV RAPF, %1\n" \
+ "$Lloop"id":\n" \
+ " ADD RAPF, %1, #64\n" \
+ "21: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "23: SUB %3, %3, #16\n" \
+ "24: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26: SUB %3, %3, #16\n" \
+ "27: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "28: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29: SUB %3, %3, #16\n" \
+ "30: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32: SUB %3, %3, #16\n" \
+ " DCACHE [%1+#-64], D0Ar6\n" \
+ " BR $Lloop"id"\n" \
\
- "MOV RAPF, %1\n" \
- "33:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "34:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "35:\n" \
- "SUB %3, %3, #16\n" \
- "36:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "37:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "38:\n" \
- "SUB %3, %3, #16\n" \
- "39:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "40:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "41:\n" \
- "SUB %3, %3, #16\n" \
- "42:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "43:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "44:\n" \
- "SUB %0, %0, #4\n" \
- "45:\n" \
- "SETD [%0++], D0.7\n" \
- "SUB %3, %3, #16\n" \
- "1:" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
- "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
- "GETL D0.5, D1.5, [A0StP+#-24]\n" \
- "GETL D0.6, D1.6, [A0StP+#-16]\n" \
- "GETL D0.7, D1.7, [A0StP+#-8]\n" \
- "SUB A0StP, A0StP, #40\n" \
+ " MOV RAPF, %1\n" \
+ "33: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "34: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "35: SUB %3, %3, #16\n" \
+ "36: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "37: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38: SUB %3, %3, #16\n" \
+ "39: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "40: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41: SUB %3, %3, #16\n" \
+ "42: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "43: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44: SETD [%0+#-4], D0.7\n" \
+ " SUB %3, %3, #16\n" \
+ "1: DCACHE [%1+#-64], D0Ar6\n" \
+ " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
+ " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
+ " GETL D0.5, D1.5, [A0StP+#-24]\n" \
+ " GETL D0.6, D1.6, [A0StP+#-16]\n" \
+ " GETL D0.7, D1.7, [A0StP+#-8]\n" \
+ " SUB A0StP, A0StP, #40\n" \
" .section .fixup,\"ax\"\n" \
- "4:\n" \
- " ADD %0, %0, #4\n" \
- "3:\n" \
- " MOV D0Ar2, TXSTATUS\n" \
+ "3: MOV D0Ar2, TXSTATUS\n" \
" MOV D1Ar1, TXSTATUS\n" \
" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
" MOV TXSTATUS, D1Ar1\n" \
FIXUP \
- " MOVT D0Ar2,#HI(1b)\n" \
- " JUMP D0Ar2,#LO(1b)\n" \
+ " MOVT D0Ar2, #HI(1b)\n" \
+ " JUMP D0Ar2, #LO(1b)\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .long 21b,3b\n" \
@@ -506,7 +457,6 @@
" .long 42b,3b\n" \
" .long 43b,3b\n" \
" .long 44b,3b\n" \
- " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
@@ -1094,6 +1044,30 @@ unsigned int __get_user_asm_d(const void __user *addr, long *err)
}
EXPORT_SYMBOL(__get_user_asm_d);
+unsigned long long __get_user_asm_l(const void __user *addr, long *err)
+{
+ register unsigned long long x asm ("D0Re0") = 0;
+ asm volatile (
+ " GETL %0,%t0,[%2]\n"
+ "1:\n"
+ " GETL %0,%t0,[%2]\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ "3: MOV D0FrT,%3\n"
+ " SETD [%1],D0FrT\n"
+ " MOVT D0FrT,#HI(2b)\n"
+ " JUMP D0FrT,#LO(2b)\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b,3b\n"
+ " .previous\n"
+ : "=r" (x)
+ : "r" (err), "r" (addr), "P" (-EFAULT)
+ : "D0FrT");
+ return x;
+}
+EXPORT_SYMBOL(__get_user_asm_l);
+
long __put_user_asm_b(unsigned int x, void __user *addr)
{
register unsigned int err asm ("D0Re0") = 0;
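
With the case-8 branch wired into __get_user_size() earlier in this patch and __get_user_asm_l() exported here, a 64-bit get_user() now works on metag. A minimal illustrative sketch (hypothetical caller, not part of this patch):

    #include <linux/uaccess.h>

    /* Fetch a 64-bit value from userspace in one access. */
    static int read_counter(const u64 __user *uptr, u64 *out)
    {
            u64 val;

            if (get_user(val, uptr))        /* -EFAULT on a bad pointer */
                    return -EFAULT;
            *out = val;
            return 0;
    }

Previously a size-8 access fell through to __get_user_bad(), so such a call failed at link time.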
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
index 91f4255bcb5c..62ebab90924d 100644
--- a/arch/metag/mm/mmu-meta1.c
+++ b/arch/metag/mm/mmu-meta1.c
@@ -152,6 +152,5 @@ void __init mmu_init(unsigned long mem_end)
p_swapper_pg_dir++;
addr += PGDIR_SIZE;
- entry++;
}
}
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 1aac99f87df1..2178c78c7c1a 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,35 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += types.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += elf.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += unistd.h
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 5c3f688a5232..5cef58651db0 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -1,7 +1,9 @@
# Fail on warnings - also for files referenced in subdirs
# -Werror can be disabled for specific files using:
# CFLAGS_<file.o> := -Wno-error
+ifeq ($(W),)
subdir-ccflags-y := -Werror
+endif
# platform specific definitions
include arch/mips/Kbuild.platforms
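
Note: $(W) is the standard kbuild warning-level variable, so this guard means a plain build keeps the arch-wide -Werror while e.g. "make ARCH=mips W=1" drops it; the extra diagnostics that W= enables can then be inspected without every new warning breaking the build.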
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4e9ebf65d071..2828ecde133d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -46,6 +46,7 @@ config MIPS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD
select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CPU_AUTOPROBE
select GENERIC_CLOCKEVENTS
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_CMOS_UPDATE
@@ -68,6 +69,7 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_COPY_THREAD_TLS
menu "Machine selection"
@@ -1039,14 +1041,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
-config ARCH_HAS_ILOG2_U32
- bool
- default n
-
-config ARCH_HAS_ILOG2_U64
- bool
- default n
-
config GENERIC_HWEIGHT
bool
default y
@@ -1372,6 +1366,7 @@ config CPU_LOONGSON3
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select MIPS_PGD_C0_CONTEXT
+ select MIPS_L1_CACHE_SHIFT_6
select GPIOLIB
help
The Loongson 3 processor implements the MIPS64R2 instruction
@@ -2120,10 +2115,13 @@ config MIPS_VA_BITS_48
bool "48 bits virtual memory"
depends on 64BIT
help
- Support a maximum at least 48 bits of application virtual memory.
- Default is 40 bits or less, depending on the CPU.
- This option result in a small memory overhead for page tables.
- This option is only supported with 16k and 64k page sizes.
+	  Support a maximum of at least 48 bits of application virtual
+ memory. Default is 40 bits or less, depending on the CPU.
+ For page sizes 16k and above, this option results in a small
+ memory overhead for page tables. For 4k page size, a fourth
+ level of page tables is added which imposes both a memory
+ overhead as well as slower TLB fault handling.
+
If unsure, say N.
choice
@@ -2133,7 +2131,6 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
- depends on !MIPS_VA_BITS_48
help
	  This option selects the standard 4kB Linux page size. On some
R3000-family processors this is the only available page size. Using
@@ -2982,6 +2979,7 @@ config HAVE_LATENCYTOP_SUPPORT
config PGTABLE_LEVELS
int
+ default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
default 3 if 64BIT && !PAGE_SIZE_64KB
default 2
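
A back-of-the-envelope check of the new PGTABLE_LEVELS default, assuming 8-byte PTEs and one page per table (so each level resolves log2(PAGE_SIZE/8) bits of virtual address):

    4 KiB pages:  12 offset bits + 4 levels * 9 bits  = 48 bits
    16 KiB pages: 14 offset bits + 3 levels * 11 bits = 47 bits
    64 KiB pages: 16 offset bits + 3 levels * 13 bits = 55 bits

Only the 4 KiB case needs a fourth level to get near 48 bits, which is exactly what the "default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48" line adds; the larger page sizes reach the target (or come in just under, for 16 KiB) with the existing three levels.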
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 7f975b20b20c..42a97c59200f 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -82,7 +82,7 @@ config CMDLINE_OVERRIDE
config SB1XXX_CORELIS
bool "Corelis Debugger"
depends on SIBYTE_SB1xxx_SOC
- select DEBUG_INFO
+ select DEBUG_INFO if !COMPILE_TEST
help
Select compile flags that produce code that can be processed by the
Corelis mksym utility and UDB Emulator.
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index c370426a7322..5c0b56203bae 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -25,15 +25,6 @@ endif # CPU_CAVIUM_OCTEON
if CAVIUM_OCTEON_SOC
-config CAVIUM_OCTEON_2ND_KERNEL
- bool "Build the kernel to be used as a 2nd kernel on the same chip"
- default "n"
- help
- This option configures this kernel to be linked at a different
- address and use the 2nd uart for output. This allows a kernel built
- with this option to be run at the same time as one built without this
- option.
-
config CAVIUM_OCTEON_LOCK_L2
bool "Lock often used kernel code in the L2"
default "y"
diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform
index 8a301cb12d68..45be853700e6 100644
--- a/arch/mips/cavium-octeon/Platform
+++ b/arch/mips/cavium-octeon/Platform
@@ -4,8 +4,4 @@
platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/
cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \
-I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
-ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
-load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff84100000
-else
load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000
-endif
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 89b5273299ab..f091c9b70603 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2010 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -239,6 +239,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
return counter;
@@ -249,6 +250,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
return counter;
@@ -259,6 +261,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
return counter;
@@ -270,6 +273,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
return counter;
@@ -301,7 +305,7 @@ static void fault_in(uint64_t addr, int len)
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
- ACCESS_ONCE(*ptr);
+ READ_ONCE(*ptr);
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
@@ -375,7 +379,9 @@ int cvmx_l2c_lock_line(uint64_t addr)
if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+
lckbase.s.lck_base = addr_tmp >> 7;
+
} else {
lckbase.s.lck_base = addr >> 7;
}
@@ -435,6 +441,7 @@ void cvmx_l2c_flush(void)
/* These may look like constants, but they aren't... */
int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
@@ -519,89 +526,49 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:40;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:20; /* Phys mem addr (33..14) */
-#else
- uint64_t addr:20; /* Phys mem addr (33..14) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:40;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:40,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:20, /* Phys addr (33..14) */
+ ;))))))
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:41;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:19; /* Phys mem addr (33..15) */
-#else
- uint64_t addr:19; /* Phys mem addr (33..15) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:41;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:41,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:19, /* Phys addr (33..15) */
+ ;))))))
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:42;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:18; /* Phys mem addr (33..16) */
-#else
- uint64_t addr:18; /* Phys mem addr (33..16) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:42;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:42,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:18, /* Phys addr (33..16) */
+ ;))))))
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:43;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:17; /* Phys mem addr (33..17) */
-#else
- uint64_t addr:17; /* Phys mem addr (33..17) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:43;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:43,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:17, /* Phys addr (33..17) */
+ ;))))))
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:44;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:16; /* Phys mem addr (33..18) */
-#else
- uint64_t addr:16; /* Phys mem addr (33..18) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:44;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:44,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:16, /* Phys addr (33..18) */
+ ;))))))
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
@@ -629,8 +596,8 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
unsigned long flags;
-
union cvmx_l2c_dbg debug_val;
+
debug_val.u64 = 0;
/*
* For low core count parts, the core number is always small
@@ -683,8 +650,8 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
union cvmx_l2c_tag tag;
- tag.u64 = 0;
+ tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return tag;
@@ -767,10 +734,12 @@ uint32_t cvmx_l2c_address_to_index(uint64_t addr)
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_l2c_ctl l2c_ctl;
+
l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
indxalias = !l2c_ctl.s.disidxalias;
} else {
union cvmx_l2c_cfg l2c_cfg;
+
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
indxalias = l2c_cfg.s.idxalias;
}
@@ -778,6 +747,7 @@ uint32_t cvmx_l2c_address_to_index(uint64_t addr)
if (indxalias) {
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+
idx ^= idx / cvmx_l2c_get_num_sets();
idx ^= a_14_12;
} else {
@@ -801,6 +771,7 @@ int cvmx_l2c_get_cache_size_bytes(void)
int cvmx_l2c_get_set_bits(void)
{
int l2_set_bits;
+
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
@@ -828,6 +799,7 @@ int cvmx_l2c_get_num_sets(void)
int cvmx_l2c_get_num_assoc(void)
{
int l2_assoc;
+
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
@@ -869,16 +841,17 @@ int cvmx_l2c_get_num_assoc(void)
else if (mio_fus_dat3.s.l2c_crip == 1)
l2_assoc = 12;
} else {
- union cvmx_l2d_fus3 val;
- val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ uint64_t l2d_fus3;
+
+ l2d_fus3 = cvmx_read_csr(CVMX_L2D_FUS3);
/*
* Using shifts here, as bit position names are
* different for each model but they all mean the
* same.
*/
- if ((val.u64 >> 35) & 0x1)
+ if ((l2d_fus3 >> 35) & 0x1)
l2_assoc = l2_assoc >> 2;
- else if ((val.u64 >> 34) & 0x1)
+ else if ((l2d_fus3 >> 34) & 0x1)
l2_assoc = l2_assoc >> 1;
}
return l2_assoc;
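
Note on the bitfield conversion in this file: __BITFIELD_FIELD() (from uapi/asm/bitfield.h, newly included at the top of the hunk) lets each struct be written once, MSB-first, instead of as paired #ifdef __BIG_ENDIAN_BITFIELD blocks. Roughly, the helper looks like this (paraphrased sketch):

    #if defined(__MIPSEB__)
    #define __BITFIELD_FIELD(field, more)	\
            field;				\
            more
    #elif defined(__MIPSEL__)
    #define __BITFIELD_FIELD(field, more)	\
            more				\
            field;
    #else
    #error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
    #endif

On big-endian the nested invocations emit the fields in the order written; on little-endian the nesting unwinds inside-out, so e.g. the cn50xx tag ends up addr-first and reserved-last, matching the #else branches that were removed.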
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index d08a2bce653c..341052387b49 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2010 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -63,16 +63,15 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
char pass[4];
int clock_mhz;
const char *suffix;
- union cvmx_l2d_fus3 fus3;
int num_cores;
union cvmx_mio_fus_dat2 fus_dat2;
union cvmx_mio_fus_dat3 fus_dat3;
char fuse_model[10];
uint32_t fuse_data = 0;
+ uint64_t l2d_fus3 = 0;
- fus3.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
- fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
num_cores = cvmx_octeon_num_cores();
@@ -192,7 +191,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
/* Now figure out the family, the first two digits */
switch ((chip_id >> 8) & 0xff) {
case 0: /* CN38XX, CN37XX or CN36XX */
- if (fus3.cn38xx.crip_512k) {
+ if (l2d_fus3) {
/*
* For some unknown reason, the 16 core one is
* called 37 instead of 36.
@@ -223,7 +222,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
}
break;
case 1: /* CN31XX or CN3020 */
- if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
+ if ((chip_id & 0x10) || l2d_fus3)
family = "30";
else
family = "31";
@@ -246,7 +245,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
case 2: /* CN3010 or CN3005 */
family = "30";
/* A chip with half cache is an 05 */
- if (fus3.cn30xx.crip_64k)
+ if (l2d_fus3)
core_model = "05";
/*
* This series of chips didn't follow the standard
@@ -267,7 +266,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
case 3: /* CN58XX */
family = "58";
/* Special case. 4 core, half cache (CP with half cache) */
- if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2))
+ if ((num_cores == 4) && l2d_fus3 && !strncmp(suffix, "CP", 2))
core_model = "29";
/* Pass 1 uses different encodings for pass numbers */
@@ -290,7 +289,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
break;
case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
if (fus_dat2.cn56xx.raid_en) {
- if (fus3.cn56xx.crip_1024k)
+ if (l2d_fus3)
family = "55";
else
family = "57";
@@ -309,7 +308,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
if (fus_dat3.cn56xx.bar2_en)
suffix = "NSPB2";
}
- if (fus3.cn56xx.crip_1024k)
+ if (l2d_fus3)
family = "54";
else
family = "56";
@@ -319,7 +318,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
family = "50";
break;
case 7: /* CN52XX */
- if (fus3.cn52xx.crip_256k)
+ if (l2d_fus3)
family = "51";
else
family = "52";
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 3375e61daa19..8505db478904 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,71 +3,27 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2016 Cavium Networks
+ * Copyright (C) 2004-2017 Cavium, Inc.
* Copyright (C) 2008 Wind River Systems
*/
-#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
-#include <linux/usb/ehci_def.h>
-#include <linux/usb/ehci_pdriver.h>
-#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-board.h>
+
+#ifdef CONFIG_USB
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
-/* Octeon Random Number Generator. */
-static int __init octeon_rng_device_init(void)
-{
- struct platform_device *pd;
- int ret = 0;
-
- struct resource rng_resources[] = {
- {
- .flags = IORESOURCE_MEM,
- .start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
- .end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
- }, {
- .flags = IORESOURCE_MEM,
- .start = cvmx_build_io_address(8, 0),
- .end = cvmx_build_io_address(8, 0) + 0x7
- }
- };
-
- pd = platform_device_alloc("octeon_rng", -1);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = platform_device_add_resources(pd, rng_resources,
- ARRAY_SIZE(rng_resources));
- if (ret)
- goto fail;
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
-
- return ret;
-fail:
- platform_device_put(pd);
-
-out:
- return ret;
-}
-device_initcall(octeon_rng_device_init);
-
-#ifdef CONFIG_USB
-
static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
@@ -440,8 +396,49 @@ device_initcall(octeon_ohci_device_init);
#endif /* CONFIG_USB */
+/* Octeon Random Number Generator. */
+static int __init octeon_rng_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
-static struct of_device_id __initdata octeon_ids[] = {
+ struct resource rng_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
+ .end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = cvmx_build_io_address(8, 0),
+ .end = cvmx_build_io_address(8, 0) + 0x7
+ }
+ };
+
+ pd = platform_device_alloc("octeon_rng", -1);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = platform_device_add_resources(pd, rng_resources,
+ ARRAY_SIZE(rng_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+
+out:
+ return ret;
+}
+device_initcall(octeon_rng_device_init);
+
+const struct of_device_id octeon_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "cavium,octeon-6335-uctl", },
{ .compatible = "cavium,octeon-5750-usbn", },
@@ -481,6 +478,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr)
alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
if (alt_phy_handle) {
u32 alt_phandle = be32_to_cpup(alt_phy_handle);
+
alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
} else {
alt_phy = -1;
@@ -579,6 +577,7 @@ static void __init octeon_fdt_rm_ethernet(int node)
if (phy_handle) {
u32 ph = be32_to_cpup(phy_handle);
int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
+
if (p >= 0)
fdt_nop_node(initial_boot_params, p);
}
@@ -728,6 +727,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int mgmt;
+
snprintf(name_buffer, sizeof(name_buffer),
"mix%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@@ -743,6 +743,7 @@ int __init octeon_prune_device_tree(void)
name_buffer);
} else {
int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
+
octeon_fdt_set_phy(mgmt, phy_addr);
}
}
@@ -751,6 +752,7 @@ int __init octeon_prune_device_tree(void)
pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (pip_path) {
int pip = fdt_path_offset(initial_boot_params, pip_path);
+
if (pip >= 0)
for (i = 0; i <= 4; i++)
octeon_fdt_pip_iface(pip, i);
@@ -767,6 +769,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int i2c;
+
snprintf(name_buffer, sizeof(name_buffer),
"twsi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@@ -797,11 +800,11 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int i2c;
+
snprintf(name_buffer, sizeof(name_buffer),
"smi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
-
if (alias_prop) {
i2c = fdt_path_offset(initial_boot_params, alias_prop);
if (i2c < 0)
@@ -824,6 +827,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 3; i++) {
int uart;
+
snprintf(name_buffer, sizeof(name_buffer),
"uart%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@@ -863,6 +867,7 @@ int __init octeon_prune_device_tree(void)
int len;
int cf = fdt_path_offset(initial_boot_params, alias_prop);
+
base_ptr = 0;
if (octeon_bootinfo->major_version == 1
&& octeon_bootinfo->minor_version >= 1) {
@@ -912,6 +917,7 @@ int __init octeon_prune_device_tree(void)
fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
if (!is_16bit) {
__be32 width = cpu_to_be32(8);
+
fdt_setprop_inplace(initial_boot_params, cf,
"cavium,bus-width", &width, sizeof(width));
}
@@ -1004,6 +1010,7 @@ end_led:
;
}
+#ifdef CONFIG_USB
/* OHCI/UHCI USB */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"uctl", NULL);
@@ -1036,6 +1043,7 @@ end_led:
} else {
__be32 new_f[1];
enum cvmx_helper_board_usb_clock_types c;
+
c = __cvmx_helper_board_usb_get_clock_type();
switch (c) {
case USB_CLOCK_TYPE_REF_48:
@@ -1052,6 +1060,7 @@ end_led:
}
}
}
+#endif
return 0;
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d9dbeb0b165b..a8034d0dcade 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -374,14 +374,8 @@ void octeon_write_lcd(const char *s)
*/
int octeon_get_boot_uart(void)
{
- int uart;
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
- uart = 1;
-#else
- uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+ return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
-#endif
- return uart;
}
/**
@@ -901,14 +895,10 @@ void __init prom_init(void)
}
if (strstr(arcs_cmdline, "console=") == NULL) {
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
- strcat(arcs_cmdline, " console=ttyS0,115200");
-#else
if (octeon_uart == 1)
strcat(arcs_cmdline, " console=ttyS1,115200");
else
strcat(arcs_cmdline, " console=ttyS0,115200");
-#endif
}
mips_hpt_frequency = octeon_get_clock_rate();
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index c95d94c7838b..91aacf2ef26d 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -36,6 +36,8 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@@ -80,6 +82,7 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index b4db69fbc40c..fc67947ed658 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -9,14 +9,9 @@
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
-#include <kmalloc.h>
-
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _ASM_CACHE_H */
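
Note: the removed SMP_CACHE_SHIFT/SMP_CACHE_BYTES definitions merely mirrored the generic ones; linux/cache.h already falls back to SMP_CACHE_BYTES == L1_CACHE_BYTES when an arch does not override it, which is also why cpu-info.h below can switch to including <linux/cache.h> directly.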
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index be3b4c25f335..cd6efb07c980 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -12,10 +12,9 @@
#ifndef __ASM_CPU_INFO_H
#define __ASM_CPU_INFO_H
+#include <linux/cache.h>
#include <linux/types.h>
-#include <asm/cache.h>
-
/*
* Descriptor for a cache
*/
diff --git a/arch/mips/include/asm/cpufeature.h b/arch/mips/include/asm/cpufeature.h
new file mode 100644
index 000000000000..c63ec05313c1
--- /dev/null
+++ b/arch/mips/include/asm/cpufeature.h
@@ -0,0 +1,26 @@
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see uapi/asm/hwcap.h for MIPS CPU features.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <uapi/asm/hwcap.h>
+#include <asm/elf.h>
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
+
+#endif /* __ASM_CPUFEATURE_H */
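
The new header plugs MIPS into the generic cpu-feature module autoprobing (note the GENERIC_CPU_AUTOPROBE select added to arch/mips/Kconfig elsewhere in this merge). A hypothetical sketch of what it enables; the names below are illustrative, not from this patch:

    #include <linux/module.h>
    #include <linux/cpufeature.h>

    static int __init msa_demo_init(void)
    {
            /* Only runs (and the module only autoloads) when
             * elf_hwcap has the matching HWCAP_MIPS_MSA bit set. */
            pr_info("MSA present\n");
            return 0;
    }

    /* Expands via cpu_feature(MIPS_MSA) == ilog2(HWCAP_MIPS_MSA)
     * into a module alias that udev matches against the CPU. */
    module_cpu_feature_match(MIPS_MSA, msa_demo_init);
    MODULE_LICENSE("GPL");

module_cpu_feature_match() also registers the function as the module init, so no separate module_init() line is needed.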
diff --git a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
index 98cf40417c5d..d38be668e338 100644
--- a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
@@ -10,8 +10,6 @@
#ifndef __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
-#include <cpu-feature-overrides.h>
-
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_4k_cache 1
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
index 10262cb6ff50..d045973ddb33 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,3140 +28,177 @@
#ifndef __CVMX_L2C_DEFS_H__
#define __CVMX_L2C_DEFS_H__
-#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull))
-#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull))
-#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull))
-#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull))
-#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull))
-#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull) + ((block_id) & 3) * 0x40000ull)
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
-#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8)
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
-#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
-#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull))
-#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 8191) * 8)
-#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull))
-#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull))
-#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull))
-#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull))
-#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull))
-#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull))
-#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull))
-#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull))
-#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull))
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
-#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull))
-#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull))
-#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull))
-#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull))
-#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull))
-#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull))
-#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull))
-#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull))
+#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + \
+ ((offset) & 3) * 8)
#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
-#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
-#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8)
-#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull))
-#define CVMX_L2C_QOS_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080880200ull) + ((offset) & 1) * 8)
-#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 31) * 8)
-#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull))
-#define CVMX_L2C_RSCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800410ull) + ((offset) & 3) * 64)
-#define CVMX_L2C_RSDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800418ull) + ((offset) & 3) * 64)
#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull))
#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull))
#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull))
#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull))
#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull))
-#define CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull))
-#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull))
-#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull))
-#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull))
-#define CVMX_L2C_VIRTID_IOBX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0200ull) + ((offset) & 1) * 8)
-#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 31) * 8)
-#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull))
-#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8)
-#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + ((offset) & 1) * 8)
-#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 31) * 8)
-#define CVMX_L2C_XMCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800400ull) + ((offset) & 3) * 64)
-#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull))
-#define CVMX_L2C_XMDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800408ull) + ((offset) & 3) * 64)
-
-union cvmx_l2c_big_ctl {
- uint64_t u64;
- struct cvmx_l2c_big_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t maxdram:4;
- uint64_t reserved_1_3:3;
- uint64_t disable:1;
-#else
- uint64_t disable:1;
- uint64_t reserved_1_3:3;
- uint64_t maxdram:4;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_l2c_big_ctl_s cn61xx;
- struct cvmx_l2c_big_ctl_s cn63xx;
- struct cvmx_l2c_big_ctl_s cn66xx;
- struct cvmx_l2c_big_ctl_s cn68xx;
- struct cvmx_l2c_big_ctl_s cn68xxp1;
- struct cvmx_l2c_big_ctl_s cnf71xx;
-};
-
-union cvmx_l2c_bst {
- uint64_t u64;
- struct cvmx_l2c_bst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dutfl:32;
- uint64_t rbffl:4;
- uint64_t xbffl:4;
- uint64_t tdpfl:4;
- uint64_t ioccmdfl:4;
- uint64_t iocdatfl:4;
- uint64_t dutresfl:4;
- uint64_t vrtfl:4;
- uint64_t tdffl:4;
-#else
- uint64_t tdffl:4;
- uint64_t vrtfl:4;
- uint64_t dutresfl:4;
- uint64_t iocdatfl:4;
- uint64_t ioccmdfl:4;
- uint64_t tdpfl:4;
- uint64_t xbffl:4;
- uint64_t rbffl:4;
- uint64_t dutfl:32;
-#endif
- } s;
- struct cvmx_l2c_bst_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t dutfl:4;
- uint64_t reserved_17_31:15;
- uint64_t ioccmdfl:1;
- uint64_t reserved_13_15:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_9_11:3;
- uint64_t dutresfl:1;
- uint64_t reserved_5_7:3;
- uint64_t vrtfl:1;
- uint64_t reserved_1_3:3;
- uint64_t tdffl:1;
-#else
- uint64_t tdffl:1;
- uint64_t reserved_1_3:3;
- uint64_t vrtfl:1;
- uint64_t reserved_5_7:3;
- uint64_t dutresfl:1;
- uint64_t reserved_9_11:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_13_15:3;
- uint64_t ioccmdfl:1;
- uint64_t reserved_17_31:15;
- uint64_t dutfl:4;
- uint64_t reserved_36_63:28;
-#endif
- } cn61xx;
- struct cvmx_l2c_bst_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_38_63:26;
- uint64_t dutfl:6;
- uint64_t reserved_17_31:15;
- uint64_t ioccmdfl:1;
- uint64_t reserved_13_15:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_9_11:3;
- uint64_t dutresfl:1;
- uint64_t reserved_5_7:3;
- uint64_t vrtfl:1;
- uint64_t reserved_1_3:3;
- uint64_t tdffl:1;
-#else
- uint64_t tdffl:1;
- uint64_t reserved_1_3:3;
- uint64_t vrtfl:1;
- uint64_t reserved_5_7:3;
- uint64_t dutresfl:1;
- uint64_t reserved_9_11:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_13_15:3;
- uint64_t ioccmdfl:1;
- uint64_t reserved_17_31:15;
- uint64_t dutfl:6;
- uint64_t reserved_38_63:26;
-#endif
- } cn63xx;
- struct cvmx_l2c_bst_cn63xx cn63xxp1;
- struct cvmx_l2c_bst_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_42_63:22;
- uint64_t dutfl:10;
- uint64_t reserved_17_31:15;
- uint64_t ioccmdfl:1;
- uint64_t reserved_13_15:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_9_11:3;
- uint64_t dutresfl:1;
- uint64_t reserved_5_7:3;
- uint64_t vrtfl:1;
- uint64_t reserved_1_3:3;
- uint64_t tdffl:1;
-#else
- uint64_t tdffl:1;
- uint64_t reserved_1_3:3;
- uint64_t vrtfl:1;
- uint64_t reserved_5_7:3;
- uint64_t dutresfl:1;
- uint64_t reserved_9_11:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_13_15:3;
- uint64_t ioccmdfl:1;
- uint64_t reserved_17_31:15;
- uint64_t dutfl:10;
- uint64_t reserved_42_63:22;
-#endif
- } cn66xx;
- struct cvmx_l2c_bst_s cn68xx;
- struct cvmx_l2c_bst_s cn68xxp1;
- struct cvmx_l2c_bst_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_bst0 {
- uint64_t u64;
- struct cvmx_l2c_bst0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t dtbnk:1;
- uint64_t wlb_msk:4;
- uint64_t dtcnt:13;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:13;
- uint64_t wlb_msk:4;
- uint64_t dtbnk:1;
- uint64_t reserved_24_63:40;
-#endif
- } s;
- struct cvmx_l2c_bst0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t wlb_msk:4;
- uint64_t reserved_15_18:4;
- uint64_t dtcnt:9;
- uint64_t dt:1;
- uint64_t reserved_4_4:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t reserved_4_4:1;
- uint64_t dt:1;
- uint64_t dtcnt:9;
- uint64_t reserved_15_18:4;
- uint64_t wlb_msk:4;
- uint64_t reserved_23_63:41;
-#endif
- } cn30xx;
- struct cvmx_l2c_bst0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t wlb_msk:4;
- uint64_t reserved_16_18:3;
- uint64_t dtcnt:10;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:10;
- uint64_t reserved_16_18:3;
- uint64_t wlb_msk:4;
- uint64_t reserved_23_63:41;
-#endif
- } cn31xx;
- struct cvmx_l2c_bst0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t dtcnt:13;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:13;
- uint64_t reserved_19_63:45;
-#endif
- } cn38xx;
- struct cvmx_l2c_bst0_cn38xx cn38xxp2;
- struct cvmx_l2c_bst0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t dtbnk:1;
- uint64_t wlb_msk:4;
- uint64_t reserved_16_18:3;
- uint64_t dtcnt:10;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:10;
- uint64_t reserved_16_18:3;
- uint64_t wlb_msk:4;
- uint64_t dtbnk:1;
- uint64_t reserved_24_63:40;
-#endif
- } cn50xx;
- struct cvmx_l2c_bst0_cn50xx cn52xx;
- struct cvmx_l2c_bst0_cn50xx cn52xxp1;
- struct cvmx_l2c_bst0_s cn56xx;
- struct cvmx_l2c_bst0_s cn56xxp1;
- struct cvmx_l2c_bst0_s cn58xx;
- struct cvmx_l2c_bst0_s cn58xxp1;
-};
-
-union cvmx_l2c_bst1 {
- uint64_t u64;
- struct cvmx_l2c_bst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_bst1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t vwdf:4;
- uint64_t lrf:2;
- uint64_t vab_vwcf:1;
- uint64_t reserved_5_8:4;
- uint64_t l2t:5;
-#else
- uint64_t l2t:5;
- uint64_t reserved_5_8:4;
- uint64_t vab_vwcf:1;
- uint64_t lrf:2;
- uint64_t vwdf:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn30xx;
- struct cvmx_l2c_bst1_cn30xx cn31xx;
- struct cvmx_l2c_bst1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t vwdf:4;
- uint64_t lrf:2;
- uint64_t vab_vwcf:1;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t vab_vwcf:1;
- uint64_t lrf:2;
- uint64_t vwdf:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_l2c_bst1_cn38xx cn38xxp2;
- struct cvmx_l2c_bst1_cn38xx cn50xx;
- struct cvmx_l2c_bst1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t plc2:1;
- uint64_t plc1:1;
- uint64_t plc0:1;
- uint64_t vwdf:4;
- uint64_t reserved_11_11:1;
- uint64_t ilc:1;
- uint64_t vab_vwcf:1;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t vab_vwcf:1;
- uint64_t ilc:1;
- uint64_t reserved_11_11:1;
- uint64_t vwdf:4;
- uint64_t plc0:1;
- uint64_t plc1:1;
- uint64_t plc2:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xx;
- struct cvmx_l2c_bst1_cn52xx cn52xxp1;
- struct cvmx_l2c_bst1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t plc2:1;
- uint64_t plc1:1;
- uint64_t plc0:1;
- uint64_t ilc:1;
- uint64_t vwdf1:4;
- uint64_t vwdf0:4;
- uint64_t vab_vwcf1:1;
- uint64_t reserved_10_10:1;
- uint64_t vab_vwcf0:1;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t vab_vwcf0:1;
- uint64_t reserved_10_10:1;
- uint64_t vab_vwcf1:1;
- uint64_t vwdf0:4;
- uint64_t vwdf1:4;
- uint64_t ilc:1;
- uint64_t plc0:1;
- uint64_t plc1:1;
- uint64_t plc2:1;
- uint64_t reserved_24_63:40;
-#endif
- } cn56xx;
- struct cvmx_l2c_bst1_cn56xx cn56xxp1;
- struct cvmx_l2c_bst1_cn38xx cn58xx;
- struct cvmx_l2c_bst1_cn38xx cn58xxp1;
-};
-
-union cvmx_l2c_bst2 {
- uint64_t u64;
- struct cvmx_l2c_bst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t reserved_4_11:8;
- uint64_t ipcbst:1;
- uint64_t picbst:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t picbst:1;
- uint64_t ipcbst:1;
- uint64_t reserved_4_11:8;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_l2c_bst2_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t rmdf:4;
- uint64_t reserved_4_7:4;
- uint64_t ipcbst:1;
- uint64_t reserved_2_2:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t reserved_2_2:1;
- uint64_t ipcbst:1;
- uint64_t reserved_4_7:4;
- uint64_t rmdf:4;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn30xx;
- struct cvmx_l2c_bst2_cn30xx cn31xx;
- struct cvmx_l2c_bst2_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t rmdf:4;
- uint64_t rhdf:4;
- uint64_t ipcbst:1;
- uint64_t picbst:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t picbst:1;
- uint64_t ipcbst:1;
- uint64_t rhdf:4;
- uint64_t rmdf:4;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_l2c_bst2_cn38xx cn38xxp2;
- struct cvmx_l2c_bst2_cn30xx cn50xx;
- struct cvmx_l2c_bst2_cn30xx cn52xx;
- struct cvmx_l2c_bst2_cn30xx cn52xxp1;
- struct cvmx_l2c_bst2_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t rmdb:4;
- uint64_t rhdb:4;
- uint64_t ipcbst:1;
- uint64_t picbst:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t picbst:1;
- uint64_t ipcbst:1;
- uint64_t rhdb:4;
- uint64_t rmdb:4;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn56xx;
- struct cvmx_l2c_bst2_cn56xx cn56xxp1;
- struct cvmx_l2c_bst2_cn56xx cn58xx;
- struct cvmx_l2c_bst2_cn56xx cn58xxp1;
-};
-
-union cvmx_l2c_bst_memx {
- uint64_t u64;
- struct cvmx_l2c_bst_memx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t start_bist:1;
- uint64_t clear_bist:1;
- uint64_t reserved_5_61:57;
- uint64_t rdffl:1;
- uint64_t vbffl:4;
-#else
- uint64_t vbffl:4;
- uint64_t rdffl:1;
- uint64_t reserved_5_61:57;
- uint64_t clear_bist:1;
- uint64_t start_bist:1;
-#endif
- } s;
- struct cvmx_l2c_bst_memx_s cn61xx;
- struct cvmx_l2c_bst_memx_s cn63xx;
- struct cvmx_l2c_bst_memx_s cn63xxp1;
- struct cvmx_l2c_bst_memx_s cn66xx;
- struct cvmx_l2c_bst_memx_s cn68xx;
- struct cvmx_l2c_bst_memx_s cn68xxp1;
- struct cvmx_l2c_bst_memx_s cnf71xx;
-};
-
-union cvmx_l2c_bst_tdtx {
- uint64_t u64;
- struct cvmx_l2c_bst_tdtx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t fbfrspfl:8;
- uint64_t sbffl:8;
- uint64_t fbffl:8;
- uint64_t l2dfl:8;
-#else
- uint64_t l2dfl:8;
- uint64_t fbffl:8;
- uint64_t sbffl:8;
- uint64_t fbfrspfl:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_bst_tdtx_s cn61xx;
- struct cvmx_l2c_bst_tdtx_s cn63xx;
- struct cvmx_l2c_bst_tdtx_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t sbffl:8;
- uint64_t fbffl:8;
- uint64_t l2dfl:8;
-#else
- uint64_t l2dfl:8;
- uint64_t fbffl:8;
- uint64_t sbffl:8;
- uint64_t reserved_24_63:40;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_bst_tdtx_s cn66xx;
- struct cvmx_l2c_bst_tdtx_s cn68xx;
- struct cvmx_l2c_bst_tdtx_s cn68xxp1;
- struct cvmx_l2c_bst_tdtx_s cnf71xx;
-};
+#define CVMX_L2C_TADX_PFCX(offset, block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + (((offset) & 3) + \
+ ((block_id) & 7) * 0x8000ull) * 8)
+#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PRF(offset) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + \
+ ((offset) & 7) * 0x40000ull)
+#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + \
+ ((offset) & 1) * 8)
+#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
+ ((offset) & 31) * 8)
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
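/*
 * The indexed form above folds the four per-counter definitions into one
 * address computation: (offset) selects one of four 8-byte counters and
 * (block_id) strides by 0x8000 * 8 = 0x40000 between TADs, so for example
 * CVMX_L2C_TADX_PFCX(2, 1) resolves to the same address as
 * CVMX_L2C_TADX_PFC2(1). A minimal usage sketch, assuming the
 * cvmx_read_csr() accessor from asm/octeon/cvmx.h:
 *
 *	uint64_t pfc = cvmx_read_csr(CVMX_L2C_TADX_PFCX(2, 1));
 */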
-union cvmx_l2c_bst_ttgx {
- uint64_t u64;
- struct cvmx_l2c_bst_ttgx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t lrufl:1;
- uint64_t tagfl:16;
-#else
- uint64_t tagfl:16;
- uint64_t lrufl:1;
- uint64_t reserved_17_63:47;
-#endif
- } s;
- struct cvmx_l2c_bst_ttgx_s cn61xx;
- struct cvmx_l2c_bst_ttgx_s cn63xx;
- struct cvmx_l2c_bst_ttgx_s cn63xxp1;
- struct cvmx_l2c_bst_ttgx_s cn66xx;
- struct cvmx_l2c_bst_ttgx_s cn68xx;
- struct cvmx_l2c_bst_ttgx_s cn68xxp1;
- struct cvmx_l2c_bst_ttgx_s cnf71xx;
-};
union cvmx_l2c_cfg {
uint64_t u64;
struct cvmx_l2c_cfg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t bstrun:1;
- uint64_t lbist:1;
- uint64_t xor_bank:1;
- uint64_t dpres1:1;
- uint64_t dpres0:1;
- uint64_t dfill_dis:1;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t dfill_dis:1;
- uint64_t dpres0:1;
- uint64_t dpres1:1;
- uint64_t xor_bank:1;
- uint64_t lbist:1;
- uint64_t bstrun:1;
- uint64_t reserved_20_63:44;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_20_63:44,
+ __BITFIELD_FIELD(uint64_t bstrun:1,
+ __BITFIELD_FIELD(uint64_t lbist:1,
+ __BITFIELD_FIELD(uint64_t xor_bank:1,
+ __BITFIELD_FIELD(uint64_t dpres1:1,
+ __BITFIELD_FIELD(uint64_t dpres0:1,
+ __BITFIELD_FIELD(uint64_t dfill_dis:1,
+ __BITFIELD_FIELD(uint64_t fpexp:4,
+ __BITFIELD_FIELD(uint64_t fpempty:1,
+ __BITFIELD_FIELD(uint64_t fpen:1,
+ __BITFIELD_FIELD(uint64_t idxalias:1,
+ __BITFIELD_FIELD(uint64_t mwf_crd:4,
+ __BITFIELD_FIELD(uint64_t rsp_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t rfb_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t lrf_arb_mode:1,
+ ;)))))))))))))))
} s;
- struct cvmx_l2c_cfg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t reserved_14_63:50;
-#endif
- } cn30xx;
- struct cvmx_l2c_cfg_cn30xx cn31xx;
- struct cvmx_l2c_cfg_cn30xx cn38xx;
- struct cvmx_l2c_cfg_cn30xx cn38xxp2;
- struct cvmx_l2c_cfg_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t bstrun:1;
- uint64_t lbist:1;
- uint64_t reserved_14_17:4;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t reserved_14_17:4;
- uint64_t lbist:1;
- uint64_t bstrun:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn50xx;
- struct cvmx_l2c_cfg_cn50xx cn52xx;
- struct cvmx_l2c_cfg_cn50xx cn52xxp1;
- struct cvmx_l2c_cfg_s cn56xx;
- struct cvmx_l2c_cfg_s cn56xxp1;
- struct cvmx_l2c_cfg_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t bstrun:1;
- uint64_t lbist:1;
- uint64_t reserved_15_17:3;
- uint64_t dfill_dis:1;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t dfill_dis:1;
- uint64_t reserved_15_17:3;
- uint64_t lbist:1;
- uint64_t bstrun:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn58xx;
- struct cvmx_l2c_cfg_cn58xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t dfill_dis:1;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t dfill_dis:1;
- uint64_t reserved_15_63:49;
-#endif
- } cn58xxp1;
-};
-
-union cvmx_l2c_cop0_mapx {
- uint64_t u64;
- struct cvmx_l2c_cop0_mapx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_l2c_cop0_mapx_s cn61xx;
- struct cvmx_l2c_cop0_mapx_s cn63xx;
- struct cvmx_l2c_cop0_mapx_s cn63xxp1;
- struct cvmx_l2c_cop0_mapx_s cn66xx;
- struct cvmx_l2c_cop0_mapx_s cn68xx;
- struct cvmx_l2c_cop0_mapx_s cn68xxp1;
- struct cvmx_l2c_cop0_mapx_s cnf71xx;
};
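/*
 * A minimal sketch of what the __BITFIELD_FIELD conversion above expands
 * to, assuming the nesting macro from
 * arch/mips/include/uapi/asm/bitfield.h:
 *
 *	#ifdef __MIPSEB__
 *	#define __BITFIELD_FIELD(field, more)	field; more
 *	#elif defined(__MIPSEL__)
 *	#define __BITFIELD_FIELD(field, more)	more field;
 *	#endif
 *
 * On big-endian the recursion emits the fields in the declared
 * (MSB-first) order; on little-endian it emits them in reverse, which
 * reproduces both halves of the old #ifdef __BIG_ENDIAN_BITFIELD pairs
 * from a single field list. The trailing ";)))…)" supplies the innermost
 * empty "more" argument and closes one parenthesis per field.
 */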
union cvmx_l2c_ctl {
uint64_t u64;
struct cvmx_l2c_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_30_63:34;
- uint64_t sepcmt:1;
- uint64_t rdf_fast:1;
- uint64_t disstgl2i:1;
- uint64_t l2dfsbe:1;
- uint64_t l2dfdbe:1;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t l2dfdbe:1;
- uint64_t l2dfsbe:1;
- uint64_t disstgl2i:1;
- uint64_t rdf_fast:1;
- uint64_t sepcmt:1;
- uint64_t reserved_30_63:34;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_30_63:34,
+ __BITFIELD_FIELD(uint64_t sepcmt:1,
+ __BITFIELD_FIELD(uint64_t rdf_fast:1,
+ __BITFIELD_FIELD(uint64_t disstgl2i:1,
+ __BITFIELD_FIELD(uint64_t l2dfsbe:1,
+ __BITFIELD_FIELD(uint64_t l2dfdbe:1,
+ __BITFIELD_FIELD(uint64_t discclk:1,
+ __BITFIELD_FIELD(uint64_t maxvab:4,
+ __BITFIELD_FIELD(uint64_t maxlfb:4,
+ __BITFIELD_FIELD(uint64_t rsp_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t xmc_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t ef_ena:1,
+ __BITFIELD_FIELD(uint64_t ef_cnt:7,
+ __BITFIELD_FIELD(uint64_t vab_thresh:4,
+ __BITFIELD_FIELD(uint64_t disecc:1,
+ __BITFIELD_FIELD(uint64_t disidxalias:1,
+ ;))))))))))))))))
} s;
- struct cvmx_l2c_ctl_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t rdf_fast:1;
- uint64_t disstgl2i:1;
- uint64_t l2dfsbe:1;
- uint64_t l2dfdbe:1;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t l2dfdbe:1;
- uint64_t l2dfsbe:1;
- uint64_t disstgl2i:1;
- uint64_t rdf_fast:1;
- uint64_t reserved_29_63:35;
-#endif
- } cn61xx;
- struct cvmx_l2c_ctl_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t disstgl2i:1;
- uint64_t l2dfsbe:1;
- uint64_t l2dfdbe:1;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t l2dfdbe:1;
- uint64_t l2dfsbe:1;
- uint64_t disstgl2i:1;
- uint64_t reserved_28_63:36;
-#endif
- } cn63xx;
- struct cvmx_l2c_ctl_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t reserved_25_63:39;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_ctl_cn61xx cn66xx;
- struct cvmx_l2c_ctl_s cn68xx;
- struct cvmx_l2c_ctl_cn63xx cn68xxp1;
- struct cvmx_l2c_ctl_cn61xx cnf71xx;
};
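/*
 * The bitfield views are layout descriptions only; software accesses the
 * whole register through the u64 member. A minimal read-modify-write
 * sketch, e.g. clearing disidxalias so index aliasing stays enabled,
 * assuming the CVMX_L2C_CTL address macro from this header and the CSR
 * accessors from asm/octeon/cvmx.h:
 *
 *	union cvmx_l2c_ctl ctl;
 *
 *	ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
 *	ctl.s.disidxalias = 0;
 *	cvmx_write_csr(CVMX_L2C_CTL, ctl.u64);
 */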
union cvmx_l2c_dbg {
uint64_t u64;
struct cvmx_l2c_dbg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t lfb_enum:4;
- uint64_t lfb_dmp:1;
- uint64_t ppnum:4;
- uint64_t set:3;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:3;
- uint64_t ppnum:4;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:4;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_l2c_dbg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_13_63:51;
- uint64_t lfb_enum:2;
- uint64_t lfb_dmp:1;
- uint64_t reserved_7_9:3;
- uint64_t ppnum:1;
- uint64_t reserved_5_5:1;
- uint64_t set:2;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:2;
- uint64_t reserved_5_5:1;
- uint64_t ppnum:1;
- uint64_t reserved_7_9:3;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:2;
- uint64_t reserved_13_63:51;
-#endif
- } cn30xx;
- struct cvmx_l2c_dbg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t lfb_enum:3;
- uint64_t lfb_dmp:1;
- uint64_t reserved_7_9:3;
- uint64_t ppnum:1;
- uint64_t reserved_5_5:1;
- uint64_t set:2;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:2;
- uint64_t reserved_5_5:1;
- uint64_t ppnum:1;
- uint64_t reserved_7_9:3;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:3;
- uint64_t reserved_14_63:50;
-#endif
- } cn31xx;
- struct cvmx_l2c_dbg_s cn38xx;
- struct cvmx_l2c_dbg_s cn38xxp2;
- struct cvmx_l2c_dbg_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t lfb_enum:3;
- uint64_t lfb_dmp:1;
- uint64_t reserved_7_9:3;
- uint64_t ppnum:1;
- uint64_t set:3;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:3;
- uint64_t ppnum:1;
- uint64_t reserved_7_9:3;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:3;
- uint64_t reserved_14_63:50;
-#endif
- } cn50xx;
- struct cvmx_l2c_dbg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t lfb_enum:3;
- uint64_t lfb_dmp:1;
- uint64_t reserved_8_9:2;
- uint64_t ppnum:2;
- uint64_t set:3;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:3;
- uint64_t ppnum:2;
- uint64_t reserved_8_9:2;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:3;
- uint64_t reserved_14_63:50;
-#endif
- } cn52xx;
- struct cvmx_l2c_dbg_cn52xx cn52xxp1;
- struct cvmx_l2c_dbg_s cn56xx;
- struct cvmx_l2c_dbg_s cn56xxp1;
- struct cvmx_l2c_dbg_s cn58xx;
- struct cvmx_l2c_dbg_s cn58xxp1;
-};
-
-union cvmx_l2c_dut {
- uint64_t u64;
- struct cvmx_l2c_dut_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dtena:1;
- uint64_t reserved_30_30:1;
- uint64_t dt_vld:1;
- uint64_t dt_tag:29;
-#else
- uint64_t dt_tag:29;
- uint64_t dt_vld:1;
- uint64_t reserved_30_30:1;
- uint64_t dtena:1;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_dut_s cn30xx;
- struct cvmx_l2c_dut_s cn31xx;
- struct cvmx_l2c_dut_s cn38xx;
- struct cvmx_l2c_dut_s cn38xxp2;
- struct cvmx_l2c_dut_s cn50xx;
- struct cvmx_l2c_dut_s cn52xx;
- struct cvmx_l2c_dut_s cn52xxp1;
- struct cvmx_l2c_dut_s cn56xx;
- struct cvmx_l2c_dut_s cn56xxp1;
- struct cvmx_l2c_dut_s cn58xx;
- struct cvmx_l2c_dut_s cn58xxp1;
-};
-
-union cvmx_l2c_dut_mapx {
- uint64_t u64;
- struct cvmx_l2c_dut_mapx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_38_63:26;
- uint64_t tag:28;
- uint64_t reserved_1_9:9;
- uint64_t valid:1;
-#else
- uint64_t valid:1;
- uint64_t reserved_1_9:9;
- uint64_t tag:28;
- uint64_t reserved_38_63:26;
-#endif
- } s;
- struct cvmx_l2c_dut_mapx_s cn61xx;
- struct cvmx_l2c_dut_mapx_s cn63xx;
- struct cvmx_l2c_dut_mapx_s cn63xxp1;
- struct cvmx_l2c_dut_mapx_s cn66xx;
- struct cvmx_l2c_dut_mapx_s cn68xx;
- struct cvmx_l2c_dut_mapx_s cn68xxp1;
- struct cvmx_l2c_dut_mapx_s cnf71xx;
-};
-
-union cvmx_l2c_err_tdtx {
- uint64_t u64;
- struct cvmx_l2c_err_tdtx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t syn:10;
- uint64_t reserved_22_49:28;
- uint64_t wayidx:18;
- uint64_t reserved_2_3:2;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_3:2;
- uint64_t wayidx:18;
- uint64_t reserved_22_49:28;
- uint64_t syn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } s;
- struct cvmx_l2c_err_tdtx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t syn:10;
- uint64_t reserved_20_49:30;
- uint64_t wayidx:16;
- uint64_t reserved_2_3:2;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_3:2;
- uint64_t wayidx:16;
- uint64_t reserved_20_49:30;
- uint64_t syn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn61xx;
- struct cvmx_l2c_err_tdtx_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t syn:10;
- uint64_t reserved_21_49:29;
- uint64_t wayidx:17;
- uint64_t reserved_2_3:2;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_3:2;
- uint64_t wayidx:17;
- uint64_t reserved_21_49:29;
- uint64_t syn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn63xx;
- struct cvmx_l2c_err_tdtx_cn63xx cn63xxp1;
- struct cvmx_l2c_err_tdtx_cn63xx cn66xx;
- struct cvmx_l2c_err_tdtx_s cn68xx;
- struct cvmx_l2c_err_tdtx_s cn68xxp1;
- struct cvmx_l2c_err_tdtx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_err_ttgx {
- uint64_t u64;
- struct cvmx_l2c_err_ttgx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t noway:1;
- uint64_t reserved_56_60:5;
- uint64_t syn:6;
- uint64_t reserved_22_49:28;
- uint64_t wayidx:15;
- uint64_t reserved_2_6:5;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_6:5;
- uint64_t wayidx:15;
- uint64_t reserved_22_49:28;
- uint64_t syn:6;
- uint64_t reserved_56_60:5;
- uint64_t noway:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } s;
- struct cvmx_l2c_err_ttgx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t noway:1;
- uint64_t reserved_56_60:5;
- uint64_t syn:6;
- uint64_t reserved_20_49:30;
- uint64_t wayidx:13;
- uint64_t reserved_2_6:5;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_6:5;
- uint64_t wayidx:13;
- uint64_t reserved_20_49:30;
- uint64_t syn:6;
- uint64_t reserved_56_60:5;
- uint64_t noway:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn61xx;
- struct cvmx_l2c_err_ttgx_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t noway:1;
- uint64_t reserved_56_60:5;
- uint64_t syn:6;
- uint64_t reserved_21_49:29;
- uint64_t wayidx:14;
- uint64_t reserved_2_6:5;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_6:5;
- uint64_t wayidx:14;
- uint64_t reserved_21_49:29;
- uint64_t syn:6;
- uint64_t reserved_56_60:5;
- uint64_t noway:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn63xx;
- struct cvmx_l2c_err_ttgx_cn63xx cn63xxp1;
- struct cvmx_l2c_err_ttgx_cn63xx cn66xx;
- struct cvmx_l2c_err_ttgx_s cn68xx;
- struct cvmx_l2c_err_ttgx_s cn68xxp1;
- struct cvmx_l2c_err_ttgx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_err_vbfx {
- uint64_t u64;
- struct cvmx_l2c_err_vbfx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t vsyn:10;
- uint64_t reserved_2_49:48;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_49:48;
- uint64_t vsyn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_l2c_err_vbfx_s cn61xx;
- struct cvmx_l2c_err_vbfx_s cn63xx;
- struct cvmx_l2c_err_vbfx_s cn63xxp1;
- struct cvmx_l2c_err_vbfx_s cn66xx;
- struct cvmx_l2c_err_vbfx_s cn68xx;
- struct cvmx_l2c_err_vbfx_s cn68xxp1;
- struct cvmx_l2c_err_vbfx_s cnf71xx;
-};
-
-union cvmx_l2c_err_xmc {
- uint64_t u64;
- struct cvmx_l2c_err_xmc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cmd:6;
- uint64_t reserved_54_57:4;
- uint64_t sid:6;
- uint64_t reserved_38_47:10;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_47:10;
- uint64_t sid:6;
- uint64_t reserved_54_57:4;
- uint64_t cmd:6;
-#endif
- } s;
- struct cvmx_l2c_err_xmc_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cmd:6;
- uint64_t reserved_52_57:6;
- uint64_t sid:4;
- uint64_t reserved_38_47:10;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_47:10;
- uint64_t sid:4;
- uint64_t reserved_52_57:6;
- uint64_t cmd:6;
-#endif
- } cn61xx;
- struct cvmx_l2c_err_xmc_cn61xx cn63xx;
- struct cvmx_l2c_err_xmc_cn61xx cn63xxp1;
- struct cvmx_l2c_err_xmc_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cmd:6;
- uint64_t reserved_53_57:5;
- uint64_t sid:5;
- uint64_t reserved_38_47:10;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_47:10;
- uint64_t sid:5;
- uint64_t reserved_53_57:5;
- uint64_t cmd:6;
-#endif
- } cn66xx;
- struct cvmx_l2c_err_xmc_s cn68xx;
- struct cvmx_l2c_err_xmc_s cn68xxp1;
- struct cvmx_l2c_err_xmc_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_grpwrr0 {
- uint64_t u64;
- struct cvmx_l2c_grpwrr0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t plc1rmsk:32;
- uint64_t plc0rmsk:32;
-#else
- uint64_t plc0rmsk:32;
- uint64_t plc1rmsk:32;
-#endif
- } s;
- struct cvmx_l2c_grpwrr0_s cn52xx;
- struct cvmx_l2c_grpwrr0_s cn52xxp1;
- struct cvmx_l2c_grpwrr0_s cn56xx;
- struct cvmx_l2c_grpwrr0_s cn56xxp1;
-};
-
-union cvmx_l2c_grpwrr1 {
- uint64_t u64;
- struct cvmx_l2c_grpwrr1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t ilcrmsk:32;
- uint64_t plc2rmsk:32;
-#else
- uint64_t plc2rmsk:32;
- uint64_t ilcrmsk:32;
-#endif
- } s;
- struct cvmx_l2c_grpwrr1_s cn52xx;
- struct cvmx_l2c_grpwrr1_s cn52xxp1;
- struct cvmx_l2c_grpwrr1_s cn56xx;
- struct cvmx_l2c_grpwrr1_s cn56xxp1;
-};
-
-union cvmx_l2c_int_en {
- uint64_t u64;
- struct cvmx_l2c_int_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t lck2ena:1;
- uint64_t lckena:1;
- uint64_t l2ddeden:1;
- uint64_t l2dsecen:1;
- uint64_t l2tdeden:1;
- uint64_t l2tsecen:1;
- uint64_t oob3en:1;
- uint64_t oob2en:1;
- uint64_t oob1en:1;
-#else
- uint64_t oob1en:1;
- uint64_t oob2en:1;
- uint64_t oob3en:1;
- uint64_t l2tsecen:1;
- uint64_t l2tdeden:1;
- uint64_t l2dsecen:1;
- uint64_t l2ddeden:1;
- uint64_t lckena:1;
- uint64_t lck2ena:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_int_en_s cn52xx;
- struct cvmx_l2c_int_en_s cn52xxp1;
- struct cvmx_l2c_int_en_s cn56xx;
- struct cvmx_l2c_int_en_s cn56xxp1;
-};
-
-union cvmx_l2c_int_ena {
- uint64_t u64;
- struct cvmx_l2c_int_ena_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t bigrd:1;
- uint64_t bigwr:1;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t bigwr:1;
- uint64_t bigrd:1;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_l2c_int_ena_s cn61xx;
- struct cvmx_l2c_int_ena_s cn63xx;
- struct cvmx_l2c_int_ena_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_int_ena_s cn66xx;
- struct cvmx_l2c_int_ena_s cn68xx;
- struct cvmx_l2c_int_ena_s cn68xxp1;
- struct cvmx_l2c_int_ena_s cnf71xx;
-};
-
-union cvmx_l2c_int_reg {
- uint64_t u64;
- struct cvmx_l2c_int_reg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t tad3:1;
- uint64_t tad2:1;
- uint64_t tad1:1;
- uint64_t tad0:1;
- uint64_t reserved_8_15:8;
- uint64_t bigrd:1;
- uint64_t bigwr:1;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t bigwr:1;
- uint64_t bigrd:1;
- uint64_t reserved_8_15:8;
- uint64_t tad0:1;
- uint64_t tad1:1;
- uint64_t tad2:1;
- uint64_t tad3:1;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_l2c_int_reg_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t tad0:1;
- uint64_t reserved_8_15:8;
- uint64_t bigrd:1;
- uint64_t bigwr:1;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t bigwr:1;
- uint64_t bigrd:1;
- uint64_t reserved_8_15:8;
- uint64_t tad0:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn61xx;
- struct cvmx_l2c_int_reg_cn61xx cn63xx;
- struct cvmx_l2c_int_reg_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t tad0:1;
- uint64_t reserved_6_15:10;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t reserved_6_15:10;
- uint64_t tad0:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_int_reg_cn61xx cn66xx;
- struct cvmx_l2c_int_reg_s cn68xx;
- struct cvmx_l2c_int_reg_s cn68xxp1;
- struct cvmx_l2c_int_reg_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_int_stat {
- uint64_t u64;
- struct cvmx_l2c_int_stat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t lck2:1;
- uint64_t lck:1;
- uint64_t l2dded:1;
- uint64_t l2dsec:1;
- uint64_t l2tded:1;
- uint64_t l2tsec:1;
- uint64_t oob3:1;
- uint64_t oob2:1;
- uint64_t oob1:1;
-#else
- uint64_t oob1:1;
- uint64_t oob2:1;
- uint64_t oob3:1;
- uint64_t l2tsec:1;
- uint64_t l2tded:1;
- uint64_t l2dsec:1;
- uint64_t l2dded:1;
- uint64_t lck:1;
- uint64_t lck2:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_int_stat_s cn52xx;
- struct cvmx_l2c_int_stat_s cn52xxp1;
- struct cvmx_l2c_int_stat_s cn56xx;
- struct cvmx_l2c_int_stat_s cn56xxp1;
-};
-
-union cvmx_l2c_iocx_pfc {
- uint64_t u64;
- struct cvmx_l2c_iocx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_iocx_pfc_s cn61xx;
- struct cvmx_l2c_iocx_pfc_s cn63xx;
- struct cvmx_l2c_iocx_pfc_s cn63xxp1;
- struct cvmx_l2c_iocx_pfc_s cn66xx;
- struct cvmx_l2c_iocx_pfc_s cn68xx;
- struct cvmx_l2c_iocx_pfc_s cn68xxp1;
- struct cvmx_l2c_iocx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_iorx_pfc {
- uint64_t u64;
- struct cvmx_l2c_iorx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_iorx_pfc_s cn61xx;
- struct cvmx_l2c_iorx_pfc_s cn63xx;
- struct cvmx_l2c_iorx_pfc_s cn63xxp1;
- struct cvmx_l2c_iorx_pfc_s cn66xx;
- struct cvmx_l2c_iorx_pfc_s cn68xx;
- struct cvmx_l2c_iorx_pfc_s cn68xxp1;
- struct cvmx_l2c_iorx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_lckbase {
- uint64_t u64;
- struct cvmx_l2c_lckbase_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t lck_base:27;
- uint64_t reserved_1_3:3;
- uint64_t lck_ena:1;
-#else
- uint64_t lck_ena:1;
- uint64_t reserved_1_3:3;
- uint64_t lck_base:27;
- uint64_t reserved_31_63:33;
-#endif
- } s;
- struct cvmx_l2c_lckbase_s cn30xx;
- struct cvmx_l2c_lckbase_s cn31xx;
- struct cvmx_l2c_lckbase_s cn38xx;
- struct cvmx_l2c_lckbase_s cn38xxp2;
- struct cvmx_l2c_lckbase_s cn50xx;
- struct cvmx_l2c_lckbase_s cn52xx;
- struct cvmx_l2c_lckbase_s cn52xxp1;
- struct cvmx_l2c_lckbase_s cn56xx;
- struct cvmx_l2c_lckbase_s cn56xxp1;
- struct cvmx_l2c_lckbase_s cn58xx;
- struct cvmx_l2c_lckbase_s cn58xxp1;
-};
-
-union cvmx_l2c_lckoff {
- uint64_t u64;
- struct cvmx_l2c_lckoff_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t lck_offset:10;
-#else
- uint64_t lck_offset:10;
- uint64_t reserved_10_63:54;
-#endif
- } s;
- struct cvmx_l2c_lckoff_s cn30xx;
- struct cvmx_l2c_lckoff_s cn31xx;
- struct cvmx_l2c_lckoff_s cn38xx;
- struct cvmx_l2c_lckoff_s cn38xxp2;
- struct cvmx_l2c_lckoff_s cn50xx;
- struct cvmx_l2c_lckoff_s cn52xx;
- struct cvmx_l2c_lckoff_s cn52xxp1;
- struct cvmx_l2c_lckoff_s cn56xx;
- struct cvmx_l2c_lckoff_s cn56xxp1;
- struct cvmx_l2c_lckoff_s cn58xx;
- struct cvmx_l2c_lckoff_s cn58xxp1;
-};
-
-union cvmx_l2c_lfb0 {
- uint64_t u64;
- struct cvmx_l2c_lfb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t inxt:4;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t set:3;
- uint64_t vabnum:4;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:4;
- uint64_t set:3;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:4;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_lfb0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t reserved_25_26:2;
- uint64_t inxt:2;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t reserved_20_20:1;
- uint64_t set:2;
- uint64_t reserved_16_17:2;
- uint64_t vabnum:2;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:2;
- uint64_t reserved_16_17:2;
- uint64_t set:2;
- uint64_t reserved_20_20:1;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:2;
- uint64_t reserved_25_26:2;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn30xx;
- struct cvmx_l2c_lfb0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t reserved_26_26:1;
- uint64_t inxt:3;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t reserved_20_20:1;
- uint64_t set:2;
- uint64_t reserved_17_17:1;
- uint64_t vabnum:3;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:3;
- uint64_t reserved_17_17:1;
- uint64_t set:2;
- uint64_t reserved_20_20:1;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:3;
- uint64_t reserved_26_26:1;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn31xx;
- struct cvmx_l2c_lfb0_s cn38xx;
- struct cvmx_l2c_lfb0_s cn38xxp2;
- struct cvmx_l2c_lfb0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t reserved_26_26:1;
- uint64_t inxt:3;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t set:3;
- uint64_t reserved_17_17:1;
- uint64_t vabnum:3;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:3;
- uint64_t reserved_17_17:1;
- uint64_t set:3;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:3;
- uint64_t reserved_26_26:1;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn50xx;
- struct cvmx_l2c_lfb0_cn50xx cn52xx;
- struct cvmx_l2c_lfb0_cn50xx cn52xxp1;
- struct cvmx_l2c_lfb0_s cn56xx;
- struct cvmx_l2c_lfb0_s cn56xxp1;
- struct cvmx_l2c_lfb0_s cn58xx;
- struct cvmx_l2c_lfb0_s cn58xxp1;
-};
-
-union cvmx_l2c_lfb1 {
- uint64_t u64;
- struct cvmx_l2c_lfb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t dsgoing:1;
- uint64_t bid:2;
- uint64_t wtrsp:1;
- uint64_t wtdw:1;
- uint64_t wtdq:1;
- uint64_t wtwhp:1;
- uint64_t wtwhf:1;
- uint64_t wtwrm:1;
- uint64_t wtstm:1;
- uint64_t wtrda:1;
- uint64_t wtstdt:1;
- uint64_t wtstrsp:1;
- uint64_t wtstrsc:1;
- uint64_t wtvtm:1;
- uint64_t wtmfl:1;
- uint64_t prbrty:1;
- uint64_t wtprb:1;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t wtprb:1;
- uint64_t prbrty:1;
- uint64_t wtmfl:1;
- uint64_t wtvtm:1;
- uint64_t wtstrsc:1;
- uint64_t wtstrsp:1;
- uint64_t wtstdt:1;
- uint64_t wtrda:1;
- uint64_t wtstm:1;
- uint64_t wtwrm:1;
- uint64_t wtwhf:1;
- uint64_t wtwhp:1;
- uint64_t wtdq:1;
- uint64_t wtdw:1;
- uint64_t wtrsp:1;
- uint64_t bid:2;
- uint64_t dsgoing:1;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_l2c_lfb1_s cn30xx;
- struct cvmx_l2c_lfb1_s cn31xx;
- struct cvmx_l2c_lfb1_s cn38xx;
- struct cvmx_l2c_lfb1_s cn38xxp2;
- struct cvmx_l2c_lfb1_s cn50xx;
- struct cvmx_l2c_lfb1_s cn52xx;
- struct cvmx_l2c_lfb1_s cn52xxp1;
- struct cvmx_l2c_lfb1_s cn56xx;
- struct cvmx_l2c_lfb1_s cn56xxp1;
- struct cvmx_l2c_lfb1_s cn58xx;
- struct cvmx_l2c_lfb1_s cn58xxp1;
-};
-
-union cvmx_l2c_lfb2 {
- uint64_t u64;
- struct cvmx_l2c_lfb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_0_63:64;
-#else
- uint64_t reserved_0_63:64;
-#endif
- } s;
- struct cvmx_l2c_lfb2_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:19;
- uint64_t lfb_idx:8;
-#else
- uint64_t lfb_idx:8;
- uint64_t lfb_tag:19;
- uint64_t reserved_27_63:37;
-#endif
- } cn30xx;
- struct cvmx_l2c_lfb2_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:17;
- uint64_t lfb_idx:10;
-#else
- uint64_t lfb_idx:10;
- uint64_t lfb_tag:17;
- uint64_t reserved_27_63:37;
-#endif
- } cn31xx;
- struct cvmx_l2c_lfb2_cn31xx cn38xx;
- struct cvmx_l2c_lfb2_cn31xx cn38xxp2;
- struct cvmx_l2c_lfb2_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:20;
- uint64_t lfb_idx:7;
-#else
- uint64_t lfb_idx:7;
- uint64_t lfb_tag:20;
- uint64_t reserved_27_63:37;
-#endif
- } cn50xx;
- struct cvmx_l2c_lfb2_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:18;
- uint64_t lfb_idx:9;
-#else
- uint64_t lfb_idx:9;
- uint64_t lfb_tag:18;
- uint64_t reserved_27_63:37;
-#endif
- } cn52xx;
- struct cvmx_l2c_lfb2_cn52xx cn52xxp1;
- struct cvmx_l2c_lfb2_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:16;
- uint64_t lfb_idx:11;
-#else
- uint64_t lfb_idx:11;
- uint64_t lfb_tag:16;
- uint64_t reserved_27_63:37;
-#endif
- } cn56xx;
- struct cvmx_l2c_lfb2_cn56xx cn56xxp1;
- struct cvmx_l2c_lfb2_cn56xx cn58xx;
- struct cvmx_l2c_lfb2_cn56xx cn58xxp1;
-};
-
-union cvmx_l2c_lfb3 {
- uint64_t u64;
- struct cvmx_l2c_lfb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t stpartdis:1;
- uint64_t lfb_hwm:4;
-#else
- uint64_t lfb_hwm:4;
- uint64_t stpartdis:1;
- uint64_t reserved_5_63:59;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_15_63:49,
+ __BITFIELD_FIELD(uint64_t lfb_enum:4,
+ __BITFIELD_FIELD(uint64_t lfb_dmp:1,
+ __BITFIELD_FIELD(uint64_t ppnum:4,
+ __BITFIELD_FIELD(uint64_t set:3,
+ __BITFIELD_FIELD(uint64_t finv:1,
+ __BITFIELD_FIELD(uint64_t l2d:1,
+ __BITFIELD_FIELD(uint64_t l2t:1,
+ ;))))))))
} s;
- struct cvmx_l2c_lfb3_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t stpartdis:1;
- uint64_t reserved_2_3:2;
- uint64_t lfb_hwm:2;
-#else
- uint64_t lfb_hwm:2;
- uint64_t reserved_2_3:2;
- uint64_t stpartdis:1;
- uint64_t reserved_5_63:59;
-#endif
- } cn30xx;
- struct cvmx_l2c_lfb3_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t stpartdis:1;
- uint64_t reserved_3_3:1;
- uint64_t lfb_hwm:3;
-#else
- uint64_t lfb_hwm:3;
- uint64_t reserved_3_3:1;
- uint64_t stpartdis:1;
- uint64_t reserved_5_63:59;
-#endif
- } cn31xx;
- struct cvmx_l2c_lfb3_s cn38xx;
- struct cvmx_l2c_lfb3_s cn38xxp2;
- struct cvmx_l2c_lfb3_cn31xx cn50xx;
- struct cvmx_l2c_lfb3_cn31xx cn52xx;
- struct cvmx_l2c_lfb3_cn31xx cn52xxp1;
- struct cvmx_l2c_lfb3_s cn56xx;
- struct cvmx_l2c_lfb3_s cn56xxp1;
- struct cvmx_l2c_lfb3_s cn58xx;
- struct cvmx_l2c_lfb3_s cn58xxp1;
-};
-
-union cvmx_l2c_oob {
- uint64_t u64;
- struct cvmx_l2c_oob_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t dwbena:1;
- uint64_t stena:1;
-#else
- uint64_t stena:1;
- uint64_t dwbena:1;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_l2c_oob_s cn52xx;
- struct cvmx_l2c_oob_s cn52xxp1;
- struct cvmx_l2c_oob_s cn56xx;
- struct cvmx_l2c_oob_s cn56xxp1;
-};
-
-union cvmx_l2c_oob1 {
- uint64_t u64;
- struct cvmx_l2c_oob1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t fadr:27;
- uint64_t fsrc:1;
- uint64_t reserved_34_35:2;
- uint64_t sadr:14;
- uint64_t reserved_14_19:6;
- uint64_t size:14;
-#else
- uint64_t size:14;
- uint64_t reserved_14_19:6;
- uint64_t sadr:14;
- uint64_t reserved_34_35:2;
- uint64_t fsrc:1;
- uint64_t fadr:27;
-#endif
- } s;
- struct cvmx_l2c_oob1_s cn52xx;
- struct cvmx_l2c_oob1_s cn52xxp1;
- struct cvmx_l2c_oob1_s cn56xx;
- struct cvmx_l2c_oob1_s cn56xxp1;
-};
-
-union cvmx_l2c_oob2 {
- uint64_t u64;
- struct cvmx_l2c_oob2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t fadr:27;
- uint64_t fsrc:1;
- uint64_t reserved_34_35:2;
- uint64_t sadr:14;
- uint64_t reserved_14_19:6;
- uint64_t size:14;
-#else
- uint64_t size:14;
- uint64_t reserved_14_19:6;
- uint64_t sadr:14;
- uint64_t reserved_34_35:2;
- uint64_t fsrc:1;
- uint64_t fadr:27;
-#endif
- } s;
- struct cvmx_l2c_oob2_s cn52xx;
- struct cvmx_l2c_oob2_s cn52xxp1;
- struct cvmx_l2c_oob2_s cn56xx;
- struct cvmx_l2c_oob2_s cn56xxp1;
-};
-
-union cvmx_l2c_oob3 {
- uint64_t u64;
- struct cvmx_l2c_oob3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t fadr:27;
- uint64_t fsrc:1;
- uint64_t reserved_34_35:2;
- uint64_t sadr:14;
- uint64_t reserved_14_19:6;
- uint64_t size:14;
-#else
- uint64_t size:14;
- uint64_t reserved_14_19:6;
- uint64_t sadr:14;
- uint64_t reserved_34_35:2;
- uint64_t fsrc:1;
- uint64_t fadr:27;
-#endif
- } s;
- struct cvmx_l2c_oob3_s cn52xx;
- struct cvmx_l2c_oob3_s cn52xxp1;
- struct cvmx_l2c_oob3_s cn56xx;
- struct cvmx_l2c_oob3_s cn56xxp1;
-};
-
-union cvmx_l2c_pfcx {
- uint64_t u64;
- struct cvmx_l2c_pfcx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t pfcnt0:36;
-#else
- uint64_t pfcnt0:36;
- uint64_t reserved_36_63:28;
-#endif
- } s;
- struct cvmx_l2c_pfcx_s cn30xx;
- struct cvmx_l2c_pfcx_s cn31xx;
- struct cvmx_l2c_pfcx_s cn38xx;
- struct cvmx_l2c_pfcx_s cn38xxp2;
- struct cvmx_l2c_pfcx_s cn50xx;
- struct cvmx_l2c_pfcx_s cn52xx;
- struct cvmx_l2c_pfcx_s cn52xxp1;
- struct cvmx_l2c_pfcx_s cn56xx;
- struct cvmx_l2c_pfcx_s cn56xxp1;
- struct cvmx_l2c_pfcx_s cn58xx;
- struct cvmx_l2c_pfcx_s cn58xxp1;
};
union cvmx_l2c_pfctl {
uint64_t u64;
struct cvmx_l2c_pfctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t cnt3rdclr:1;
- uint64_t cnt2rdclr:1;
- uint64_t cnt1rdclr:1;
- uint64_t cnt0rdclr:1;
- uint64_t cnt3ena:1;
- uint64_t cnt3clr:1;
- uint64_t cnt3sel:6;
- uint64_t cnt2ena:1;
- uint64_t cnt2clr:1;
- uint64_t cnt2sel:6;
- uint64_t cnt1ena:1;
- uint64_t cnt1clr:1;
- uint64_t cnt1sel:6;
- uint64_t cnt0ena:1;
- uint64_t cnt0clr:1;
- uint64_t cnt0sel:6;
-#else
- uint64_t cnt0sel:6;
- uint64_t cnt0clr:1;
- uint64_t cnt0ena:1;
- uint64_t cnt1sel:6;
- uint64_t cnt1clr:1;
- uint64_t cnt1ena:1;
- uint64_t cnt2sel:6;
- uint64_t cnt2clr:1;
- uint64_t cnt2ena:1;
- uint64_t cnt3sel:6;
- uint64_t cnt3clr:1;
- uint64_t cnt3ena:1;
- uint64_t cnt0rdclr:1;
- uint64_t cnt1rdclr:1;
- uint64_t cnt2rdclr:1;
- uint64_t cnt3rdclr:1;
- uint64_t reserved_36_63:28;
-#endif
- } s;
- struct cvmx_l2c_pfctl_s cn30xx;
- struct cvmx_l2c_pfctl_s cn31xx;
- struct cvmx_l2c_pfctl_s cn38xx;
- struct cvmx_l2c_pfctl_s cn38xxp2;
- struct cvmx_l2c_pfctl_s cn50xx;
- struct cvmx_l2c_pfctl_s cn52xx;
- struct cvmx_l2c_pfctl_s cn52xxp1;
- struct cvmx_l2c_pfctl_s cn56xx;
- struct cvmx_l2c_pfctl_s cn56xxp1;
- struct cvmx_l2c_pfctl_s cn58xx;
- struct cvmx_l2c_pfctl_s cn58xxp1;
-};
-
-union cvmx_l2c_ppgrp {
- uint64_t u64;
- struct cvmx_l2c_ppgrp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t pp11grp:2;
- uint64_t pp10grp:2;
- uint64_t pp9grp:2;
- uint64_t pp8grp:2;
- uint64_t pp7grp:2;
- uint64_t pp6grp:2;
- uint64_t pp5grp:2;
- uint64_t pp4grp:2;
- uint64_t pp3grp:2;
- uint64_t pp2grp:2;
- uint64_t pp1grp:2;
- uint64_t pp0grp:2;
-#else
- uint64_t pp0grp:2;
- uint64_t pp1grp:2;
- uint64_t pp2grp:2;
- uint64_t pp3grp:2;
- uint64_t pp4grp:2;
- uint64_t pp5grp:2;
- uint64_t pp6grp:2;
- uint64_t pp7grp:2;
- uint64_t pp8grp:2;
- uint64_t pp9grp:2;
- uint64_t pp10grp:2;
- uint64_t pp11grp:2;
- uint64_t reserved_24_63:40;
-#endif
- } s;
- struct cvmx_l2c_ppgrp_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t pp3grp:2;
- uint64_t pp2grp:2;
- uint64_t pp1grp:2;
- uint64_t pp0grp:2;
-#else
- uint64_t pp0grp:2;
- uint64_t pp1grp:2;
- uint64_t pp2grp:2;
- uint64_t pp3grp:2;
- uint64_t reserved_8_63:56;
-#endif
- } cn52xx;
- struct cvmx_l2c_ppgrp_cn52xx cn52xxp1;
- struct cvmx_l2c_ppgrp_s cn56xx;
- struct cvmx_l2c_ppgrp_s cn56xxp1;
-};
-
-union cvmx_l2c_qos_iobx {
- uint64_t u64;
- struct cvmx_l2c_qos_iobx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t dwblvl:3;
- uint64_t reserved_3_3:1;
- uint64_t lvl:3;
-#else
- uint64_t lvl:3;
- uint64_t reserved_3_3:1;
- uint64_t dwblvl:3;
- uint64_t reserved_7_63:57;
-#endif
- } s;
- struct cvmx_l2c_qos_iobx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t dwblvl:2;
- uint64_t reserved_2_3:2;
- uint64_t lvl:2;
-#else
- uint64_t lvl:2;
- uint64_t reserved_2_3:2;
- uint64_t dwblvl:2;
- uint64_t reserved_6_63:58;
-#endif
- } cn61xx;
- struct cvmx_l2c_qos_iobx_cn61xx cn63xx;
- struct cvmx_l2c_qos_iobx_cn61xx cn63xxp1;
- struct cvmx_l2c_qos_iobx_cn61xx cn66xx;
- struct cvmx_l2c_qos_iobx_s cn68xx;
- struct cvmx_l2c_qos_iobx_s cn68xxp1;
- struct cvmx_l2c_qos_iobx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_qos_ppx {
- uint64_t u64;
- struct cvmx_l2c_qos_ppx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t lvl:3;
-#else
- uint64_t lvl:3;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_l2c_qos_ppx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t lvl:2;
-#else
- uint64_t lvl:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn61xx;
- struct cvmx_l2c_qos_ppx_cn61xx cn63xx;
- struct cvmx_l2c_qos_ppx_cn61xx cn63xxp1;
- struct cvmx_l2c_qos_ppx_cn61xx cn66xx;
- struct cvmx_l2c_qos_ppx_s cn68xx;
- struct cvmx_l2c_qos_ppx_s cn68xxp1;
- struct cvmx_l2c_qos_ppx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_qos_wgt {
- uint64_t u64;
- struct cvmx_l2c_qos_wgt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wgt7:8;
- uint64_t wgt6:8;
- uint64_t wgt5:8;
- uint64_t wgt4:8;
- uint64_t wgt3:8;
- uint64_t wgt2:8;
- uint64_t wgt1:8;
- uint64_t wgt0:8;
-#else
- uint64_t wgt0:8;
- uint64_t wgt1:8;
- uint64_t wgt2:8;
- uint64_t wgt3:8;
- uint64_t wgt4:8;
- uint64_t wgt5:8;
- uint64_t wgt6:8;
- uint64_t wgt7:8;
-#endif
- } s;
- struct cvmx_l2c_qos_wgt_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wgt3:8;
- uint64_t wgt2:8;
- uint64_t wgt1:8;
- uint64_t wgt0:8;
-#else
- uint64_t wgt0:8;
- uint64_t wgt1:8;
- uint64_t wgt2:8;
- uint64_t wgt3:8;
- uint64_t reserved_32_63:32;
-#endif
- } cn61xx;
- struct cvmx_l2c_qos_wgt_cn61xx cn63xx;
- struct cvmx_l2c_qos_wgt_cn61xx cn63xxp1;
- struct cvmx_l2c_qos_wgt_cn61xx cn66xx;
- struct cvmx_l2c_qos_wgt_s cn68xx;
- struct cvmx_l2c_qos_wgt_s cn68xxp1;
- struct cvmx_l2c_qos_wgt_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_rscx_pfc {
- uint64_t u64;
- struct cvmx_l2c_rscx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_rscx_pfc_s cn61xx;
- struct cvmx_l2c_rscx_pfc_s cn63xx;
- struct cvmx_l2c_rscx_pfc_s cn63xxp1;
- struct cvmx_l2c_rscx_pfc_s cn66xx;
- struct cvmx_l2c_rscx_pfc_s cn68xx;
- struct cvmx_l2c_rscx_pfc_s cn68xxp1;
- struct cvmx_l2c_rscx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_rsdx_pfc {
- uint64_t u64;
- struct cvmx_l2c_rsdx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_rsdx_pfc_s cn61xx;
- struct cvmx_l2c_rsdx_pfc_s cn63xx;
- struct cvmx_l2c_rsdx_pfc_s cn63xxp1;
- struct cvmx_l2c_rsdx_pfc_s cn66xx;
- struct cvmx_l2c_rsdx_pfc_s cn68xx;
- struct cvmx_l2c_rsdx_pfc_s cn68xxp1;
- struct cvmx_l2c_rsdx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_spar0 {
- uint64_t u64;
- struct cvmx_l2c_spar0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk3:8;
- uint64_t umsk2:8;
- uint64_t umsk1:8;
- uint64_t umsk0:8;
-#else
- uint64_t umsk0:8;
- uint64_t umsk1:8;
- uint64_t umsk2:8;
- uint64_t umsk3:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_spar0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t umsk0:4;
-#else
- uint64_t umsk0:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_l2c_spar0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t umsk1:4;
- uint64_t reserved_4_7:4;
- uint64_t umsk0:4;
-#else
- uint64_t umsk0:4;
- uint64_t reserved_4_7:4;
- uint64_t umsk1:4;
- uint64_t reserved_12_63:52;
-#endif
- } cn31xx;
- struct cvmx_l2c_spar0_s cn38xx;
- struct cvmx_l2c_spar0_s cn38xxp2;
- struct cvmx_l2c_spar0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t umsk1:8;
- uint64_t umsk0:8;
-#else
- uint64_t umsk0:8;
- uint64_t umsk1:8;
- uint64_t reserved_16_63:48;
-#endif
- } cn50xx;
- struct cvmx_l2c_spar0_s cn52xx;
- struct cvmx_l2c_spar0_s cn52xxp1;
- struct cvmx_l2c_spar0_s cn56xx;
- struct cvmx_l2c_spar0_s cn56xxp1;
- struct cvmx_l2c_spar0_s cn58xx;
- struct cvmx_l2c_spar0_s cn58xxp1;
-};
-
-union cvmx_l2c_spar1 {
- uint64_t u64;
- struct cvmx_l2c_spar1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk7:8;
- uint64_t umsk6:8;
- uint64_t umsk5:8;
- uint64_t umsk4:8;
-#else
- uint64_t umsk4:8;
- uint64_t umsk5:8;
- uint64_t umsk6:8;
- uint64_t umsk7:8;
- uint64_t reserved_32_63:32;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_36_63:28,
+ __BITFIELD_FIELD(uint64_t cnt3rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt2rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt1rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt0rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt3ena:1,
+ __BITFIELD_FIELD(uint64_t cnt3clr:1,
+ __BITFIELD_FIELD(uint64_t cnt3sel:6,
+ __BITFIELD_FIELD(uint64_t cnt2ena:1,
+ __BITFIELD_FIELD(uint64_t cnt2clr:1,
+ __BITFIELD_FIELD(uint64_t cnt2sel:6,
+ __BITFIELD_FIELD(uint64_t cnt1ena:1,
+ __BITFIELD_FIELD(uint64_t cnt1clr:1,
+ __BITFIELD_FIELD(uint64_t cnt1sel:6,
+ __BITFIELD_FIELD(uint64_t cnt0ena:1,
+ __BITFIELD_FIELD(uint64_t cnt0clr:1,
+ __BITFIELD_FIELD(uint64_t cnt0sel:6,
+ ;)))))))))))))))))
} s;
- struct cvmx_l2c_spar1_s cn38xx;
- struct cvmx_l2c_spar1_s cn38xxp2;
- struct cvmx_l2c_spar1_s cn56xx;
- struct cvmx_l2c_spar1_s cn56xxp1;
- struct cvmx_l2c_spar1_s cn58xx;
- struct cvmx_l2c_spar1_s cn58xxp1;
-};
-
-union cvmx_l2c_spar2 {
- uint64_t u64;
- struct cvmx_l2c_spar2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk11:8;
- uint64_t umsk10:8;
- uint64_t umsk9:8;
- uint64_t umsk8:8;
-#else
- uint64_t umsk8:8;
- uint64_t umsk9:8;
- uint64_t umsk10:8;
- uint64_t umsk11:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_spar2_s cn38xx;
- struct cvmx_l2c_spar2_s cn38xxp2;
- struct cvmx_l2c_spar2_s cn56xx;
- struct cvmx_l2c_spar2_s cn56xxp1;
- struct cvmx_l2c_spar2_s cn58xx;
- struct cvmx_l2c_spar2_s cn58xxp1;
-};
-
-union cvmx_l2c_spar3 {
- uint64_t u64;
- struct cvmx_l2c_spar3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk15:8;
- uint64_t umsk14:8;
- uint64_t umsk13:8;
- uint64_t umsk12:8;
-#else
- uint64_t umsk12:8;
- uint64_t umsk13:8;
- uint64_t umsk14:8;
- uint64_t umsk15:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_spar3_s cn38xx;
- struct cvmx_l2c_spar3_s cn38xxp2;
- struct cvmx_l2c_spar3_s cn58xx;
- struct cvmx_l2c_spar3_s cn58xxp1;
-};
-
-union cvmx_l2c_spar4 {
- uint64_t u64;
- struct cvmx_l2c_spar4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t umskiob:8;
-#else
- uint64_t umskiob:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_l2c_spar4_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t umskiob:4;
-#else
- uint64_t umskiob:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_l2c_spar4_cn30xx cn31xx;
- struct cvmx_l2c_spar4_s cn38xx;
- struct cvmx_l2c_spar4_s cn38xxp2;
- struct cvmx_l2c_spar4_s cn50xx;
- struct cvmx_l2c_spar4_s cn52xx;
- struct cvmx_l2c_spar4_s cn52xxp1;
- struct cvmx_l2c_spar4_s cn56xx;
- struct cvmx_l2c_spar4_s cn56xxp1;
- struct cvmx_l2c_spar4_s cn58xx;
- struct cvmx_l2c_spar4_s cn58xxp1;
-};
-
-union cvmx_l2c_tadx_ecc0 {
- uint64_t u64;
- struct cvmx_l2c_tadx_ecc0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_58_63:6;
- uint64_t ow3ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow2ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow1ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow0ecc:10;
-#else
- uint64_t ow0ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow1ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow2ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow3ecc:10;
- uint64_t reserved_58_63:6;
-#endif
- } s;
- struct cvmx_l2c_tadx_ecc0_s cn61xx;
- struct cvmx_l2c_tadx_ecc0_s cn63xx;
- struct cvmx_l2c_tadx_ecc0_s cn63xxp1;
- struct cvmx_l2c_tadx_ecc0_s cn66xx;
- struct cvmx_l2c_tadx_ecc0_s cn68xx;
- struct cvmx_l2c_tadx_ecc0_s cn68xxp1;
- struct cvmx_l2c_tadx_ecc0_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_ecc1 {
- uint64_t u64;
- struct cvmx_l2c_tadx_ecc1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_58_63:6;
- uint64_t ow7ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow6ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow5ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow4ecc:10;
-#else
- uint64_t ow4ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow5ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow6ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow7ecc:10;
- uint64_t reserved_58_63:6;
-#endif
- } s;
- struct cvmx_l2c_tadx_ecc1_s cn61xx;
- struct cvmx_l2c_tadx_ecc1_s cn63xx;
- struct cvmx_l2c_tadx_ecc1_s cn63xxp1;
- struct cvmx_l2c_tadx_ecc1_s cn66xx;
- struct cvmx_l2c_tadx_ecc1_s cn68xx;
- struct cvmx_l2c_tadx_ecc1_s cn68xxp1;
- struct cvmx_l2c_tadx_ecc1_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_ien {
- uint64_t u64;
- struct cvmx_l2c_tadx_ien_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t wrdislmc:1;
- uint64_t rddislmc:1;
- uint64_t noway:1;
- uint64_t vbfdbe:1;
- uint64_t vbfsbe:1;
- uint64_t tagdbe:1;
- uint64_t tagsbe:1;
- uint64_t l2ddbe:1;
- uint64_t l2dsbe:1;
-#else
- uint64_t l2dsbe:1;
- uint64_t l2ddbe:1;
- uint64_t tagsbe:1;
- uint64_t tagdbe:1;
- uint64_t vbfsbe:1;
- uint64_t vbfdbe:1;
- uint64_t noway:1;
- uint64_t rddislmc:1;
- uint64_t wrdislmc:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_tadx_ien_s cn61xx;
- struct cvmx_l2c_tadx_ien_s cn63xx;
- struct cvmx_l2c_tadx_ien_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t noway:1;
- uint64_t vbfdbe:1;
- uint64_t vbfsbe:1;
- uint64_t tagdbe:1;
- uint64_t tagsbe:1;
- uint64_t l2ddbe:1;
- uint64_t l2dsbe:1;
-#else
- uint64_t l2dsbe:1;
- uint64_t l2ddbe:1;
- uint64_t tagsbe:1;
- uint64_t tagdbe:1;
- uint64_t vbfsbe:1;
- uint64_t vbfdbe:1;
- uint64_t noway:1;
- uint64_t reserved_7_63:57;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_tadx_ien_s cn66xx;
- struct cvmx_l2c_tadx_ien_s cn68xx;
- struct cvmx_l2c_tadx_ien_s cn68xxp1;
- struct cvmx_l2c_tadx_ien_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_int {
- uint64_t u64;
- struct cvmx_l2c_tadx_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t wrdislmc:1;
- uint64_t rddislmc:1;
- uint64_t noway:1;
- uint64_t vbfdbe:1;
- uint64_t vbfsbe:1;
- uint64_t tagdbe:1;
- uint64_t tagsbe:1;
- uint64_t l2ddbe:1;
- uint64_t l2dsbe:1;
-#else
- uint64_t l2dsbe:1;
- uint64_t l2ddbe:1;
- uint64_t tagsbe:1;
- uint64_t tagdbe:1;
- uint64_t vbfsbe:1;
- uint64_t vbfdbe:1;
- uint64_t noway:1;
- uint64_t rddislmc:1;
- uint64_t wrdislmc:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_tadx_int_s cn61xx;
- struct cvmx_l2c_tadx_int_s cn63xx;
- struct cvmx_l2c_tadx_int_s cn66xx;
- struct cvmx_l2c_tadx_int_s cn68xx;
- struct cvmx_l2c_tadx_int_s cn68xxp1;
- struct cvmx_l2c_tadx_int_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc0 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc0_s cn61xx;
- struct cvmx_l2c_tadx_pfc0_s cn63xx;
- struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc0_s cn66xx;
- struct cvmx_l2c_tadx_pfc0_s cn68xx;
- struct cvmx_l2c_tadx_pfc0_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc0_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc1 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc1_s cn61xx;
- struct cvmx_l2c_tadx_pfc1_s cn63xx;
- struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc1_s cn66xx;
- struct cvmx_l2c_tadx_pfc1_s cn68xx;
- struct cvmx_l2c_tadx_pfc1_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc1_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc2 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc2_s cn61xx;
- struct cvmx_l2c_tadx_pfc2_s cn63xx;
- struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc2_s cn66xx;
- struct cvmx_l2c_tadx_pfc2_s cn68xx;
- struct cvmx_l2c_tadx_pfc2_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc2_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc3 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc3_s cn61xx;
- struct cvmx_l2c_tadx_pfc3_s cn63xx;
- struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc3_s cn66xx;
- struct cvmx_l2c_tadx_pfc3_s cn68xx;
- struct cvmx_l2c_tadx_pfc3_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc3_s cnf71xx;
};
union cvmx_l2c_tadx_prf {
uint64_t u64;
struct cvmx_l2c_tadx_prf_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt3sel:8;
- uint64_t cnt2sel:8;
- uint64_t cnt1sel:8;
- uint64_t cnt0sel:8;
-#else
- uint64_t cnt0sel:8;
- uint64_t cnt1sel:8;
- uint64_t cnt2sel:8;
- uint64_t cnt3sel:8;
- uint64_t reserved_32_63:32;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_32_63:32,
+ __BITFIELD_FIELD(uint64_t cnt3sel:8,
+ __BITFIELD_FIELD(uint64_t cnt2sel:8,
+ __BITFIELD_FIELD(uint64_t cnt1sel:8,
+ __BITFIELD_FIELD(uint64_t cnt0sel:8,
+ ;)))))
} s;
- struct cvmx_l2c_tadx_prf_s cn61xx;
- struct cvmx_l2c_tadx_prf_s cn63xx;
- struct cvmx_l2c_tadx_prf_s cn63xxp1;
- struct cvmx_l2c_tadx_prf_s cn66xx;
- struct cvmx_l2c_tadx_prf_s cn68xx;
- struct cvmx_l2c_tadx_prf_s cn68xxp1;
- struct cvmx_l2c_tadx_prf_s cnf71xx;
};
union cvmx_l2c_tadx_tag {
uint64_t u64;
struct cvmx_l2c_tadx_tag_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_46_63:18;
- uint64_t ecc:6;
- uint64_t reserved_36_39:4;
- uint64_t tag:19;
- uint64_t reserved_4_16:13;
- uint64_t use:1;
- uint64_t valid:1;
- uint64_t dirty:1;
- uint64_t lock:1;
-#else
- uint64_t lock:1;
- uint64_t dirty:1;
- uint64_t valid:1;
- uint64_t use:1;
- uint64_t reserved_4_16:13;
- uint64_t tag:19;
- uint64_t reserved_36_39:4;
- uint64_t ecc:6;
- uint64_t reserved_46_63:18;
-#endif
- } s;
- struct cvmx_l2c_tadx_tag_s cn61xx;
- struct cvmx_l2c_tadx_tag_s cn63xx;
- struct cvmx_l2c_tadx_tag_s cn63xxp1;
- struct cvmx_l2c_tadx_tag_s cn66xx;
- struct cvmx_l2c_tadx_tag_s cn68xx;
- struct cvmx_l2c_tadx_tag_s cn68xxp1;
- struct cvmx_l2c_tadx_tag_s cnf71xx;
-};
-
-union cvmx_l2c_ver_id {
- uint64_t u64;
- struct cvmx_l2c_ver_id_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mask:64;
-#else
- uint64_t mask:64;
-#endif
- } s;
- struct cvmx_l2c_ver_id_s cn61xx;
- struct cvmx_l2c_ver_id_s cn63xx;
- struct cvmx_l2c_ver_id_s cn63xxp1;
- struct cvmx_l2c_ver_id_s cn66xx;
- struct cvmx_l2c_ver_id_s cn68xx;
- struct cvmx_l2c_ver_id_s cn68xxp1;
- struct cvmx_l2c_ver_id_s cnf71xx;
-};
-
-union cvmx_l2c_ver_iob {
- uint64_t u64;
- struct cvmx_l2c_ver_iob_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t mask:2;
-#else
- uint64_t mask:2;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_l2c_ver_iob_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t mask:1;
-#else
- uint64_t mask:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn61xx;
- struct cvmx_l2c_ver_iob_cn61xx cn63xx;
- struct cvmx_l2c_ver_iob_cn61xx cn63xxp1;
- struct cvmx_l2c_ver_iob_cn61xx cn66xx;
- struct cvmx_l2c_ver_iob_s cn68xx;
- struct cvmx_l2c_ver_iob_s cn68xxp1;
- struct cvmx_l2c_ver_iob_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_ver_msc {
- uint64_t u64;
- struct cvmx_l2c_ver_msc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t invl2:1;
- uint64_t dwb:1;
-#else
- uint64_t dwb:1;
- uint64_t invl2:1;
- uint64_t reserved_2_63:62;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+ __BITFIELD_FIELD(uint64_t ecc:6,
+ __BITFIELD_FIELD(uint64_t reserved_36_39:4,
+ __BITFIELD_FIELD(uint64_t tag:19,
+ __BITFIELD_FIELD(uint64_t reserved_4_16:13,
+ __BITFIELD_FIELD(uint64_t use:1,
+ __BITFIELD_FIELD(uint64_t valid:1,
+ __BITFIELD_FIELD(uint64_t dirty:1,
+ __BITFIELD_FIELD(uint64_t lock:1,
+ ;)))))))))
} s;
- struct cvmx_l2c_ver_msc_s cn61xx;
- struct cvmx_l2c_ver_msc_s cn63xx;
- struct cvmx_l2c_ver_msc_s cn66xx;
- struct cvmx_l2c_ver_msc_s cn68xx;
- struct cvmx_l2c_ver_msc_s cn68xxp1;
- struct cvmx_l2c_ver_msc_s cnf71xx;
};
-union cvmx_l2c_ver_pp {
- uint64_t u64;
- struct cvmx_l2c_ver_pp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t mask:32;
-#else
- uint64_t mask:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_ver_pp_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mask:4;
-#else
- uint64_t mask:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn61xx;
- struct cvmx_l2c_ver_pp_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t mask:6;
-#else
- uint64_t mask:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_l2c_ver_pp_cn63xx cn63xxp1;
- struct cvmx_l2c_ver_pp_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t mask:10;
-#else
- uint64_t mask:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_l2c_ver_pp_s cn68xx;
- struct cvmx_l2c_ver_pp_s cn68xxp1;
- struct cvmx_l2c_ver_pp_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_virtid_iobx {
- uint64_t u64;
- struct cvmx_l2c_virtid_iobx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t dwbid:6;
- uint64_t reserved_6_7:2;
- uint64_t id:6;
-#else
- uint64_t id:6;
- uint64_t reserved_6_7:2;
- uint64_t dwbid:6;
- uint64_t reserved_14_63:50;
-#endif
- } s;
- struct cvmx_l2c_virtid_iobx_s cn61xx;
- struct cvmx_l2c_virtid_iobx_s cn63xx;
- struct cvmx_l2c_virtid_iobx_s cn63xxp1;
- struct cvmx_l2c_virtid_iobx_s cn66xx;
- struct cvmx_l2c_virtid_iobx_s cn68xx;
- struct cvmx_l2c_virtid_iobx_s cn68xxp1;
- struct cvmx_l2c_virtid_iobx_s cnf71xx;
-};
-
-union cvmx_l2c_virtid_ppx {
- uint64_t u64;
- struct cvmx_l2c_virtid_ppx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t id:6;
-#else
- uint64_t id:6;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_l2c_virtid_ppx_s cn61xx;
- struct cvmx_l2c_virtid_ppx_s cn63xx;
- struct cvmx_l2c_virtid_ppx_s cn63xxp1;
- struct cvmx_l2c_virtid_ppx_s cn66xx;
- struct cvmx_l2c_virtid_ppx_s cn68xx;
- struct cvmx_l2c_virtid_ppx_s cn68xxp1;
- struct cvmx_l2c_virtid_ppx_s cnf71xx;
-};
-
-union cvmx_l2c_vrt_ctl {
- uint64_t u64;
- struct cvmx_l2c_vrt_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t ooberr:1;
- uint64_t reserved_7_7:1;
- uint64_t memsz:3;
- uint64_t numid:3;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t numid:3;
- uint64_t memsz:3;
- uint64_t reserved_7_7:1;
- uint64_t ooberr:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_vrt_ctl_s cn61xx;
- struct cvmx_l2c_vrt_ctl_s cn63xx;
- struct cvmx_l2c_vrt_ctl_s cn63xxp1;
- struct cvmx_l2c_vrt_ctl_s cn66xx;
- struct cvmx_l2c_vrt_ctl_s cn68xx;
- struct cvmx_l2c_vrt_ctl_s cn68xxp1;
- struct cvmx_l2c_vrt_ctl_s cnf71xx;
-};
-
-union cvmx_l2c_vrt_memx {
- uint64_t u64;
- struct cvmx_l2c_vrt_memx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t parity:4;
- uint64_t data:32;
-#else
- uint64_t data:32;
- uint64_t parity:4;
- uint64_t reserved_36_63:28;
-#endif
- } s;
- struct cvmx_l2c_vrt_memx_s cn61xx;
- struct cvmx_l2c_vrt_memx_s cn63xx;
- struct cvmx_l2c_vrt_memx_s cn63xxp1;
- struct cvmx_l2c_vrt_memx_s cn66xx;
- struct cvmx_l2c_vrt_memx_s cn68xx;
- struct cvmx_l2c_vrt_memx_s cn68xxp1;
- struct cvmx_l2c_vrt_memx_s cnf71xx;
-};
-
-union cvmx_l2c_wpar_iobx {
- uint64_t u64;
- struct cvmx_l2c_wpar_iobx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mask:16;
-#else
- uint64_t mask:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_l2c_wpar_iobx_s cn61xx;
- struct cvmx_l2c_wpar_iobx_s cn63xx;
- struct cvmx_l2c_wpar_iobx_s cn63xxp1;
- struct cvmx_l2c_wpar_iobx_s cn66xx;
- struct cvmx_l2c_wpar_iobx_s cn68xx;
- struct cvmx_l2c_wpar_iobx_s cn68xxp1;
- struct cvmx_l2c_wpar_iobx_s cnf71xx;
-};
-
-union cvmx_l2c_wpar_ppx {
- uint64_t u64;
- struct cvmx_l2c_wpar_ppx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mask:16;
-#else
- uint64_t mask:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_l2c_wpar_ppx_s cn61xx;
- struct cvmx_l2c_wpar_ppx_s cn63xx;
- struct cvmx_l2c_wpar_ppx_s cn63xxp1;
- struct cvmx_l2c_wpar_ppx_s cn66xx;
- struct cvmx_l2c_wpar_ppx_s cn68xx;
- struct cvmx_l2c_wpar_ppx_s cn68xxp1;
- struct cvmx_l2c_wpar_ppx_s cnf71xx;
-};
-
-union cvmx_l2c_xmcx_pfc {
- uint64_t u64;
- struct cvmx_l2c_xmcx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_xmcx_pfc_s cn61xx;
- struct cvmx_l2c_xmcx_pfc_s cn63xx;
- struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
- struct cvmx_l2c_xmcx_pfc_s cn66xx;
- struct cvmx_l2c_xmcx_pfc_s cn68xx;
- struct cvmx_l2c_xmcx_pfc_s cn68xxp1;
- struct cvmx_l2c_xmcx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_xmc_cmd {
+union cvmx_l2c_lckbase {
uint64_t u64;
- struct cvmx_l2c_xmc_cmd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t inuse:1;
- uint64_t cmd:6;
- uint64_t reserved_38_56:19;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_56:19;
- uint64_t cmd:6;
- uint64_t inuse:1;
-#endif
+ struct cvmx_l2c_lckbase_s {
+ __BITFIELD_FIELD(uint64_t reserved_31_63:33,
+ __BITFIELD_FIELD(uint64_t lck_base:27,
+ __BITFIELD_FIELD(uint64_t reserved_1_3:3,
+ __BITFIELD_FIELD(uint64_t lck_ena:1,
+ ;))))
} s;
- struct cvmx_l2c_xmc_cmd_s cn61xx;
- struct cvmx_l2c_xmc_cmd_s cn63xx;
- struct cvmx_l2c_xmc_cmd_s cn63xxp1;
- struct cvmx_l2c_xmc_cmd_s cn66xx;
- struct cvmx_l2c_xmc_cmd_s cn68xx;
- struct cvmx_l2c_xmc_cmd_s cn68xxp1;
- struct cvmx_l2c_xmc_cmd_s cnf71xx;
};
-union cvmx_l2c_xmdx_pfc {
+union cvmx_l2c_lckoff {
uint64_t u64;
- struct cvmx_l2c_xmdx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
+ struct cvmx_l2c_lckoff_s {
+ __BITFIELD_FIELD(uint64_t reserved_10_63:54,
+ __BITFIELD_FIELD(uint64_t lck_offset:10,
+ ;))
} s;
- struct cvmx_l2c_xmdx_pfc_s cn61xx;
- struct cvmx_l2c_xmdx_pfc_s cn63xx;
- struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
- struct cvmx_l2c_xmdx_pfc_s cn66xx;
- struct cvmx_l2c_xmdx_pfc_s cn68xx;
- struct cvmx_l2c_xmdx_pfc_s cn68xxp1;
- struct cvmx_l2c_xmdx_pfc_s cnf71xx;
};
#endif
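
A note on the pattern the hunks above converge on: __BITFIELD_FIELD comes
from <uapi/asm/bitfield.h> and folds the two endianness-specific field
orders into one nested declaration. A minimal sketch of the idea, written
from the macro's documented behaviour (the kernel's actual definition
lives in arch/mips/include/uapi/asm/bitfield.h):

#if defined(__MIPSEB__)
/* Big-endian: declare this field first, then the remaining ones. */
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#elif defined(__MIPSEL__)
/* Little-endian: declare the remaining fields first, this one last. */
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#endif

A nested form such as

	__BITFIELD_FIELD(uint64_t hi:32,
	__BITFIELD_FIELD(uint64_t lo:32,
	;))

therefore declares hi before lo on big-endian builds and lo before hi on
little-endian ones. The bare ";" is the innermost "more" argument, which
is why every converted struct body ends in a run of closing parentheses.
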
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index ddb429210a0e..02c4479a90c8 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2010 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -33,48 +33,39 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
-#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
+#include <uapi/asm/bitfield.h>
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro */
-#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
+/* Based on 128 byte cache line size */
+#define CVMX_L2C_IDX_ADDR_SHIFT 7
#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
-#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + \
+ cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
-#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
-/* Defines for Virtualizations, valid only from Octeon II onwards. */
-#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
-#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS 1
union cvmx_l2c_tag {
uint64_t u64;
struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:28;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:32; /* Phys mem (not all bits valid) */
-#else
- uint64_t addr:32; /* Phys mem (not all bits valid) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:28;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:28,
+ __BITFIELD_FIELD(uint64_t V:1,
+ __BITFIELD_FIELD(uint64_t D:1,
+ __BITFIELD_FIELD(uint64_t L:1,
+ __BITFIELD_FIELD(uint64_t U:1,
+ __BITFIELD_FIELD(uint64_t addr:32,
+ ;))))))
} s;
};
-/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
-#define CVMX_L2C_TADS 1
-
- /* L2C Performance Counter events. */
+/* L2C Performance Counter events. */
enum cvmx_l2c_event {
CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
@@ -175,7 +166,8 @@ enum cvmx_l2c_tad_event {
*
* @note The routine does not clear the counter.
*/
-void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read);
/**
* Read the given L2 Cache performance counter. The counter must be configured
@@ -307,8 +299,11 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
/* Wrapper providing a deprecated old function name */
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
+ __attribute__((deprecated));
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
{
return cvmx_l2c_get_tag(association, index);
}
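
The interface kept in cvmx-l2c.h above still hands tags back through
union cvmx_l2c_tag, whose single-letter flags were spelled out in the
removed comments (V = valid, D = dirty, L = locked, U = use/LRU bit,
addr = physical address bits). A hedged usage sketch for kernel code,
assuming the header is reachable as <asm/octeon/cvmx-l2c.h>:

#include <asm/octeon/cvmx-l2c.h>

/* Return nonzero if the line at (way, set) is both valid and dirty. */
static int l2c_line_is_dirty(uint32_t association, uint32_t index)
{
	union cvmx_l2c_tag tag = cvmx_l2c_get_tag(association, index);

	return tag.s.V && tag.s.D;
}

Callers of the old spelling cvmx_get_l2c_tag() still compile, but the
__attribute__((deprecated)) wrapper now flags each call site with a
warning, which is the point of keeping the alias.
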
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
deleted file mode 100644
index 11a456215638..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
+++ /dev/null
@@ -1,526 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_L2D_DEFS_H__
-#define __CVMX_L2D_DEFS_H__
-
-#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
-#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
-#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
-#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
-#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
-#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
-#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
-#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
-#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
-#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
-#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
-#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
-
-union cvmx_l2d_bst0 {
- uint64_t u64;
- struct cvmx_l2d_bst0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_35_63:29;
- uint64_t ftl:1;
- uint64_t q0stat:34;
-#else
- uint64_t q0stat:34;
- uint64_t ftl:1;
- uint64_t reserved_35_63:29;
-#endif
- } s;
- struct cvmx_l2d_bst0_s cn30xx;
- struct cvmx_l2d_bst0_s cn31xx;
- struct cvmx_l2d_bst0_s cn38xx;
- struct cvmx_l2d_bst0_s cn38xxp2;
- struct cvmx_l2d_bst0_s cn50xx;
- struct cvmx_l2d_bst0_s cn52xx;
- struct cvmx_l2d_bst0_s cn52xxp1;
- struct cvmx_l2d_bst0_s cn56xx;
- struct cvmx_l2d_bst0_s cn56xxp1;
- struct cvmx_l2d_bst0_s cn58xx;
- struct cvmx_l2d_bst0_s cn58xxp1;
-};
-
-union cvmx_l2d_bst1 {
- uint64_t u64;
- struct cvmx_l2d_bst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q1stat:34;
-#else
- uint64_t q1stat:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_bst1_s cn30xx;
- struct cvmx_l2d_bst1_s cn31xx;
- struct cvmx_l2d_bst1_s cn38xx;
- struct cvmx_l2d_bst1_s cn38xxp2;
- struct cvmx_l2d_bst1_s cn50xx;
- struct cvmx_l2d_bst1_s cn52xx;
- struct cvmx_l2d_bst1_s cn52xxp1;
- struct cvmx_l2d_bst1_s cn56xx;
- struct cvmx_l2d_bst1_s cn56xxp1;
- struct cvmx_l2d_bst1_s cn58xx;
- struct cvmx_l2d_bst1_s cn58xxp1;
-};
-
-union cvmx_l2d_bst2 {
- uint64_t u64;
- struct cvmx_l2d_bst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q2stat:34;
-#else
- uint64_t q2stat:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_bst2_s cn30xx;
- struct cvmx_l2d_bst2_s cn31xx;
- struct cvmx_l2d_bst2_s cn38xx;
- struct cvmx_l2d_bst2_s cn38xxp2;
- struct cvmx_l2d_bst2_s cn50xx;
- struct cvmx_l2d_bst2_s cn52xx;
- struct cvmx_l2d_bst2_s cn52xxp1;
- struct cvmx_l2d_bst2_s cn56xx;
- struct cvmx_l2d_bst2_s cn56xxp1;
- struct cvmx_l2d_bst2_s cn58xx;
- struct cvmx_l2d_bst2_s cn58xxp1;
-};
-
-union cvmx_l2d_bst3 {
- uint64_t u64;
- struct cvmx_l2d_bst3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q3stat:34;
-#else
- uint64_t q3stat:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_bst3_s cn30xx;
- struct cvmx_l2d_bst3_s cn31xx;
- struct cvmx_l2d_bst3_s cn38xx;
- struct cvmx_l2d_bst3_s cn38xxp2;
- struct cvmx_l2d_bst3_s cn50xx;
- struct cvmx_l2d_bst3_s cn52xx;
- struct cvmx_l2d_bst3_s cn52xxp1;
- struct cvmx_l2d_bst3_s cn56xx;
- struct cvmx_l2d_bst3_s cn56xxp1;
- struct cvmx_l2d_bst3_s cn58xx;
- struct cvmx_l2d_bst3_s cn58xxp1;
-};
-
-union cvmx_l2d_err {
- uint64_t u64;
- struct cvmx_l2d_err_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t bmhclsel:1;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t bmhclsel:1;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_l2d_err_s cn30xx;
- struct cvmx_l2d_err_s cn31xx;
- struct cvmx_l2d_err_s cn38xx;
- struct cvmx_l2d_err_s cn38xxp2;
- struct cvmx_l2d_err_s cn50xx;
- struct cvmx_l2d_err_s cn52xx;
- struct cvmx_l2d_err_s cn52xxp1;
- struct cvmx_l2d_err_s cn56xx;
- struct cvmx_l2d_err_s cn56xxp1;
- struct cvmx_l2d_err_s cn58xx;
- struct cvmx_l2d_err_s cn58xxp1;
-};
-
-union cvmx_l2d_fadr {
- uint64_t u64;
- struct cvmx_l2d_fadr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t fadru:1;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t fadr:11;
-#else
- uint64_t fadr:11;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t fadru:1;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_l2d_fadr_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t reserved_13_13:1;
- uint64_t fset:2;
- uint64_t reserved_9_10:2;
- uint64_t fadr:9;
-#else
- uint64_t fadr:9;
- uint64_t reserved_9_10:2;
- uint64_t fset:2;
- uint64_t reserved_13_13:1;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn30xx;
- struct cvmx_l2d_fadr_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t reserved_13_13:1;
- uint64_t fset:2;
- uint64_t reserved_10_10:1;
- uint64_t fadr:10;
-#else
- uint64_t fadr:10;
- uint64_t reserved_10_10:1;
- uint64_t fset:2;
- uint64_t reserved_13_13:1;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn31xx;
- struct cvmx_l2d_fadr_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t fadr:11;
-#else
- uint64_t fadr:11;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn38xx;
- struct cvmx_l2d_fadr_cn38xx cn38xxp2;
- struct cvmx_l2d_fadr_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t reserved_8_10:3;
- uint64_t fadr:8;
-#else
- uint64_t fadr:8;
- uint64_t reserved_8_10:3;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn50xx;
- struct cvmx_l2d_fadr_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t reserved_10_10:1;
- uint64_t fadr:10;
-#else
- uint64_t fadr:10;
- uint64_t reserved_10_10:1;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn52xx;
- struct cvmx_l2d_fadr_cn52xx cn52xxp1;
- struct cvmx_l2d_fadr_s cn56xx;
- struct cvmx_l2d_fadr_s cn56xxp1;
- struct cvmx_l2d_fadr_s cn58xx;
- struct cvmx_l2d_fadr_s cn58xxp1;
-};
-
-union cvmx_l2d_fsyn0 {
- uint64_t u64;
- struct cvmx_l2d_fsyn0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t fsyn_ow1:10;
- uint64_t fsyn_ow0:10;
-#else
- uint64_t fsyn_ow0:10;
- uint64_t fsyn_ow1:10;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_l2d_fsyn0_s cn30xx;
- struct cvmx_l2d_fsyn0_s cn31xx;
- struct cvmx_l2d_fsyn0_s cn38xx;
- struct cvmx_l2d_fsyn0_s cn38xxp2;
- struct cvmx_l2d_fsyn0_s cn50xx;
- struct cvmx_l2d_fsyn0_s cn52xx;
- struct cvmx_l2d_fsyn0_s cn52xxp1;
- struct cvmx_l2d_fsyn0_s cn56xx;
- struct cvmx_l2d_fsyn0_s cn56xxp1;
- struct cvmx_l2d_fsyn0_s cn58xx;
- struct cvmx_l2d_fsyn0_s cn58xxp1;
-};
-
-union cvmx_l2d_fsyn1 {
- uint64_t u64;
- struct cvmx_l2d_fsyn1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t fsyn_ow3:10;
- uint64_t fsyn_ow2:10;
-#else
- uint64_t fsyn_ow2:10;
- uint64_t fsyn_ow3:10;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_l2d_fsyn1_s cn30xx;
- struct cvmx_l2d_fsyn1_s cn31xx;
- struct cvmx_l2d_fsyn1_s cn38xx;
- struct cvmx_l2d_fsyn1_s cn38xxp2;
- struct cvmx_l2d_fsyn1_s cn50xx;
- struct cvmx_l2d_fsyn1_s cn52xx;
- struct cvmx_l2d_fsyn1_s cn52xxp1;
- struct cvmx_l2d_fsyn1_s cn56xx;
- struct cvmx_l2d_fsyn1_s cn56xxp1;
- struct cvmx_l2d_fsyn1_s cn58xx;
- struct cvmx_l2d_fsyn1_s cn58xxp1;
-};
-
-union cvmx_l2d_fus0 {
- uint64_t u64;
- struct cvmx_l2d_fus0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q0fus:34;
-#else
- uint64_t q0fus:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_fus0_s cn30xx;
- struct cvmx_l2d_fus0_s cn31xx;
- struct cvmx_l2d_fus0_s cn38xx;
- struct cvmx_l2d_fus0_s cn38xxp2;
- struct cvmx_l2d_fus0_s cn50xx;
- struct cvmx_l2d_fus0_s cn52xx;
- struct cvmx_l2d_fus0_s cn52xxp1;
- struct cvmx_l2d_fus0_s cn56xx;
- struct cvmx_l2d_fus0_s cn56xxp1;
- struct cvmx_l2d_fus0_s cn58xx;
- struct cvmx_l2d_fus0_s cn58xxp1;
-};
-
-union cvmx_l2d_fus1 {
- uint64_t u64;
- struct cvmx_l2d_fus1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q1fus:34;
-#else
- uint64_t q1fus:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_fus1_s cn30xx;
- struct cvmx_l2d_fus1_s cn31xx;
- struct cvmx_l2d_fus1_s cn38xx;
- struct cvmx_l2d_fus1_s cn38xxp2;
- struct cvmx_l2d_fus1_s cn50xx;
- struct cvmx_l2d_fus1_s cn52xx;
- struct cvmx_l2d_fus1_s cn52xxp1;
- struct cvmx_l2d_fus1_s cn56xx;
- struct cvmx_l2d_fus1_s cn56xxp1;
- struct cvmx_l2d_fus1_s cn58xx;
- struct cvmx_l2d_fus1_s cn58xxp1;
-};
-
-union cvmx_l2d_fus2 {
- uint64_t u64;
- struct cvmx_l2d_fus2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q2fus:34;
-#else
- uint64_t q2fus:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_fus2_s cn30xx;
- struct cvmx_l2d_fus2_s cn31xx;
- struct cvmx_l2d_fus2_s cn38xx;
- struct cvmx_l2d_fus2_s cn38xxp2;
- struct cvmx_l2d_fus2_s cn50xx;
- struct cvmx_l2d_fus2_s cn52xx;
- struct cvmx_l2d_fus2_s cn52xxp1;
- struct cvmx_l2d_fus2_s cn56xx;
- struct cvmx_l2d_fus2_s cn56xxp1;
- struct cvmx_l2d_fus2_s cn58xx;
- struct cvmx_l2d_fus2_s cn58xxp1;
-};
-
-union cvmx_l2d_fus3 {
- uint64_t u64;
- struct cvmx_l2d_fus3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_34_36:3;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t reserved_34_36:3;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } s;
- struct cvmx_l2d_fus3_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_35_63:29;
- uint64_t crip_64k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_64k:1;
- uint64_t reserved_35_63:29;
-#endif
- } cn30xx;
- struct cvmx_l2d_fus3_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_35_63:29;
- uint64_t crip_128k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_128k:1;
- uint64_t reserved_35_63:29;
-#endif
- } cn31xx;
- struct cvmx_l2d_fus3_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t crip_256k:1;
- uint64_t crip_512k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_512k:1;
- uint64_t crip_256k:1;
- uint64_t reserved_36_63:28;
-#endif
- } cn38xx;
- struct cvmx_l2d_fus3_cn38xx cn38xxp2;
- struct cvmx_l2d_fus3_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_36_36:1;
- uint64_t crip_32k:1;
- uint64_t crip_64k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_64k:1;
- uint64_t crip_32k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } cn50xx;
- struct cvmx_l2d_fus3_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_36_36:1;
- uint64_t crip_128k:1;
- uint64_t crip_256k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_256k:1;
- uint64_t crip_128k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } cn52xx;
- struct cvmx_l2d_fus3_cn52xx cn52xxp1;
- struct cvmx_l2d_fus3_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_36_36:1;
- uint64_t crip_512k:1;
- uint64_t crip_1024k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_1024k:1;
- uint64_t crip_512k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } cn56xx;
- struct cvmx_l2d_fus3_cn56xx cn56xxp1;
- struct cvmx_l2d_fus3_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_39_63:25;
- uint64_t ema_ctl:2;
- uint64_t reserved_36_36:1;
- uint64_t crip_512k:1;
- uint64_t crip_1024k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_1024k:1;
- uint64_t crip_512k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:2;
- uint64_t reserved_39_63:25;
-#endif
- } cn58xx;
- struct cvmx_l2d_fus3_cn58xx cn58xxp1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
index 83ce22c080e6..fe50671fd1bb 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,210 +28,116 @@
#ifndef __CVMX_L2T_DEFS_H__
#define __CVMX_L2T_DEFS_H__
-#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
+
union cvmx_l2t_err {
uint64_t u64;
struct cvmx_l2t_err_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t fadru:1;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t fadr:10;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:10;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t fadru:1;
- uint64_t reserved_29_63:35;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_29_63:35,
+ __BITFIELD_FIELD(uint64_t fadru:1,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t fadr:10,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
} s;
struct cvmx_l2t_err_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t reserved_23_23:1;
- uint64_t fset:2;
- uint64_t reserved_19_20:2;
- uint64_t fadr:8;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:8;
- uint64_t reserved_19_20:2;
- uint64_t fset:2;
- uint64_t reserved_23_23:1;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint64_t fset:2,
+ __BITFIELD_FIELD(uint64_t reserved_19_20:2,
+ __BITFIELD_FIELD(uint64_t fadr:8,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))))
} cn30xx;
struct cvmx_l2t_err_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t reserved_23_23:1;
- uint64_t fset:2;
- uint64_t reserved_20_20:1;
- uint64_t fadr:9;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:9;
- uint64_t reserved_20_20:1;
- uint64_t fset:2;
- uint64_t reserved_23_23:1;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint64_t fset:2,
+ __BITFIELD_FIELD(uint64_t reserved_20_20:1,
+ __BITFIELD_FIELD(uint64_t fadr:9,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))))
} cn31xx;
struct cvmx_l2t_err_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t fadr:10;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:10;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t fadr:10,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))
} cn38xx;
struct cvmx_l2t_err_cn38xx cn38xxp2;
struct cvmx_l2t_err_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t reserved_18_20:3;
- uint64_t fadr:7;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:7;
- uint64_t reserved_18_20:3;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t reserved_18_20:3,
+ __BITFIELD_FIELD(uint64_t fadr:7,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
} cn50xx;
struct cvmx_l2t_err_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t reserved_20_20:1;
- uint64_t fadr:9;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:9;
- uint64_t reserved_20_20:1;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t reserved_20_20:1,
+ __BITFIELD_FIELD(uint64_t fadr:9,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
} cn52xx;
struct cvmx_l2t_err_cn52xx cn52xxp1;
struct cvmx_l2t_err_s cn56xx;
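
As with the other conversions, the cvmx_l2t_err fields land on the same
bit positions on either endianness, so the union decodes the register
directly. A short sketch, assuming the usual cvmx_read_csr() CSR
accessor from the OCTEON support code (CVMX_L2T_ERR is defined in the
hunk above):

#include <linux/printk.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2t-defs.h>

/* Log L2 tag ECC errors together with the failure syndrome. */
static void l2t_check_ecc(void)
{
	union cvmx_l2t_err err;

	err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
	if (err.s.sec_err || err.s.ded_err)
		pr_err("L2 tag ECC error, syndrome 0x%llx\n",
		       (unsigned long long)err.s.fsyn);
}
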
diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
index 4bce393391e2..e2dce1acf029 100644
--- a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,3148 +28,341 @@
#ifndef __CVMX_PCIERCX_DEFS_H__
#define __CVMX_PCIERCX_DEFS_H__
-#define CVMX_PCIERCX_CFG000(block_id) (0x0000000000000000ull)
+#include <uapi/asm/bitfield.h>
+
#define CVMX_PCIERCX_CFG001(block_id) (0x0000000000000004ull)
-#define CVMX_PCIERCX_CFG002(block_id) (0x0000000000000008ull)
-#define CVMX_PCIERCX_CFG003(block_id) (0x000000000000000Cull)
-#define CVMX_PCIERCX_CFG004(block_id) (0x0000000000000010ull)
-#define CVMX_PCIERCX_CFG005(block_id) (0x0000000000000014ull)
#define CVMX_PCIERCX_CFG006(block_id) (0x0000000000000018ull)
-#define CVMX_PCIERCX_CFG007(block_id) (0x000000000000001Cull)
#define CVMX_PCIERCX_CFG008(block_id) (0x0000000000000020ull)
#define CVMX_PCIERCX_CFG009(block_id) (0x0000000000000024ull)
#define CVMX_PCIERCX_CFG010(block_id) (0x0000000000000028ull)
#define CVMX_PCIERCX_CFG011(block_id) (0x000000000000002Cull)
-#define CVMX_PCIERCX_CFG012(block_id) (0x0000000000000030ull)
-#define CVMX_PCIERCX_CFG013(block_id) (0x0000000000000034ull)
-#define CVMX_PCIERCX_CFG014(block_id) (0x0000000000000038ull)
-#define CVMX_PCIERCX_CFG015(block_id) (0x000000000000003Cull)
-#define CVMX_PCIERCX_CFG016(block_id) (0x0000000000000040ull)
-#define CVMX_PCIERCX_CFG017(block_id) (0x0000000000000044ull)
-#define CVMX_PCIERCX_CFG020(block_id) (0x0000000000000050ull)
-#define CVMX_PCIERCX_CFG021(block_id) (0x0000000000000054ull)
-#define CVMX_PCIERCX_CFG022(block_id) (0x0000000000000058ull)
-#define CVMX_PCIERCX_CFG023(block_id) (0x000000000000005Cull)
-#define CVMX_PCIERCX_CFG028(block_id) (0x0000000000000070ull)
-#define CVMX_PCIERCX_CFG029(block_id) (0x0000000000000074ull)
#define CVMX_PCIERCX_CFG030(block_id) (0x0000000000000078ull)
#define CVMX_PCIERCX_CFG031(block_id) (0x000000000000007Cull)
#define CVMX_PCIERCX_CFG032(block_id) (0x0000000000000080ull)
-#define CVMX_PCIERCX_CFG033(block_id) (0x0000000000000084ull)
#define CVMX_PCIERCX_CFG034(block_id) (0x0000000000000088ull)
#define CVMX_PCIERCX_CFG035(block_id) (0x000000000000008Cull)
-#define CVMX_PCIERCX_CFG036(block_id) (0x0000000000000090ull)
-#define CVMX_PCIERCX_CFG037(block_id) (0x0000000000000094ull)
-#define CVMX_PCIERCX_CFG038(block_id) (0x0000000000000098ull)
-#define CVMX_PCIERCX_CFG039(block_id) (0x000000000000009Cull)
#define CVMX_PCIERCX_CFG040(block_id) (0x00000000000000A0ull)
-#define CVMX_PCIERCX_CFG041(block_id) (0x00000000000000A4ull)
-#define CVMX_PCIERCX_CFG042(block_id) (0x00000000000000A8ull)
-#define CVMX_PCIERCX_CFG064(block_id) (0x0000000000000100ull)
-#define CVMX_PCIERCX_CFG065(block_id) (0x0000000000000104ull)
#define CVMX_PCIERCX_CFG066(block_id) (0x0000000000000108ull)
-#define CVMX_PCIERCX_CFG067(block_id) (0x000000000000010Cull)
-#define CVMX_PCIERCX_CFG068(block_id) (0x0000000000000110ull)
#define CVMX_PCIERCX_CFG069(block_id) (0x0000000000000114ull)
#define CVMX_PCIERCX_CFG070(block_id) (0x0000000000000118ull)
-#define CVMX_PCIERCX_CFG071(block_id) (0x000000000000011Cull)
-#define CVMX_PCIERCX_CFG072(block_id) (0x0000000000000120ull)
-#define CVMX_PCIERCX_CFG073(block_id) (0x0000000000000124ull)
-#define CVMX_PCIERCX_CFG074(block_id) (0x0000000000000128ull)
#define CVMX_PCIERCX_CFG075(block_id) (0x000000000000012Cull)
-#define CVMX_PCIERCX_CFG076(block_id) (0x0000000000000130ull)
-#define CVMX_PCIERCX_CFG077(block_id) (0x0000000000000134ull)
#define CVMX_PCIERCX_CFG448(block_id) (0x0000000000000700ull)
-#define CVMX_PCIERCX_CFG449(block_id) (0x0000000000000704ull)
-#define CVMX_PCIERCX_CFG450(block_id) (0x0000000000000708ull)
-#define CVMX_PCIERCX_CFG451(block_id) (0x000000000000070Cull)
#define CVMX_PCIERCX_CFG452(block_id) (0x0000000000000710ull)
-#define CVMX_PCIERCX_CFG453(block_id) (0x0000000000000714ull)
-#define CVMX_PCIERCX_CFG454(block_id) (0x0000000000000718ull)
#define CVMX_PCIERCX_CFG455(block_id) (0x000000000000071Cull)
-#define CVMX_PCIERCX_CFG456(block_id) (0x0000000000000720ull)
-#define CVMX_PCIERCX_CFG458(block_id) (0x0000000000000728ull)
-#define CVMX_PCIERCX_CFG459(block_id) (0x000000000000072Cull)
-#define CVMX_PCIERCX_CFG460(block_id) (0x0000000000000730ull)
-#define CVMX_PCIERCX_CFG461(block_id) (0x0000000000000734ull)
-#define CVMX_PCIERCX_CFG462(block_id) (0x0000000000000738ull)
-#define CVMX_PCIERCX_CFG463(block_id) (0x000000000000073Cull)
-#define CVMX_PCIERCX_CFG464(block_id) (0x0000000000000740ull)
-#define CVMX_PCIERCX_CFG465(block_id) (0x0000000000000744ull)
-#define CVMX_PCIERCX_CFG466(block_id) (0x0000000000000748ull)
-#define CVMX_PCIERCX_CFG467(block_id) (0x000000000000074Cull)
-#define CVMX_PCIERCX_CFG468(block_id) (0x0000000000000750ull)
-#define CVMX_PCIERCX_CFG490(block_id) (0x00000000000007A8ull)
-#define CVMX_PCIERCX_CFG491(block_id) (0x00000000000007ACull)
-#define CVMX_PCIERCX_CFG492(block_id) (0x00000000000007B0ull)
#define CVMX_PCIERCX_CFG515(block_id) (0x000000000000080Cull)
-#define CVMX_PCIERCX_CFG516(block_id) (0x0000000000000810ull)
-#define CVMX_PCIERCX_CFG517(block_id) (0x0000000000000814ull)
-
-union cvmx_pciercx_cfg000 {
- uint32_t u32;
- struct cvmx_pciercx_cfg000_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t devid:16;
- uint32_t vendid:16;
-#else
- uint32_t vendid:16;
- uint32_t devid:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg000_s cn52xx;
- struct cvmx_pciercx_cfg000_s cn52xxp1;
- struct cvmx_pciercx_cfg000_s cn56xx;
- struct cvmx_pciercx_cfg000_s cn56xxp1;
- struct cvmx_pciercx_cfg000_s cn61xx;
- struct cvmx_pciercx_cfg000_s cn63xx;
- struct cvmx_pciercx_cfg000_s cn63xxp1;
- struct cvmx_pciercx_cfg000_s cn66xx;
- struct cvmx_pciercx_cfg000_s cn68xx;
- struct cvmx_pciercx_cfg000_s cn68xxp1;
- struct cvmx_pciercx_cfg000_s cnf71xx;
-};
union cvmx_pciercx_cfg001 {
uint32_t u32;
struct cvmx_pciercx_cfg001_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dpe:1;
- uint32_t sse:1;
- uint32_t rma:1;
- uint32_t rta:1;
- uint32_t sta:1;
- uint32_t devt:2;
- uint32_t mdpe:1;
- uint32_t fbb:1;
- uint32_t reserved_22_22:1;
- uint32_t m66:1;
- uint32_t cl:1;
- uint32_t i_stat:1;
- uint32_t reserved_11_18:8;
- uint32_t i_dis:1;
- uint32_t fbbe:1;
- uint32_t see:1;
- uint32_t ids_wcc:1;
- uint32_t per:1;
- uint32_t vps:1;
- uint32_t mwice:1;
- uint32_t scse:1;
- uint32_t me:1;
- uint32_t msae:1;
- uint32_t isae:1;
-#else
- uint32_t isae:1;
- uint32_t msae:1;
- uint32_t me:1;
- uint32_t scse:1;
- uint32_t mwice:1;
- uint32_t vps:1;
- uint32_t per:1;
- uint32_t ids_wcc:1;
- uint32_t see:1;
- uint32_t fbbe:1;
- uint32_t i_dis:1;
- uint32_t reserved_11_18:8;
- uint32_t i_stat:1;
- uint32_t cl:1;
- uint32_t m66:1;
- uint32_t reserved_22_22:1;
- uint32_t fbb:1;
- uint32_t mdpe:1;
- uint32_t devt:2;
- uint32_t sta:1;
- uint32_t rta:1;
- uint32_t rma:1;
- uint32_t sse:1;
- uint32_t dpe:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg001_s cn52xx;
- struct cvmx_pciercx_cfg001_s cn52xxp1;
- struct cvmx_pciercx_cfg001_s cn56xx;
- struct cvmx_pciercx_cfg001_s cn56xxp1;
- struct cvmx_pciercx_cfg001_s cn61xx;
- struct cvmx_pciercx_cfg001_s cn63xx;
- struct cvmx_pciercx_cfg001_s cn63xxp1;
- struct cvmx_pciercx_cfg001_s cn66xx;
- struct cvmx_pciercx_cfg001_s cn68xx;
- struct cvmx_pciercx_cfg001_s cn68xxp1;
- struct cvmx_pciercx_cfg001_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg002 {
- uint32_t u32;
- struct cvmx_pciercx_cfg002_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t bcc:8;
- uint32_t sc:8;
- uint32_t pi:8;
- uint32_t rid:8;
-#else
- uint32_t rid:8;
- uint32_t pi:8;
- uint32_t sc:8;
- uint32_t bcc:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg002_s cn52xx;
- struct cvmx_pciercx_cfg002_s cn52xxp1;
- struct cvmx_pciercx_cfg002_s cn56xx;
- struct cvmx_pciercx_cfg002_s cn56xxp1;
- struct cvmx_pciercx_cfg002_s cn61xx;
- struct cvmx_pciercx_cfg002_s cn63xx;
- struct cvmx_pciercx_cfg002_s cn63xxp1;
- struct cvmx_pciercx_cfg002_s cn66xx;
- struct cvmx_pciercx_cfg002_s cn68xx;
- struct cvmx_pciercx_cfg002_s cn68xxp1;
- struct cvmx_pciercx_cfg002_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg003 {
- uint32_t u32;
- struct cvmx_pciercx_cfg003_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t bist:8;
- uint32_t mfd:1;
- uint32_t chf:7;
- uint32_t lt:8;
- uint32_t cls:8;
-#else
- uint32_t cls:8;
- uint32_t lt:8;
- uint32_t chf:7;
- uint32_t mfd:1;
- uint32_t bist:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg003_s cn52xx;
- struct cvmx_pciercx_cfg003_s cn52xxp1;
- struct cvmx_pciercx_cfg003_s cn56xx;
- struct cvmx_pciercx_cfg003_s cn56xxp1;
- struct cvmx_pciercx_cfg003_s cn61xx;
- struct cvmx_pciercx_cfg003_s cn63xx;
- struct cvmx_pciercx_cfg003_s cn63xxp1;
- struct cvmx_pciercx_cfg003_s cn66xx;
- struct cvmx_pciercx_cfg003_s cn68xx;
- struct cvmx_pciercx_cfg003_s cn68xxp1;
- struct cvmx_pciercx_cfg003_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg004 {
- uint32_t u32;
- struct cvmx_pciercx_cfg004_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
+ __BITFIELD_FIELD(uint32_t dpe:1,
+ __BITFIELD_FIELD(uint32_t sse:1,
+ __BITFIELD_FIELD(uint32_t rma:1,
+ __BITFIELD_FIELD(uint32_t rta:1,
+ __BITFIELD_FIELD(uint32_t sta:1,
+ __BITFIELD_FIELD(uint32_t devt:2,
+ __BITFIELD_FIELD(uint32_t mdpe:1,
+ __BITFIELD_FIELD(uint32_t fbb:1,
+ __BITFIELD_FIELD(uint32_t reserved_22_22:1,
+ __BITFIELD_FIELD(uint32_t m66:1,
+ __BITFIELD_FIELD(uint32_t cl:1,
+ __BITFIELD_FIELD(uint32_t i_stat:1,
+ __BITFIELD_FIELD(uint32_t reserved_11_18:8,
+ __BITFIELD_FIELD(uint32_t i_dis:1,
+ __BITFIELD_FIELD(uint32_t fbbe:1,
+ __BITFIELD_FIELD(uint32_t see:1,
+ __BITFIELD_FIELD(uint32_t ids_wcc:1,
+ __BITFIELD_FIELD(uint32_t per:1,
+ __BITFIELD_FIELD(uint32_t vps:1,
+ __BITFIELD_FIELD(uint32_t mwice:1,
+ __BITFIELD_FIELD(uint32_t scse:1,
+ __BITFIELD_FIELD(uint32_t me:1,
+ __BITFIELD_FIELD(uint32_t msae:1,
+ __BITFIELD_FIELD(uint32_t isae:1,
+ ;))))))))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg004_s cn52xx;
- struct cvmx_pciercx_cfg004_s cn52xxp1;
- struct cvmx_pciercx_cfg004_s cn56xx;
- struct cvmx_pciercx_cfg004_s cn56xxp1;
- struct cvmx_pciercx_cfg004_s cn61xx;
- struct cvmx_pciercx_cfg004_s cn63xx;
- struct cvmx_pciercx_cfg004_s cn63xxp1;
- struct cvmx_pciercx_cfg004_s cn66xx;
- struct cvmx_pciercx_cfg004_s cn68xx;
- struct cvmx_pciercx_cfg004_s cn68xxp1;
- struct cvmx_pciercx_cfg004_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg005 {
- uint32_t u32;
- struct cvmx_pciercx_cfg005_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg005_s cn52xx;
- struct cvmx_pciercx_cfg005_s cn52xxp1;
- struct cvmx_pciercx_cfg005_s cn56xx;
- struct cvmx_pciercx_cfg005_s cn56xxp1;
- struct cvmx_pciercx_cfg005_s cn61xx;
- struct cvmx_pciercx_cfg005_s cn63xx;
- struct cvmx_pciercx_cfg005_s cn63xxp1;
- struct cvmx_pciercx_cfg005_s cn66xx;
- struct cvmx_pciercx_cfg005_s cn68xx;
- struct cvmx_pciercx_cfg005_s cn68xxp1;
- struct cvmx_pciercx_cfg005_s cnf71xx;
};
union cvmx_pciercx_cfg006 {
uint32_t u32;
struct cvmx_pciercx_cfg006_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t slt:8;
- uint32_t subbnum:8;
- uint32_t sbnum:8;
- uint32_t pbnum:8;
-#else
- uint32_t pbnum:8;
- uint32_t sbnum:8;
- uint32_t subbnum:8;
- uint32_t slt:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg006_s cn52xx;
- struct cvmx_pciercx_cfg006_s cn52xxp1;
- struct cvmx_pciercx_cfg006_s cn56xx;
- struct cvmx_pciercx_cfg006_s cn56xxp1;
- struct cvmx_pciercx_cfg006_s cn61xx;
- struct cvmx_pciercx_cfg006_s cn63xx;
- struct cvmx_pciercx_cfg006_s cn63xxp1;
- struct cvmx_pciercx_cfg006_s cn66xx;
- struct cvmx_pciercx_cfg006_s cn68xx;
- struct cvmx_pciercx_cfg006_s cn68xxp1;
- struct cvmx_pciercx_cfg006_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg007 {
- uint32_t u32;
- struct cvmx_pciercx_cfg007_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dpe:1;
- uint32_t sse:1;
- uint32_t rma:1;
- uint32_t rta:1;
- uint32_t sta:1;
- uint32_t devt:2;
- uint32_t mdpe:1;
- uint32_t fbb:1;
- uint32_t reserved_22_22:1;
- uint32_t m66:1;
- uint32_t reserved_16_20:5;
- uint32_t lio_limi:4;
- uint32_t reserved_9_11:3;
- uint32_t io32b:1;
- uint32_t lio_base:4;
- uint32_t reserved_1_3:3;
- uint32_t io32a:1;
-#else
- uint32_t io32a:1;
- uint32_t reserved_1_3:3;
- uint32_t lio_base:4;
- uint32_t io32b:1;
- uint32_t reserved_9_11:3;
- uint32_t lio_limi:4;
- uint32_t reserved_16_20:5;
- uint32_t m66:1;
- uint32_t reserved_22_22:1;
- uint32_t fbb:1;
- uint32_t mdpe:1;
- uint32_t devt:2;
- uint32_t sta:1;
- uint32_t rta:1;
- uint32_t rma:1;
- uint32_t sse:1;
- uint32_t dpe:1;
-#endif
+ __BITFIELD_FIELD(uint32_t slt:8,
+ __BITFIELD_FIELD(uint32_t subbnum:8,
+ __BITFIELD_FIELD(uint32_t sbnum:8,
+ __BITFIELD_FIELD(uint32_t pbnum:8,
+ ;))))
} s;
- struct cvmx_pciercx_cfg007_s cn52xx;
- struct cvmx_pciercx_cfg007_s cn52xxp1;
- struct cvmx_pciercx_cfg007_s cn56xx;
- struct cvmx_pciercx_cfg007_s cn56xxp1;
- struct cvmx_pciercx_cfg007_s cn61xx;
- struct cvmx_pciercx_cfg007_s cn63xx;
- struct cvmx_pciercx_cfg007_s cn63xxp1;
- struct cvmx_pciercx_cfg007_s cn66xx;
- struct cvmx_pciercx_cfg007_s cn68xx;
- struct cvmx_pciercx_cfg007_s cn68xxp1;
- struct cvmx_pciercx_cfg007_s cnf71xx;
};
union cvmx_pciercx_cfg008 {
uint32_t u32;
struct cvmx_pciercx_cfg008_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t ml_addr:12;
- uint32_t reserved_16_19:4;
- uint32_t mb_addr:12;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t mb_addr:12;
- uint32_t reserved_16_19:4;
- uint32_t ml_addr:12;
-#endif
+ __BITFIELD_FIELD(uint32_t ml_addr:12,
+ __BITFIELD_FIELD(uint32_t reserved_16_19:4,
+ __BITFIELD_FIELD(uint32_t mb_addr:12,
+ __BITFIELD_FIELD(uint32_t reserved_0_3:4,
+ ;))))
} s;
- struct cvmx_pciercx_cfg008_s cn52xx;
- struct cvmx_pciercx_cfg008_s cn52xxp1;
- struct cvmx_pciercx_cfg008_s cn56xx;
- struct cvmx_pciercx_cfg008_s cn56xxp1;
- struct cvmx_pciercx_cfg008_s cn61xx;
- struct cvmx_pciercx_cfg008_s cn63xx;
- struct cvmx_pciercx_cfg008_s cn63xxp1;
- struct cvmx_pciercx_cfg008_s cn66xx;
- struct cvmx_pciercx_cfg008_s cn68xx;
- struct cvmx_pciercx_cfg008_s cn68xxp1;
- struct cvmx_pciercx_cfg008_s cnf71xx;
};
union cvmx_pciercx_cfg009 {
uint32_t u32;
struct cvmx_pciercx_cfg009_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lmem_limit:12;
- uint32_t reserved_17_19:3;
- uint32_t mem64b:1;
- uint32_t lmem_base:12;
- uint32_t reserved_1_3:3;
- uint32_t mem64a:1;
-#else
- uint32_t mem64a:1;
- uint32_t reserved_1_3:3;
- uint32_t lmem_base:12;
- uint32_t mem64b:1;
- uint32_t reserved_17_19:3;
- uint32_t lmem_limit:12;
-#endif
+ __BITFIELD_FIELD(uint32_t lmem_limit:12,
+ __BITFIELD_FIELD(uint32_t reserved_17_19:3,
+ __BITFIELD_FIELD(uint32_t mem64b:1,
+ __BITFIELD_FIELD(uint32_t lmem_base:12,
+ __BITFIELD_FIELD(uint32_t reserved_1_3:3,
+ __BITFIELD_FIELD(uint32_t mem64a:1,
+ ;))))))
} s;
- struct cvmx_pciercx_cfg009_s cn52xx;
- struct cvmx_pciercx_cfg009_s cn52xxp1;
- struct cvmx_pciercx_cfg009_s cn56xx;
- struct cvmx_pciercx_cfg009_s cn56xxp1;
- struct cvmx_pciercx_cfg009_s cn61xx;
- struct cvmx_pciercx_cfg009_s cn63xx;
- struct cvmx_pciercx_cfg009_s cn63xxp1;
- struct cvmx_pciercx_cfg009_s cn66xx;
- struct cvmx_pciercx_cfg009_s cn68xx;
- struct cvmx_pciercx_cfg009_s cn68xxp1;
- struct cvmx_pciercx_cfg009_s cnf71xx;
};
union cvmx_pciercx_cfg010 {
uint32_t u32;
struct cvmx_pciercx_cfg010_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t umem_base:32;
-#else
- uint32_t umem_base:32;
-#endif
+ uint32_t umem_base;
} s;
- struct cvmx_pciercx_cfg010_s cn52xx;
- struct cvmx_pciercx_cfg010_s cn52xxp1;
- struct cvmx_pciercx_cfg010_s cn56xx;
- struct cvmx_pciercx_cfg010_s cn56xxp1;
- struct cvmx_pciercx_cfg010_s cn61xx;
- struct cvmx_pciercx_cfg010_s cn63xx;
- struct cvmx_pciercx_cfg010_s cn63xxp1;
- struct cvmx_pciercx_cfg010_s cn66xx;
- struct cvmx_pciercx_cfg010_s cn68xx;
- struct cvmx_pciercx_cfg010_s cn68xxp1;
- struct cvmx_pciercx_cfg010_s cnf71xx;
};
union cvmx_pciercx_cfg011 {
uint32_t u32;
struct cvmx_pciercx_cfg011_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t umem_limit:32;
-#else
- uint32_t umem_limit:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg011_s cn52xx;
- struct cvmx_pciercx_cfg011_s cn52xxp1;
- struct cvmx_pciercx_cfg011_s cn56xx;
- struct cvmx_pciercx_cfg011_s cn56xxp1;
- struct cvmx_pciercx_cfg011_s cn61xx;
- struct cvmx_pciercx_cfg011_s cn63xx;
- struct cvmx_pciercx_cfg011_s cn63xxp1;
- struct cvmx_pciercx_cfg011_s cn66xx;
- struct cvmx_pciercx_cfg011_s cn68xx;
- struct cvmx_pciercx_cfg011_s cn68xxp1;
- struct cvmx_pciercx_cfg011_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg012 {
- uint32_t u32;
- struct cvmx_pciercx_cfg012_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t uio_limit:16;
- uint32_t uio_base:16;
-#else
- uint32_t uio_base:16;
- uint32_t uio_limit:16;
-#endif
+ uint32_t umem_limit;
} s;
- struct cvmx_pciercx_cfg012_s cn52xx;
- struct cvmx_pciercx_cfg012_s cn52xxp1;
- struct cvmx_pciercx_cfg012_s cn56xx;
- struct cvmx_pciercx_cfg012_s cn56xxp1;
- struct cvmx_pciercx_cfg012_s cn61xx;
- struct cvmx_pciercx_cfg012_s cn63xx;
- struct cvmx_pciercx_cfg012_s cn63xxp1;
- struct cvmx_pciercx_cfg012_s cn66xx;
- struct cvmx_pciercx_cfg012_s cn68xx;
- struct cvmx_pciercx_cfg012_s cn68xxp1;
- struct cvmx_pciercx_cfg012_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg013 {
- uint32_t u32;
- struct cvmx_pciercx_cfg013_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_8_31:24;
- uint32_t cp:8;
-#else
- uint32_t cp:8;
- uint32_t reserved_8_31:24;
-#endif
- } s;
- struct cvmx_pciercx_cfg013_s cn52xx;
- struct cvmx_pciercx_cfg013_s cn52xxp1;
- struct cvmx_pciercx_cfg013_s cn56xx;
- struct cvmx_pciercx_cfg013_s cn56xxp1;
- struct cvmx_pciercx_cfg013_s cn61xx;
- struct cvmx_pciercx_cfg013_s cn63xx;
- struct cvmx_pciercx_cfg013_s cn63xxp1;
- struct cvmx_pciercx_cfg013_s cn66xx;
- struct cvmx_pciercx_cfg013_s cn68xx;
- struct cvmx_pciercx_cfg013_s cn68xxp1;
- struct cvmx_pciercx_cfg013_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg014 {
- uint32_t u32;
- struct cvmx_pciercx_cfg014_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg014_s cn52xx;
- struct cvmx_pciercx_cfg014_s cn52xxp1;
- struct cvmx_pciercx_cfg014_s cn56xx;
- struct cvmx_pciercx_cfg014_s cn56xxp1;
- struct cvmx_pciercx_cfg014_s cn61xx;
- struct cvmx_pciercx_cfg014_s cn63xx;
- struct cvmx_pciercx_cfg014_s cn63xxp1;
- struct cvmx_pciercx_cfg014_s cn66xx;
- struct cvmx_pciercx_cfg014_s cn68xx;
- struct cvmx_pciercx_cfg014_s cn68xxp1;
- struct cvmx_pciercx_cfg014_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg015 {
- uint32_t u32;
- struct cvmx_pciercx_cfg015_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_28_31:4;
- uint32_t dtsees:1;
- uint32_t dts:1;
- uint32_t sdt:1;
- uint32_t pdt:1;
- uint32_t fbbe:1;
- uint32_t sbrst:1;
- uint32_t mam:1;
- uint32_t vga16d:1;
- uint32_t vgae:1;
- uint32_t isae:1;
- uint32_t see:1;
- uint32_t pere:1;
- uint32_t inta:8;
- uint32_t il:8;
-#else
- uint32_t il:8;
- uint32_t inta:8;
- uint32_t pere:1;
- uint32_t see:1;
- uint32_t isae:1;
- uint32_t vgae:1;
- uint32_t vga16d:1;
- uint32_t mam:1;
- uint32_t sbrst:1;
- uint32_t fbbe:1;
- uint32_t pdt:1;
- uint32_t sdt:1;
- uint32_t dts:1;
- uint32_t dtsees:1;
- uint32_t reserved_28_31:4;
-#endif
- } s;
- struct cvmx_pciercx_cfg015_s cn52xx;
- struct cvmx_pciercx_cfg015_s cn52xxp1;
- struct cvmx_pciercx_cfg015_s cn56xx;
- struct cvmx_pciercx_cfg015_s cn56xxp1;
- struct cvmx_pciercx_cfg015_s cn61xx;
- struct cvmx_pciercx_cfg015_s cn63xx;
- struct cvmx_pciercx_cfg015_s cn63xxp1;
- struct cvmx_pciercx_cfg015_s cn66xx;
- struct cvmx_pciercx_cfg015_s cn68xx;
- struct cvmx_pciercx_cfg015_s cn68xxp1;
- struct cvmx_pciercx_cfg015_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg016 {
- uint32_t u32;
- struct cvmx_pciercx_cfg016_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pmes:5;
- uint32_t d2s:1;
- uint32_t d1s:1;
- uint32_t auxc:3;
- uint32_t dsi:1;
- uint32_t reserved_20_20:1;
- uint32_t pme_clock:1;
- uint32_t pmsv:3;
- uint32_t ncp:8;
- uint32_t pmcid:8;
-#else
- uint32_t pmcid:8;
- uint32_t ncp:8;
- uint32_t pmsv:3;
- uint32_t pme_clock:1;
- uint32_t reserved_20_20:1;
- uint32_t dsi:1;
- uint32_t auxc:3;
- uint32_t d1s:1;
- uint32_t d2s:1;
- uint32_t pmes:5;
-#endif
- } s;
- struct cvmx_pciercx_cfg016_s cn52xx;
- struct cvmx_pciercx_cfg016_s cn52xxp1;
- struct cvmx_pciercx_cfg016_s cn56xx;
- struct cvmx_pciercx_cfg016_s cn56xxp1;
- struct cvmx_pciercx_cfg016_s cn61xx;
- struct cvmx_pciercx_cfg016_s cn63xx;
- struct cvmx_pciercx_cfg016_s cn63xxp1;
- struct cvmx_pciercx_cfg016_s cn66xx;
- struct cvmx_pciercx_cfg016_s cn68xx;
- struct cvmx_pciercx_cfg016_s cn68xxp1;
- struct cvmx_pciercx_cfg016_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg017 {
- uint32_t u32;
- struct cvmx_pciercx_cfg017_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pmdia:8;
- uint32_t bpccee:1;
- uint32_t bd3h:1;
- uint32_t reserved_16_21:6;
- uint32_t pmess:1;
- uint32_t pmedsia:2;
- uint32_t pmds:4;
- uint32_t pmeens:1;
- uint32_t reserved_4_7:4;
- uint32_t nsr:1;
- uint32_t reserved_2_2:1;
- uint32_t ps:2;
-#else
- uint32_t ps:2;
- uint32_t reserved_2_2:1;
- uint32_t nsr:1;
- uint32_t reserved_4_7:4;
- uint32_t pmeens:1;
- uint32_t pmds:4;
- uint32_t pmedsia:2;
- uint32_t pmess:1;
- uint32_t reserved_16_21:6;
- uint32_t bd3h:1;
- uint32_t bpccee:1;
- uint32_t pmdia:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg017_s cn52xx;
- struct cvmx_pciercx_cfg017_s cn52xxp1;
- struct cvmx_pciercx_cfg017_s cn56xx;
- struct cvmx_pciercx_cfg017_s cn56xxp1;
- struct cvmx_pciercx_cfg017_s cn61xx;
- struct cvmx_pciercx_cfg017_s cn63xx;
- struct cvmx_pciercx_cfg017_s cn63xxp1;
- struct cvmx_pciercx_cfg017_s cn66xx;
- struct cvmx_pciercx_cfg017_s cn68xx;
- struct cvmx_pciercx_cfg017_s cn68xxp1;
- struct cvmx_pciercx_cfg017_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg020 {
- uint32_t u32;
- struct cvmx_pciercx_cfg020_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t pvm:1;
- uint32_t m64:1;
- uint32_t mme:3;
- uint32_t mmc:3;
- uint32_t msien:1;
- uint32_t ncp:8;
- uint32_t msicid:8;
-#else
- uint32_t msicid:8;
- uint32_t ncp:8;
- uint32_t msien:1;
- uint32_t mmc:3;
- uint32_t mme:3;
- uint32_t m64:1;
- uint32_t pvm:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg020_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_24_31:8;
- uint32_t m64:1;
- uint32_t mme:3;
- uint32_t mmc:3;
- uint32_t msien:1;
- uint32_t ncp:8;
- uint32_t msicid:8;
-#else
- uint32_t msicid:8;
- uint32_t ncp:8;
- uint32_t msien:1;
- uint32_t mmc:3;
- uint32_t mme:3;
- uint32_t m64:1;
- uint32_t reserved_24_31:8;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg020_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg020_cn52xx cn56xx;
- struct cvmx_pciercx_cfg020_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg020_s cn61xx;
- struct cvmx_pciercx_cfg020_cn52xx cn63xx;
- struct cvmx_pciercx_cfg020_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg020_cn52xx cn66xx;
- struct cvmx_pciercx_cfg020_cn52xx cn68xx;
- struct cvmx_pciercx_cfg020_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg020_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg021 {
- uint32_t u32;
- struct cvmx_pciercx_cfg021_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lmsi:30;
- uint32_t reserved_0_1:2;
-#else
- uint32_t reserved_0_1:2;
- uint32_t lmsi:30;
-#endif
- } s;
- struct cvmx_pciercx_cfg021_s cn52xx;
- struct cvmx_pciercx_cfg021_s cn52xxp1;
- struct cvmx_pciercx_cfg021_s cn56xx;
- struct cvmx_pciercx_cfg021_s cn56xxp1;
- struct cvmx_pciercx_cfg021_s cn61xx;
- struct cvmx_pciercx_cfg021_s cn63xx;
- struct cvmx_pciercx_cfg021_s cn63xxp1;
- struct cvmx_pciercx_cfg021_s cn66xx;
- struct cvmx_pciercx_cfg021_s cn68xx;
- struct cvmx_pciercx_cfg021_s cn68xxp1;
- struct cvmx_pciercx_cfg021_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg022 {
- uint32_t u32;
- struct cvmx_pciercx_cfg022_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t umsi:32;
-#else
- uint32_t umsi:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg022_s cn52xx;
- struct cvmx_pciercx_cfg022_s cn52xxp1;
- struct cvmx_pciercx_cfg022_s cn56xx;
- struct cvmx_pciercx_cfg022_s cn56xxp1;
- struct cvmx_pciercx_cfg022_s cn61xx;
- struct cvmx_pciercx_cfg022_s cn63xx;
- struct cvmx_pciercx_cfg022_s cn63xxp1;
- struct cvmx_pciercx_cfg022_s cn66xx;
- struct cvmx_pciercx_cfg022_s cn68xx;
- struct cvmx_pciercx_cfg022_s cn68xxp1;
- struct cvmx_pciercx_cfg022_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg023 {
- uint32_t u32;
- struct cvmx_pciercx_cfg023_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_16_31:16;
- uint32_t msimd:16;
-#else
- uint32_t msimd:16;
- uint32_t reserved_16_31:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg023_s cn52xx;
- struct cvmx_pciercx_cfg023_s cn52xxp1;
- struct cvmx_pciercx_cfg023_s cn56xx;
- struct cvmx_pciercx_cfg023_s cn56xxp1;
- struct cvmx_pciercx_cfg023_s cn61xx;
- struct cvmx_pciercx_cfg023_s cn63xx;
- struct cvmx_pciercx_cfg023_s cn63xxp1;
- struct cvmx_pciercx_cfg023_s cn66xx;
- struct cvmx_pciercx_cfg023_s cn68xx;
- struct cvmx_pciercx_cfg023_s cn68xxp1;
- struct cvmx_pciercx_cfg023_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg028 {
- uint32_t u32;
- struct cvmx_pciercx_cfg028_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_30_31:2;
- uint32_t imn:5;
- uint32_t si:1;
- uint32_t dpt:4;
- uint32_t pciecv:4;
- uint32_t ncp:8;
- uint32_t pcieid:8;
-#else
- uint32_t pcieid:8;
- uint32_t ncp:8;
- uint32_t pciecv:4;
- uint32_t dpt:4;
- uint32_t si:1;
- uint32_t imn:5;
- uint32_t reserved_30_31:2;
-#endif
- } s;
- struct cvmx_pciercx_cfg028_s cn52xx;
- struct cvmx_pciercx_cfg028_s cn52xxp1;
- struct cvmx_pciercx_cfg028_s cn56xx;
- struct cvmx_pciercx_cfg028_s cn56xxp1;
- struct cvmx_pciercx_cfg028_s cn61xx;
- struct cvmx_pciercx_cfg028_s cn63xx;
- struct cvmx_pciercx_cfg028_s cn63xxp1;
- struct cvmx_pciercx_cfg028_s cn66xx;
- struct cvmx_pciercx_cfg028_s cn68xx;
- struct cvmx_pciercx_cfg028_s cn68xxp1;
- struct cvmx_pciercx_cfg028_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg029 {
- uint32_t u32;
- struct cvmx_pciercx_cfg029_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_28_31:4;
- uint32_t cspls:2;
- uint32_t csplv:8;
- uint32_t reserved_16_17:2;
- uint32_t rber:1;
- uint32_t reserved_12_14:3;
- uint32_t el1al:3;
- uint32_t el0al:3;
- uint32_t etfs:1;
- uint32_t pfs:2;
- uint32_t mpss:3;
-#else
- uint32_t mpss:3;
- uint32_t pfs:2;
- uint32_t etfs:1;
- uint32_t el0al:3;
- uint32_t el1al:3;
- uint32_t reserved_12_14:3;
- uint32_t rber:1;
- uint32_t reserved_16_17:2;
- uint32_t csplv:8;
- uint32_t cspls:2;
- uint32_t reserved_28_31:4;
-#endif
- } s;
- struct cvmx_pciercx_cfg029_s cn52xx;
- struct cvmx_pciercx_cfg029_s cn52xxp1;
- struct cvmx_pciercx_cfg029_s cn56xx;
- struct cvmx_pciercx_cfg029_s cn56xxp1;
- struct cvmx_pciercx_cfg029_s cn61xx;
- struct cvmx_pciercx_cfg029_s cn63xx;
- struct cvmx_pciercx_cfg029_s cn63xxp1;
- struct cvmx_pciercx_cfg029_s cn66xx;
- struct cvmx_pciercx_cfg029_s cn68xx;
- struct cvmx_pciercx_cfg029_s cn68xxp1;
- struct cvmx_pciercx_cfg029_s cnf71xx;
};
union cvmx_pciercx_cfg030 {
uint32_t u32;
struct cvmx_pciercx_cfg030_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_22_31:10;
- uint32_t tp:1;
- uint32_t ap_d:1;
- uint32_t ur_d:1;
- uint32_t fe_d:1;
- uint32_t nfe_d:1;
- uint32_t ce_d:1;
- uint32_t reserved_15_15:1;
- uint32_t mrrs:3;
- uint32_t ns_en:1;
- uint32_t ap_en:1;
- uint32_t pf_en:1;
- uint32_t etf_en:1;
- uint32_t mps:3;
- uint32_t ro_en:1;
- uint32_t ur_en:1;
- uint32_t fe_en:1;
- uint32_t nfe_en:1;
- uint32_t ce_en:1;
-#else
- uint32_t ce_en:1;
- uint32_t nfe_en:1;
- uint32_t fe_en:1;
- uint32_t ur_en:1;
- uint32_t ro_en:1;
- uint32_t mps:3;
- uint32_t etf_en:1;
- uint32_t pf_en:1;
- uint32_t ap_en:1;
- uint32_t ns_en:1;
- uint32_t mrrs:3;
- uint32_t reserved_15_15:1;
- uint32_t ce_d:1;
- uint32_t nfe_d:1;
- uint32_t fe_d:1;
- uint32_t ur_d:1;
- uint32_t ap_d:1;
- uint32_t tp:1;
- uint32_t reserved_22_31:10;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_22_31:10,
+ __BITFIELD_FIELD(uint32_t tp:1,
+ __BITFIELD_FIELD(uint32_t ap_d:1,
+ __BITFIELD_FIELD(uint32_t ur_d:1,
+ __BITFIELD_FIELD(uint32_t fe_d:1,
+ __BITFIELD_FIELD(uint32_t nfe_d:1,
+ __BITFIELD_FIELD(uint32_t ce_d:1,
+ __BITFIELD_FIELD(uint32_t reserved_15_15:1,
+ __BITFIELD_FIELD(uint32_t mrrs:3,
+ __BITFIELD_FIELD(uint32_t ns_en:1,
+ __BITFIELD_FIELD(uint32_t ap_en:1,
+ __BITFIELD_FIELD(uint32_t pf_en:1,
+ __BITFIELD_FIELD(uint32_t etf_en:1,
+ __BITFIELD_FIELD(uint32_t mps:3,
+ __BITFIELD_FIELD(uint32_t ro_en:1,
+ __BITFIELD_FIELD(uint32_t ur_en:1,
+ __BITFIELD_FIELD(uint32_t fe_en:1,
+ __BITFIELD_FIELD(uint32_t nfe_en:1,
+ __BITFIELD_FIELD(uint32_t ce_en:1,
+ ;)))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg030_s cn52xx;
- struct cvmx_pciercx_cfg030_s cn52xxp1;
- struct cvmx_pciercx_cfg030_s cn56xx;
- struct cvmx_pciercx_cfg030_s cn56xxp1;
- struct cvmx_pciercx_cfg030_s cn61xx;
- struct cvmx_pciercx_cfg030_s cn63xx;
- struct cvmx_pciercx_cfg030_s cn63xxp1;
- struct cvmx_pciercx_cfg030_s cn66xx;
- struct cvmx_pciercx_cfg030_s cn68xx;
- struct cvmx_pciercx_cfg030_s cn68xxp1;
- struct cvmx_pciercx_cfg030_s cnf71xx;
};
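/*
 * Illustrative sanity check on the conversions: each converted struct
 * must still describe all 32 bits of the register. For
 * cvmx_pciercx_cfg030_s above:
 *
 *	10 + (1+1+1+1+1+1) + 1 + 3 + (1+1+1+1) + 3 + (1+1+1+1+1) = 32
 *
 * and the closing ";)))...)" supplies exactly one ')' per
 * __BITFIELD_FIELD() line, 19 in this case.
 */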
union cvmx_pciercx_cfg031 {
uint32_t u32;
struct cvmx_pciercx_cfg031_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pnum:8;
- uint32_t reserved_23_23:1;
- uint32_t aspm:1;
- uint32_t lbnc:1;
- uint32_t dllarc:1;
- uint32_t sderc:1;
- uint32_t cpm:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t aslpms:2;
- uint32_t mlw:6;
- uint32_t mls:4;
-#else
- uint32_t mls:4;
- uint32_t mlw:6;
- uint32_t aslpms:2;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t cpm:1;
- uint32_t sderc:1;
- uint32_t dllarc:1;
- uint32_t lbnc:1;
- uint32_t aspm:1;
- uint32_t reserved_23_23:1;
- uint32_t pnum:8;
-#endif
+ __BITFIELD_FIELD(uint32_t pnum:8,
+ __BITFIELD_FIELD(uint32_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint32_t aspm:1,
+ __BITFIELD_FIELD(uint32_t lbnc:1,
+ __BITFIELD_FIELD(uint32_t dllarc:1,
+ __BITFIELD_FIELD(uint32_t sderc:1,
+ __BITFIELD_FIELD(uint32_t cpm:1,
+ __BITFIELD_FIELD(uint32_t l1el:3,
+ __BITFIELD_FIELD(uint32_t l0el:3,
+ __BITFIELD_FIELD(uint32_t aslpms:2,
+ __BITFIELD_FIELD(uint32_t mlw:6,
+ __BITFIELD_FIELD(uint32_t mls:4,
+ ;))))))))))))
} s;
- struct cvmx_pciercx_cfg031_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pnum:8;
- uint32_t reserved_22_23:2;
- uint32_t lbnc:1;
- uint32_t dllarc:1;
- uint32_t sderc:1;
- uint32_t cpm:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t aslpms:2;
- uint32_t mlw:6;
- uint32_t mls:4;
-#else
- uint32_t mls:4;
- uint32_t mlw:6;
- uint32_t aslpms:2;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t cpm:1;
- uint32_t sderc:1;
- uint32_t dllarc:1;
- uint32_t lbnc:1;
- uint32_t reserved_22_23:2;
- uint32_t pnum:8;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg031_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg031_cn52xx cn56xx;
- struct cvmx_pciercx_cfg031_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg031_s cn61xx;
- struct cvmx_pciercx_cfg031_cn52xx cn63xx;
- struct cvmx_pciercx_cfg031_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg031_s cn66xx;
- struct cvmx_pciercx_cfg031_s cn68xx;
- struct cvmx_pciercx_cfg031_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg031_s cnf71xx;
};
union cvmx_pciercx_cfg032 {
uint32_t u32;
struct cvmx_pciercx_cfg032_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lab:1;
- uint32_t lbm:1;
- uint32_t dlla:1;
- uint32_t scc:1;
- uint32_t lt:1;
- uint32_t reserved_26_26:1;
- uint32_t nlw:6;
- uint32_t ls:4;
- uint32_t reserved_12_15:4;
- uint32_t lab_int_enb:1;
- uint32_t lbm_int_enb:1;
- uint32_t hawd:1;
- uint32_t ecpm:1;
- uint32_t es:1;
- uint32_t ccc:1;
- uint32_t rl:1;
- uint32_t ld:1;
- uint32_t rcb:1;
- uint32_t reserved_2_2:1;
- uint32_t aslpc:2;
-#else
- uint32_t aslpc:2;
- uint32_t reserved_2_2:1;
- uint32_t rcb:1;
- uint32_t ld:1;
- uint32_t rl:1;
- uint32_t ccc:1;
- uint32_t es:1;
- uint32_t ecpm:1;
- uint32_t hawd:1;
- uint32_t lbm_int_enb:1;
- uint32_t lab_int_enb:1;
- uint32_t reserved_12_15:4;
- uint32_t ls:4;
- uint32_t nlw:6;
- uint32_t reserved_26_26:1;
- uint32_t lt:1;
- uint32_t scc:1;
- uint32_t dlla:1;
- uint32_t lbm:1;
- uint32_t lab:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg032_s cn52xx;
- struct cvmx_pciercx_cfg032_s cn52xxp1;
- struct cvmx_pciercx_cfg032_s cn56xx;
- struct cvmx_pciercx_cfg032_s cn56xxp1;
- struct cvmx_pciercx_cfg032_s cn61xx;
- struct cvmx_pciercx_cfg032_s cn63xx;
- struct cvmx_pciercx_cfg032_s cn63xxp1;
- struct cvmx_pciercx_cfg032_s cn66xx;
- struct cvmx_pciercx_cfg032_s cn68xx;
- struct cvmx_pciercx_cfg032_s cn68xxp1;
- struct cvmx_pciercx_cfg032_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg033 {
- uint32_t u32;
- struct cvmx_pciercx_cfg033_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t ps_num:13;
- uint32_t nccs:1;
- uint32_t emip:1;
- uint32_t sp_ls:2;
- uint32_t sp_lv:8;
- uint32_t hp_c:1;
- uint32_t hp_s:1;
- uint32_t pip:1;
- uint32_t aip:1;
- uint32_t mrlsp:1;
- uint32_t pcp:1;
- uint32_t abp:1;
-#else
- uint32_t abp:1;
- uint32_t pcp:1;
- uint32_t mrlsp:1;
- uint32_t aip:1;
- uint32_t pip:1;
- uint32_t hp_s:1;
- uint32_t hp_c:1;
- uint32_t sp_lv:8;
- uint32_t sp_ls:2;
- uint32_t emip:1;
- uint32_t nccs:1;
- uint32_t ps_num:13;
-#endif
+ __BITFIELD_FIELD(uint32_t lab:1,
+ __BITFIELD_FIELD(uint32_t lbm:1,
+ __BITFIELD_FIELD(uint32_t dlla:1,
+ __BITFIELD_FIELD(uint32_t scc:1,
+ __BITFIELD_FIELD(uint32_t lt:1,
+ __BITFIELD_FIELD(uint32_t reserved_26_26:1,
+ __BITFIELD_FIELD(uint32_t nlw:6,
+ __BITFIELD_FIELD(uint32_t ls:4,
+ __BITFIELD_FIELD(uint32_t reserved_12_15:4,
+ __BITFIELD_FIELD(uint32_t lab_int_enb:1,
+ __BITFIELD_FIELD(uint32_t lbm_int_enb:1,
+ __BITFIELD_FIELD(uint32_t hawd:1,
+ __BITFIELD_FIELD(uint32_t ecpm:1,
+ __BITFIELD_FIELD(uint32_t es:1,
+ __BITFIELD_FIELD(uint32_t ccc:1,
+ __BITFIELD_FIELD(uint32_t rl:1,
+ __BITFIELD_FIELD(uint32_t ld:1,
+ __BITFIELD_FIELD(uint32_t rcb:1,
+ __BITFIELD_FIELD(uint32_t reserved_2_2:1,
+ __BITFIELD_FIELD(uint32_t aslpc:2,
+ ;))))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg033_s cn52xx;
- struct cvmx_pciercx_cfg033_s cn52xxp1;
- struct cvmx_pciercx_cfg033_s cn56xx;
- struct cvmx_pciercx_cfg033_s cn56xxp1;
- struct cvmx_pciercx_cfg033_s cn61xx;
- struct cvmx_pciercx_cfg033_s cn63xx;
- struct cvmx_pciercx_cfg033_s cn63xxp1;
- struct cvmx_pciercx_cfg033_s cn66xx;
- struct cvmx_pciercx_cfg033_s cn68xx;
- struct cvmx_pciercx_cfg033_s cn68xxp1;
- struct cvmx_pciercx_cfg033_s cnf71xx;
};
union cvmx_pciercx_cfg034 {
uint32_t u32;
struct cvmx_pciercx_cfg034_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t dlls_c:1;
- uint32_t emis:1;
- uint32_t pds:1;
- uint32_t mrlss:1;
- uint32_t ccint_d:1;
- uint32_t pd_c:1;
- uint32_t mrls_c:1;
- uint32_t pf_d:1;
- uint32_t abp_d:1;
- uint32_t reserved_13_15:3;
- uint32_t dlls_en:1;
- uint32_t emic:1;
- uint32_t pcc:1;
- uint32_t pic:2;
- uint32_t aic:2;
- uint32_t hpint_en:1;
- uint32_t ccint_en:1;
- uint32_t pd_en:1;
- uint32_t mrls_en:1;
- uint32_t pf_en:1;
- uint32_t abp_en:1;
-#else
- uint32_t abp_en:1;
- uint32_t pf_en:1;
- uint32_t mrls_en:1;
- uint32_t pd_en:1;
- uint32_t ccint_en:1;
- uint32_t hpint_en:1;
- uint32_t aic:2;
- uint32_t pic:2;
- uint32_t pcc:1;
- uint32_t emic:1;
- uint32_t dlls_en:1;
- uint32_t reserved_13_15:3;
- uint32_t abp_d:1;
- uint32_t pf_d:1;
- uint32_t mrls_c:1;
- uint32_t pd_c:1;
- uint32_t ccint_d:1;
- uint32_t mrlss:1;
- uint32_t pds:1;
- uint32_t emis:1;
- uint32_t dlls_c:1;
- uint32_t reserved_25_31:7;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_25_31:7,
+ __BITFIELD_FIELD(uint32_t dlls_c:1,
+ __BITFIELD_FIELD(uint32_t emis:1,
+ __BITFIELD_FIELD(uint32_t pds:1,
+ __BITFIELD_FIELD(uint32_t mrlss:1,
+ __BITFIELD_FIELD(uint32_t ccint_d:1,
+ __BITFIELD_FIELD(uint32_t pd_c:1,
+ __BITFIELD_FIELD(uint32_t mrls_c:1,
+ __BITFIELD_FIELD(uint32_t pf_d:1,
+ __BITFIELD_FIELD(uint32_t abp_d:1,
+ __BITFIELD_FIELD(uint32_t reserved_13_15:3,
+ __BITFIELD_FIELD(uint32_t dlls_en:1,
+ __BITFIELD_FIELD(uint32_t emic:1,
+ __BITFIELD_FIELD(uint32_t pcc:1,
+ __BITFIELD_FIELD(uint32_t pic:2,
+ __BITFIELD_FIELD(uint32_t aic:2,
+ __BITFIELD_FIELD(uint32_t hpint_en:1,
+ __BITFIELD_FIELD(uint32_t ccint_en:1,
+ __BITFIELD_FIELD(uint32_t pd_en:1,
+ __BITFIELD_FIELD(uint32_t mrls_en:1,
+ __BITFIELD_FIELD(uint32_t pf_en:1,
+ __BITFIELD_FIELD(uint32_t abp_en:1,
+ ;))))))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg034_s cn52xx;
- struct cvmx_pciercx_cfg034_s cn52xxp1;
- struct cvmx_pciercx_cfg034_s cn56xx;
- struct cvmx_pciercx_cfg034_s cn56xxp1;
- struct cvmx_pciercx_cfg034_s cn61xx;
- struct cvmx_pciercx_cfg034_s cn63xx;
- struct cvmx_pciercx_cfg034_s cn63xxp1;
- struct cvmx_pciercx_cfg034_s cn66xx;
- struct cvmx_pciercx_cfg034_s cn68xx;
- struct cvmx_pciercx_cfg034_s cn68xxp1;
- struct cvmx_pciercx_cfg034_s cnf71xx;
};
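/*
 * Note: pic and aic are the 2-bit Power Indicator Control and
 * Attention Indicator Control fields of the PCIe Slot Control
 * register; their widths must match the ":2" declarations in the
 * deleted #ifdef variant above, keeping the struct 32 bits wide.
 */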
union cvmx_pciercx_cfg035 {
uint32_t u32;
struct cvmx_pciercx_cfg035_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_17_31:15;
- uint32_t crssv:1;
- uint32_t reserved_5_15:11;
- uint32_t crssve:1;
- uint32_t pmeie:1;
- uint32_t sefee:1;
- uint32_t senfee:1;
- uint32_t secee:1;
-#else
- uint32_t secee:1;
- uint32_t senfee:1;
- uint32_t sefee:1;
- uint32_t pmeie:1;
- uint32_t crssve:1;
- uint32_t reserved_5_15:11;
- uint32_t crssv:1;
- uint32_t reserved_17_31:15;
-#endif
- } s;
- struct cvmx_pciercx_cfg035_s cn52xx;
- struct cvmx_pciercx_cfg035_s cn52xxp1;
- struct cvmx_pciercx_cfg035_s cn56xx;
- struct cvmx_pciercx_cfg035_s cn56xxp1;
- struct cvmx_pciercx_cfg035_s cn61xx;
- struct cvmx_pciercx_cfg035_s cn63xx;
- struct cvmx_pciercx_cfg035_s cn63xxp1;
- struct cvmx_pciercx_cfg035_s cn66xx;
- struct cvmx_pciercx_cfg035_s cn68xx;
- struct cvmx_pciercx_cfg035_s cn68xxp1;
- struct cvmx_pciercx_cfg035_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg036 {
- uint32_t u32;
- struct cvmx_pciercx_cfg036_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_18_31:14;
- uint32_t pme_pend:1;
- uint32_t pme_stat:1;
- uint32_t pme_rid:16;
-#else
- uint32_t pme_rid:16;
- uint32_t pme_stat:1;
- uint32_t pme_pend:1;
- uint32_t reserved_18_31:14;
-#endif
- } s;
- struct cvmx_pciercx_cfg036_s cn52xx;
- struct cvmx_pciercx_cfg036_s cn52xxp1;
- struct cvmx_pciercx_cfg036_s cn56xx;
- struct cvmx_pciercx_cfg036_s cn56xxp1;
- struct cvmx_pciercx_cfg036_s cn61xx;
- struct cvmx_pciercx_cfg036_s cn63xx;
- struct cvmx_pciercx_cfg036_s cn63xxp1;
- struct cvmx_pciercx_cfg036_s cn66xx;
- struct cvmx_pciercx_cfg036_s cn68xx;
- struct cvmx_pciercx_cfg036_s cn68xxp1;
- struct cvmx_pciercx_cfg036_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg037 {
- uint32_t u32;
- struct cvmx_pciercx_cfg037_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t obffs:2;
- uint32_t reserved_12_17:6;
- uint32_t ltrs:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t reserved_5_5:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t reserved_5_5:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t ltrs:1;
- uint32_t reserved_12_17:6;
- uint32_t obffs:2;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg037_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_5_31:27;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t reserved_5_31:27;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg037_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg037_cn52xx cn56xx;
- struct cvmx_pciercx_cfg037_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg037_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t tph:2;
- uint32_t reserved_11_11:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t ari_fw:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t ari_fw:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t reserved_11_11:1;
- uint32_t tph:2;
- uint32_t reserved_14_31:18;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg037_cn52xx cn63xx;
- struct cvmx_pciercx_cfg037_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg037_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t tph:2;
- uint32_t reserved_11_11:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t ari:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t ari:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t reserved_11_11:1;
- uint32_t tph:2;
- uint32_t reserved_14_31:18;
-#endif
- } cn66xx;
- struct cvmx_pciercx_cfg037_cn66xx cn68xx;
- struct cvmx_pciercx_cfg037_cn66xx cn68xxp1;
- struct cvmx_pciercx_cfg037_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t obffs:2;
- uint32_t reserved_14_17:4;
- uint32_t tphs:2;
- uint32_t ltrs:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t ari_fw:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t ari_fw:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t ltrs:1;
- uint32_t tphs:2;
- uint32_t reserved_14_17:4;
- uint32_t obffs:2;
- uint32_t reserved_20_31:12;
-#endif
- } cnf71xx;
-};
-
-union cvmx_pciercx_cfg038 {
- uint32_t u32;
- struct cvmx_pciercx_cfg038_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_15_31:17;
- uint32_t obffe:2;
- uint32_t reserved_11_12:2;
- uint32_t ltre:1;
- uint32_t id0_cp:1;
- uint32_t id0_rq:1;
- uint32_t atom_op_eb:1;
- uint32_t atom_op:1;
- uint32_t ari:1;
- uint32_t ctd:1;
- uint32_t ctv:4;
-#else
- uint32_t ctv:4;
- uint32_t ctd:1;
- uint32_t ari:1;
- uint32_t atom_op:1;
- uint32_t atom_op_eb:1;
- uint32_t id0_rq:1;
- uint32_t id0_cp:1;
- uint32_t ltre:1;
- uint32_t reserved_11_12:2;
- uint32_t obffe:2;
- uint32_t reserved_15_31:17;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_17_31:15,
+ __BITFIELD_FIELD(uint32_t crssv:1,
+ __BITFIELD_FIELD(uint32_t reserved_5_15:11,
+ __BITFIELD_FIELD(uint32_t crssve:1,
+ __BITFIELD_FIELD(uint32_t pmeie:1,
+ __BITFIELD_FIELD(uint32_t sefee:1,
+ __BITFIELD_FIELD(uint32_t senfee:1,
+ __BITFIELD_FIELD(uint32_t secee:1,
+ ;))))))))
} s;
- struct cvmx_pciercx_cfg038_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_5_31:27;
- uint32_t ctd:1;
- uint32_t ctv:4;
-#else
- uint32_t ctv:4;
- uint32_t ctd:1;
- uint32_t reserved_5_31:27;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg038_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg038_cn52xx cn56xx;
- struct cvmx_pciercx_cfg038_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg038_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_10_31:22;
- uint32_t id0_cp:1;
- uint32_t id0_rq:1;
- uint32_t atom_op_eb:1;
- uint32_t atom_op:1;
- uint32_t ari:1;
- uint32_t ctd:1;
- uint32_t ctv:4;
-#else
- uint32_t ctv:4;
- uint32_t ctd:1;
- uint32_t ari:1;
- uint32_t atom_op:1;
- uint32_t atom_op_eb:1;
- uint32_t id0_rq:1;
- uint32_t id0_cp:1;
- uint32_t reserved_10_31:22;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg038_cn52xx cn63xx;
- struct cvmx_pciercx_cfg038_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg038_cn61xx cn66xx;
- struct cvmx_pciercx_cfg038_cn61xx cn68xx;
- struct cvmx_pciercx_cfg038_cn61xx cn68xxp1;
- struct cvmx_pciercx_cfg038_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg039 {
- uint32_t u32;
- struct cvmx_pciercx_cfg039_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_9_31:23;
- uint32_t cls:1;
- uint32_t slsv:7;
- uint32_t reserved_0_0:1;
-#else
- uint32_t reserved_0_0:1;
- uint32_t slsv:7;
- uint32_t cls:1;
- uint32_t reserved_9_31:23;
-#endif
- } s;
- struct cvmx_pciercx_cfg039_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg039_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg039_cn52xx cn56xx;
- struct cvmx_pciercx_cfg039_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg039_s cn61xx;
- struct cvmx_pciercx_cfg039_s cn63xx;
- struct cvmx_pciercx_cfg039_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg039_s cn66xx;
- struct cvmx_pciercx_cfg039_s cn68xx;
- struct cvmx_pciercx_cfg039_s cn68xxp1;
- struct cvmx_pciercx_cfg039_s cnf71xx;
};
union cvmx_pciercx_cfg040 {
uint32_t u32;
struct cvmx_pciercx_cfg040_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_17_31:15;
- uint32_t cdl:1;
- uint32_t reserved_13_15:3;
- uint32_t cde:1;
- uint32_t csos:1;
- uint32_t emc:1;
- uint32_t tm:3;
- uint32_t sde:1;
- uint32_t hasd:1;
- uint32_t ec:1;
- uint32_t tls:4;
-#else
- uint32_t tls:4;
- uint32_t ec:1;
- uint32_t hasd:1;
- uint32_t sde:1;
- uint32_t tm:3;
- uint32_t emc:1;
- uint32_t csos:1;
- uint32_t cde:1;
- uint32_t reserved_13_15:3;
- uint32_t cdl:1;
- uint32_t reserved_17_31:15;
-#endif
- } s;
- struct cvmx_pciercx_cfg040_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg040_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg040_cn52xx cn56xx;
- struct cvmx_pciercx_cfg040_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg040_s cn61xx;
- struct cvmx_pciercx_cfg040_s cn63xx;
- struct cvmx_pciercx_cfg040_s cn63xxp1;
- struct cvmx_pciercx_cfg040_s cn66xx;
- struct cvmx_pciercx_cfg040_s cn68xx;
- struct cvmx_pciercx_cfg040_s cn68xxp1;
- struct cvmx_pciercx_cfg040_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg041 {
- uint32_t u32;
- struct cvmx_pciercx_cfg041_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg041_s cn52xx;
- struct cvmx_pciercx_cfg041_s cn52xxp1;
- struct cvmx_pciercx_cfg041_s cn56xx;
- struct cvmx_pciercx_cfg041_s cn56xxp1;
- struct cvmx_pciercx_cfg041_s cn61xx;
- struct cvmx_pciercx_cfg041_s cn63xx;
- struct cvmx_pciercx_cfg041_s cn63xxp1;
- struct cvmx_pciercx_cfg041_s cn66xx;
- struct cvmx_pciercx_cfg041_s cn68xx;
- struct cvmx_pciercx_cfg041_s cn68xxp1;
- struct cvmx_pciercx_cfg041_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg042 {
- uint32_t u32;
- struct cvmx_pciercx_cfg042_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_22_31:10,
+ __BITFIELD_FIELD(uint32_t ler:1,
+ __BITFIELD_FIELD(uint32_t ep3s:1,
+ __BITFIELD_FIELD(uint32_t ep2s:1,
+ __BITFIELD_FIELD(uint32_t ep1s:1,
+ __BITFIELD_FIELD(uint32_t eqc:1,
+ __BITFIELD_FIELD(uint32_t cdl:1,
+ __BITFIELD_FIELD(uint32_t cde:4,
+ __BITFIELD_FIELD(uint32_t csos:1,
+ __BITFIELD_FIELD(uint32_t emc:1,
+ __BITFIELD_FIELD(uint32_t tm:3,
+ __BITFIELD_FIELD(uint32_t sde:1,
+ __BITFIELD_FIELD(uint32_t hasd:1,
+ __BITFIELD_FIELD(uint32_t ec:1,
+ __BITFIELD_FIELD(uint32_t tls:4,
+ ;)))))))))))))))
} s;
- struct cvmx_pciercx_cfg042_s cn52xx;
- struct cvmx_pciercx_cfg042_s cn52xxp1;
- struct cvmx_pciercx_cfg042_s cn56xx;
- struct cvmx_pciercx_cfg042_s cn56xxp1;
- struct cvmx_pciercx_cfg042_s cn61xx;
- struct cvmx_pciercx_cfg042_s cn63xx;
- struct cvmx_pciercx_cfg042_s cn63xxp1;
- struct cvmx_pciercx_cfg042_s cn66xx;
- struct cvmx_pciercx_cfg042_s cn68xx;
- struct cvmx_pciercx_cfg042_s cn68xxp1;
- struct cvmx_pciercx_cfg042_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg064 {
- uint32_t u32;
- struct cvmx_pciercx_cfg064_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t nco:12;
- uint32_t cv:4;
- uint32_t pcieec:16;
-#else
- uint32_t pcieec:16;
- uint32_t cv:4;
- uint32_t nco:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg064_s cn52xx;
- struct cvmx_pciercx_cfg064_s cn52xxp1;
- struct cvmx_pciercx_cfg064_s cn56xx;
- struct cvmx_pciercx_cfg064_s cn56xxp1;
- struct cvmx_pciercx_cfg064_s cn61xx;
- struct cvmx_pciercx_cfg064_s cn63xx;
- struct cvmx_pciercx_cfg064_s cn63xxp1;
- struct cvmx_pciercx_cfg064_s cn66xx;
- struct cvmx_pciercx_cfg064_s cn68xx;
- struct cvmx_pciercx_cfg064_s cn68xxp1;
- struct cvmx_pciercx_cfg064_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg065 {
- uint32_t u32;
- struct cvmx_pciercx_cfg065_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_23_23:1;
- uint32_t ucies:1;
- uint32_t reserved_21_21:1;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_21:1;
- uint32_t ucies:1;
- uint32_t reserved_23_23:1;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg065_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_31:11;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg065_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg065_cn52xx cn56xx;
- struct cvmx_pciercx_cfg065_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg065_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_21_23:3;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_23:3;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg065_cn52xx cn63xx;
- struct cvmx_pciercx_cfg065_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg065_cn61xx cn66xx;
- struct cvmx_pciercx_cfg065_cn61xx cn68xx;
- struct cvmx_pciercx_cfg065_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg065_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg066 {
- uint32_t u32;
- struct cvmx_pciercx_cfg066_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombm:1;
- uint32_t reserved_23_23:1;
- uint32_t uciem:1;
- uint32_t reserved_21_21:1;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpem:1;
- uint32_t sdem:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlpm:1;
- uint32_t fcpem:1;
- uint32_t ctm:1;
- uint32_t cam:1;
- uint32_t ucm:1;
- uint32_t rom:1;
- uint32_t mtlpm:1;
- uint32_t ecrcem:1;
- uint32_t urem:1;
- uint32_t reserved_21_21:1;
- uint32_t uciem:1;
- uint32_t reserved_23_23:1;
- uint32_t uatombm:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg066_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpem:1;
- uint32_t sdem:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlpm:1;
- uint32_t fcpem:1;
- uint32_t ctm:1;
- uint32_t cam:1;
- uint32_t ucm:1;
- uint32_t rom:1;
- uint32_t mtlpm:1;
- uint32_t ecrcem:1;
- uint32_t urem:1;
- uint32_t reserved_21_31:11;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg066_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg066_cn52xx cn56xx;
- struct cvmx_pciercx_cfg066_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg066_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombm:1;
- uint32_t reserved_21_23:3;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpem:1;
- uint32_t sdem:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlpm:1;
- uint32_t fcpem:1;
- uint32_t ctm:1;
- uint32_t cam:1;
- uint32_t ucm:1;
- uint32_t rom:1;
- uint32_t mtlpm:1;
- uint32_t ecrcem:1;
- uint32_t urem:1;
- uint32_t reserved_21_23:3;
- uint32_t uatombm:1;
- uint32_t reserved_25_31:7;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg066_cn52xx cn63xx;
- struct cvmx_pciercx_cfg066_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg066_cn61xx cn66xx;
- struct cvmx_pciercx_cfg066_cn61xx cn68xx;
- struct cvmx_pciercx_cfg066_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg066_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg067 {
- uint32_t u32;
- struct cvmx_pciercx_cfg067_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_23_23:1;
- uint32_t ucies:1;
- uint32_t reserved_21_21:1;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_21:1;
- uint32_t ucies:1;
- uint32_t reserved_23_23:1;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg067_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_31:11;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg067_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg067_cn52xx cn56xx;
- struct cvmx_pciercx_cfg067_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg067_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_21_23:3;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_23:3;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg067_cn52xx cn63xx;
- struct cvmx_pciercx_cfg067_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg067_cn61xx cn66xx;
- struct cvmx_pciercx_cfg067_cn61xx cn68xx;
- struct cvmx_pciercx_cfg067_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg067_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg068 {
- uint32_t u32;
- struct cvmx_pciercx_cfg068_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_15_31:17;
- uint32_t cies:1;
- uint32_t anfes:1;
- uint32_t rtts:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrs:1;
- uint32_t bdllps:1;
- uint32_t btlps:1;
- uint32_t reserved_1_5:5;
- uint32_t res:1;
-#else
- uint32_t res:1;
- uint32_t reserved_1_5:5;
- uint32_t btlps:1;
- uint32_t bdllps:1;
- uint32_t rnrs:1;
- uint32_t reserved_9_11:3;
- uint32_t rtts:1;
- uint32_t anfes:1;
- uint32_t cies:1;
- uint32_t reserved_15_31:17;
-#endif
- } s;
- struct cvmx_pciercx_cfg068_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t anfes:1;
- uint32_t rtts:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrs:1;
- uint32_t bdllps:1;
- uint32_t btlps:1;
- uint32_t reserved_1_5:5;
- uint32_t res:1;
-#else
- uint32_t res:1;
- uint32_t reserved_1_5:5;
- uint32_t btlps:1;
- uint32_t bdllps:1;
- uint32_t rnrs:1;
- uint32_t reserved_9_11:3;
- uint32_t rtts:1;
- uint32_t anfes:1;
- uint32_t reserved_14_31:18;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg068_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg068_cn52xx cn56xx;
- struct cvmx_pciercx_cfg068_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg068_cn52xx cn61xx;
- struct cvmx_pciercx_cfg068_cn52xx cn63xx;
- struct cvmx_pciercx_cfg068_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg068_cn52xx cn66xx;
- struct cvmx_pciercx_cfg068_cn52xx cn68xx;
- struct cvmx_pciercx_cfg068_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg068_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg069 {
- uint32_t u32;
- struct cvmx_pciercx_cfg069_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_15_31:17;
- uint32_t ciem:1;
- uint32_t anfem:1;
- uint32_t rttm:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrm:1;
- uint32_t bdllpm:1;
- uint32_t btlpm:1;
- uint32_t reserved_1_5:5;
- uint32_t rem:1;
-#else
- uint32_t rem:1;
- uint32_t reserved_1_5:5;
- uint32_t btlpm:1;
- uint32_t bdllpm:1;
- uint32_t rnrm:1;
- uint32_t reserved_9_11:3;
- uint32_t rttm:1;
- uint32_t anfem:1;
- uint32_t ciem:1;
- uint32_t reserved_15_31:17;
-#endif
- } s;
- struct cvmx_pciercx_cfg069_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t anfem:1;
- uint32_t rttm:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrm:1;
- uint32_t bdllpm:1;
- uint32_t btlpm:1;
- uint32_t reserved_1_5:5;
- uint32_t rem:1;
-#else
- uint32_t rem:1;
- uint32_t reserved_1_5:5;
- uint32_t btlpm:1;
- uint32_t bdllpm:1;
- uint32_t rnrm:1;
- uint32_t reserved_9_11:3;
- uint32_t rttm:1;
- uint32_t anfem:1;
- uint32_t reserved_14_31:18;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg069_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg069_cn52xx cn56xx;
- struct cvmx_pciercx_cfg069_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg069_cn52xx cn61xx;
- struct cvmx_pciercx_cfg069_cn52xx cn63xx;
- struct cvmx_pciercx_cfg069_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg069_cn52xx cn66xx;
- struct cvmx_pciercx_cfg069_cn52xx cn68xx;
- struct cvmx_pciercx_cfg069_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg069_s cnf71xx;
};
union cvmx_pciercx_cfg070 {
uint32_t u32;
struct cvmx_pciercx_cfg070_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_9_31:23;
- uint32_t ce:1;
- uint32_t cc:1;
- uint32_t ge:1;
- uint32_t gc:1;
- uint32_t fep:5;
-#else
- uint32_t fep:5;
- uint32_t gc:1;
- uint32_t ge:1;
- uint32_t cc:1;
- uint32_t ce:1;
- uint32_t reserved_9_31:23;
-#endif
- } s;
- struct cvmx_pciercx_cfg070_s cn52xx;
- struct cvmx_pciercx_cfg070_s cn52xxp1;
- struct cvmx_pciercx_cfg070_s cn56xx;
- struct cvmx_pciercx_cfg070_s cn56xxp1;
- struct cvmx_pciercx_cfg070_s cn61xx;
- struct cvmx_pciercx_cfg070_s cn63xx;
- struct cvmx_pciercx_cfg070_s cn63xxp1;
- struct cvmx_pciercx_cfg070_s cn66xx;
- struct cvmx_pciercx_cfg070_s cn68xx;
- struct cvmx_pciercx_cfg070_s cn68xxp1;
- struct cvmx_pciercx_cfg070_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg071 {
- uint32_t u32;
- struct cvmx_pciercx_cfg071_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword1:32;
-#else
- uint32_t dword1:32;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_12_31:20,
+ __BITFIELD_FIELD(uint32_t tplp:1,
+ __BITFIELD_FIELD(uint32_t reserved_9_10:2,
+ __BITFIELD_FIELD(uint32_t ce:1,
+ __BITFIELD_FIELD(uint32_t cc:1,
+ __BITFIELD_FIELD(uint32_t ge:1,
+ __BITFIELD_FIELD(uint32_t gc:1,
+ __BITFIELD_FIELD(uint32_t fep:5,
+ ;))))))))
} s;
- struct cvmx_pciercx_cfg071_s cn52xx;
- struct cvmx_pciercx_cfg071_s cn52xxp1;
- struct cvmx_pciercx_cfg071_s cn56xx;
- struct cvmx_pciercx_cfg071_s cn56xxp1;
- struct cvmx_pciercx_cfg071_s cn61xx;
- struct cvmx_pciercx_cfg071_s cn63xx;
- struct cvmx_pciercx_cfg071_s cn63xxp1;
- struct cvmx_pciercx_cfg071_s cn66xx;
- struct cvmx_pciercx_cfg071_s cn68xx;
- struct cvmx_pciercx_cfg071_s cn68xxp1;
- struct cvmx_pciercx_cfg071_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg072 {
- uint32_t u32;
- struct cvmx_pciercx_cfg072_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword2:32;
-#else
- uint32_t dword2:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg072_s cn52xx;
- struct cvmx_pciercx_cfg072_s cn52xxp1;
- struct cvmx_pciercx_cfg072_s cn56xx;
- struct cvmx_pciercx_cfg072_s cn56xxp1;
- struct cvmx_pciercx_cfg072_s cn61xx;
- struct cvmx_pciercx_cfg072_s cn63xx;
- struct cvmx_pciercx_cfg072_s cn63xxp1;
- struct cvmx_pciercx_cfg072_s cn66xx;
- struct cvmx_pciercx_cfg072_s cn68xx;
- struct cvmx_pciercx_cfg072_s cn68xxp1;
- struct cvmx_pciercx_cfg072_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg073 {
- uint32_t u32;
- struct cvmx_pciercx_cfg073_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword3:32;
-#else
- uint32_t dword3:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg073_s cn52xx;
- struct cvmx_pciercx_cfg073_s cn52xxp1;
- struct cvmx_pciercx_cfg073_s cn56xx;
- struct cvmx_pciercx_cfg073_s cn56xxp1;
- struct cvmx_pciercx_cfg073_s cn61xx;
- struct cvmx_pciercx_cfg073_s cn63xx;
- struct cvmx_pciercx_cfg073_s cn63xxp1;
- struct cvmx_pciercx_cfg073_s cn66xx;
- struct cvmx_pciercx_cfg073_s cn68xx;
- struct cvmx_pciercx_cfg073_s cn68xxp1;
- struct cvmx_pciercx_cfg073_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg074 {
- uint32_t u32;
- struct cvmx_pciercx_cfg074_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword4:32;
-#else
- uint32_t dword4:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg074_s cn52xx;
- struct cvmx_pciercx_cfg074_s cn52xxp1;
- struct cvmx_pciercx_cfg074_s cn56xx;
- struct cvmx_pciercx_cfg074_s cn56xxp1;
- struct cvmx_pciercx_cfg074_s cn61xx;
- struct cvmx_pciercx_cfg074_s cn63xx;
- struct cvmx_pciercx_cfg074_s cn63xxp1;
- struct cvmx_pciercx_cfg074_s cn66xx;
- struct cvmx_pciercx_cfg074_s cn68xx;
- struct cvmx_pciercx_cfg074_s cn68xxp1;
- struct cvmx_pciercx_cfg074_s cnf71xx;
};
union cvmx_pciercx_cfg075 {
uint32_t u32;
struct cvmx_pciercx_cfg075_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_3_31:29;
- uint32_t fere:1;
- uint32_t nfere:1;
- uint32_t cere:1;
-#else
- uint32_t cere:1;
- uint32_t nfere:1;
- uint32_t fere:1;
- uint32_t reserved_3_31:29;
-#endif
- } s;
- struct cvmx_pciercx_cfg075_s cn52xx;
- struct cvmx_pciercx_cfg075_s cn52xxp1;
- struct cvmx_pciercx_cfg075_s cn56xx;
- struct cvmx_pciercx_cfg075_s cn56xxp1;
- struct cvmx_pciercx_cfg075_s cn61xx;
- struct cvmx_pciercx_cfg075_s cn63xx;
- struct cvmx_pciercx_cfg075_s cn63xxp1;
- struct cvmx_pciercx_cfg075_s cn66xx;
- struct cvmx_pciercx_cfg075_s cn68xx;
- struct cvmx_pciercx_cfg075_s cn68xxp1;
- struct cvmx_pciercx_cfg075_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg076 {
- uint32_t u32;
- struct cvmx_pciercx_cfg076_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t aeimn:5;
- uint32_t reserved_7_26:20;
- uint32_t femr:1;
- uint32_t nfemr:1;
- uint32_t fuf:1;
- uint32_t multi_efnfr:1;
- uint32_t efnfr:1;
- uint32_t multi_ecr:1;
- uint32_t ecr:1;
-#else
- uint32_t ecr:1;
- uint32_t multi_ecr:1;
- uint32_t efnfr:1;
- uint32_t multi_efnfr:1;
- uint32_t fuf:1;
- uint32_t nfemr:1;
- uint32_t femr:1;
- uint32_t reserved_7_26:20;
- uint32_t aeimn:5;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_3_31:29,
+ __BITFIELD_FIELD(uint32_t fere:1,
+ __BITFIELD_FIELD(uint32_t nfere:1,
+ __BITFIELD_FIELD(uint32_t cere:1,
+ ;))))
} s;
- struct cvmx_pciercx_cfg076_s cn52xx;
- struct cvmx_pciercx_cfg076_s cn52xxp1;
- struct cvmx_pciercx_cfg076_s cn56xx;
- struct cvmx_pciercx_cfg076_s cn56xxp1;
- struct cvmx_pciercx_cfg076_s cn61xx;
- struct cvmx_pciercx_cfg076_s cn63xx;
- struct cvmx_pciercx_cfg076_s cn63xxp1;
- struct cvmx_pciercx_cfg076_s cn66xx;
- struct cvmx_pciercx_cfg076_s cn68xx;
- struct cvmx_pciercx_cfg076_s cn68xxp1;
- struct cvmx_pciercx_cfg076_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg077 {
- uint32_t u32;
- struct cvmx_pciercx_cfg077_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t efnfsi:16;
- uint32_t ecsi:16;
-#else
- uint32_t ecsi:16;
- uint32_t efnfsi:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg077_s cn52xx;
- struct cvmx_pciercx_cfg077_s cn52xxp1;
- struct cvmx_pciercx_cfg077_s cn56xx;
- struct cvmx_pciercx_cfg077_s cn56xxp1;
- struct cvmx_pciercx_cfg077_s cn61xx;
- struct cvmx_pciercx_cfg077_s cn63xx;
- struct cvmx_pciercx_cfg077_s cn63xxp1;
- struct cvmx_pciercx_cfg077_s cn66xx;
- struct cvmx_pciercx_cfg077_s cn68xx;
- struct cvmx_pciercx_cfg077_s cn68xxp1;
- struct cvmx_pciercx_cfg077_s cnf71xx;
};
union cvmx_pciercx_cfg448 {
uint32_t u32;
struct cvmx_pciercx_cfg448_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rtl:16;
- uint32_t rtltl:16;
-#else
- uint32_t rtltl:16;
- uint32_t rtl:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg448_s cn52xx;
- struct cvmx_pciercx_cfg448_s cn52xxp1;
- struct cvmx_pciercx_cfg448_s cn56xx;
- struct cvmx_pciercx_cfg448_s cn56xxp1;
- struct cvmx_pciercx_cfg448_s cn61xx;
- struct cvmx_pciercx_cfg448_s cn63xx;
- struct cvmx_pciercx_cfg448_s cn63xxp1;
- struct cvmx_pciercx_cfg448_s cn66xx;
- struct cvmx_pciercx_cfg448_s cn68xx;
- struct cvmx_pciercx_cfg448_s cn68xxp1;
- struct cvmx_pciercx_cfg448_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg449 {
- uint32_t u32;
- struct cvmx_pciercx_cfg449_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t omr:32;
-#else
- uint32_t omr:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg449_s cn52xx;
- struct cvmx_pciercx_cfg449_s cn52xxp1;
- struct cvmx_pciercx_cfg449_s cn56xx;
- struct cvmx_pciercx_cfg449_s cn56xxp1;
- struct cvmx_pciercx_cfg449_s cn61xx;
- struct cvmx_pciercx_cfg449_s cn63xx;
- struct cvmx_pciercx_cfg449_s cn63xxp1;
- struct cvmx_pciercx_cfg449_s cn66xx;
- struct cvmx_pciercx_cfg449_s cn68xx;
- struct cvmx_pciercx_cfg449_s cn68xxp1;
- struct cvmx_pciercx_cfg449_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg450 {
- uint32_t u32;
- struct cvmx_pciercx_cfg450_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lpec:8;
- uint32_t reserved_22_23:2;
- uint32_t link_state:6;
- uint32_t force_link:1;
- uint32_t reserved_8_14:7;
- uint32_t link_num:8;
-#else
- uint32_t link_num:8;
- uint32_t reserved_8_14:7;
- uint32_t force_link:1;
- uint32_t link_state:6;
- uint32_t reserved_22_23:2;
- uint32_t lpec:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg450_s cn52xx;
- struct cvmx_pciercx_cfg450_s cn52xxp1;
- struct cvmx_pciercx_cfg450_s cn56xx;
- struct cvmx_pciercx_cfg450_s cn56xxp1;
- struct cvmx_pciercx_cfg450_s cn61xx;
- struct cvmx_pciercx_cfg450_s cn63xx;
- struct cvmx_pciercx_cfg450_s cn63xxp1;
- struct cvmx_pciercx_cfg450_s cn66xx;
- struct cvmx_pciercx_cfg450_s cn68xx;
- struct cvmx_pciercx_cfg450_s cn68xxp1;
- struct cvmx_pciercx_cfg450_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg451 {
- uint32_t u32;
- struct cvmx_pciercx_cfg451_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_31_31:1;
- uint32_t easpml1:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t n_fts_cc:8;
- uint32_t n_fts:8;
- uint32_t ack_freq:8;
-#else
- uint32_t ack_freq:8;
- uint32_t n_fts:8;
- uint32_t n_fts_cc:8;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t easpml1:1;
- uint32_t reserved_31_31:1;
-#endif
+ __BITFIELD_FIELD(uint32_t rtl:16,
+ __BITFIELD_FIELD(uint32_t rtltl:16,
+ ;))
} s;
- struct cvmx_pciercx_cfg451_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_30_31:2;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t n_fts_cc:8;
- uint32_t n_fts:8;
- uint32_t ack_freq:8;
-#else
- uint32_t ack_freq:8;
- uint32_t n_fts:8;
- uint32_t n_fts_cc:8;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t reserved_30_31:2;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg451_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg451_cn52xx cn56xx;
- struct cvmx_pciercx_cfg451_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg451_s cn61xx;
- struct cvmx_pciercx_cfg451_cn52xx cn63xx;
- struct cvmx_pciercx_cfg451_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg451_s cn66xx;
- struct cvmx_pciercx_cfg451_s cn68xx;
- struct cvmx_pciercx_cfg451_s cn68xxp1;
- struct cvmx_pciercx_cfg451_s cnf71xx;
};
union cvmx_pciercx_cfg452 {
uint32_t u32;
struct cvmx_pciercx_cfg452_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t eccrc:1;
- uint32_t reserved_22_24:3;
- uint32_t lme:6;
- uint32_t reserved_8_15:8;
- uint32_t flm:1;
- uint32_t reserved_6_6:1;
- uint32_t dllle:1;
- uint32_t reserved_4_4:1;
- uint32_t ra:1;
- uint32_t le:1;
- uint32_t sd:1;
- uint32_t omr:1;
-#else
- uint32_t omr:1;
- uint32_t sd:1;
- uint32_t le:1;
- uint32_t ra:1;
- uint32_t reserved_4_4:1;
- uint32_t dllle:1;
- uint32_t reserved_6_6:1;
- uint32_t flm:1;
- uint32_t reserved_8_15:8;
- uint32_t lme:6;
- uint32_t reserved_22_24:3;
- uint32_t eccrc:1;
- uint32_t reserved_26_31:6;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_26_31:6,
+ __BITFIELD_FIELD(uint32_t eccrc:1,
+ __BITFIELD_FIELD(uint32_t reserved_22_24:3,
+ __BITFIELD_FIELD(uint32_t lme:6,
+ __BITFIELD_FIELD(uint32_t reserved_12_15:4,
+ __BITFIELD_FIELD(uint32_t link_rate:4,
+ __BITFIELD_FIELD(uint32_t flm:1,
+ __BITFIELD_FIELD(uint32_t reserved_6_6:1,
+ __BITFIELD_FIELD(uint32_t dllle:1,
+ __BITFIELD_FIELD(uint32_t reserved_4_4:1,
+ __BITFIELD_FIELD(uint32_t ra:1,
+ __BITFIELD_FIELD(uint32_t le:1,
+ __BITFIELD_FIELD(uint32_t sd:1,
+ __BITFIELD_FIELD(uint32_t omr:1,
+ ;))))))))))))))
} s;
- struct cvmx_pciercx_cfg452_s cn52xx;
- struct cvmx_pciercx_cfg452_s cn52xxp1;
- struct cvmx_pciercx_cfg452_s cn56xx;
- struct cvmx_pciercx_cfg452_s cn56xxp1;
- struct cvmx_pciercx_cfg452_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_22_31:10;
- uint32_t lme:6;
- uint32_t reserved_8_15:8;
- uint32_t flm:1;
- uint32_t reserved_6_6:1;
- uint32_t dllle:1;
- uint32_t reserved_4_4:1;
- uint32_t ra:1;
- uint32_t le:1;
- uint32_t sd:1;
- uint32_t omr:1;
-#else
- uint32_t omr:1;
- uint32_t sd:1;
- uint32_t le:1;
- uint32_t ra:1;
- uint32_t reserved_4_4:1;
- uint32_t dllle:1;
- uint32_t reserved_6_6:1;
- uint32_t flm:1;
- uint32_t reserved_8_15:8;
- uint32_t lme:6;
- uint32_t reserved_22_31:10;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg452_s cn63xx;
- struct cvmx_pciercx_cfg452_s cn63xxp1;
- struct cvmx_pciercx_cfg452_cn61xx cn66xx;
- struct cvmx_pciercx_cfg452_cn61xx cn68xx;
- struct cvmx_pciercx_cfg452_cn61xx cn68xxp1;
- struct cvmx_pciercx_cfg452_cn61xx cnf71xx;
-};
-
-union cvmx_pciercx_cfg453 {
- uint32_t u32;
- struct cvmx_pciercx_cfg453_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dlld:1;
- uint32_t reserved_26_30:5;
- uint32_t ack_nak:1;
- uint32_t fcd:1;
- uint32_t ilst:24;
-#else
- uint32_t ilst:24;
- uint32_t fcd:1;
- uint32_t ack_nak:1;
- uint32_t reserved_26_30:5;
- uint32_t dlld:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg453_s cn52xx;
- struct cvmx_pciercx_cfg453_s cn52xxp1;
- struct cvmx_pciercx_cfg453_s cn56xx;
- struct cvmx_pciercx_cfg453_s cn56xxp1;
- struct cvmx_pciercx_cfg453_s cn61xx;
- struct cvmx_pciercx_cfg453_s cn63xx;
- struct cvmx_pciercx_cfg453_s cn63xxp1;
- struct cvmx_pciercx_cfg453_s cn66xx;
- struct cvmx_pciercx_cfg453_s cn68xx;
- struct cvmx_pciercx_cfg453_s cn68xxp1;
- struct cvmx_pciercx_cfg453_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg454 {
- uint32_t u32;
- struct cvmx_pciercx_cfg454_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t cx_nfunc:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_11_13:3;
- uint32_t nskps:3;
- uint32_t reserved_0_7:8;
-#else
- uint32_t reserved_0_7:8;
- uint32_t nskps:3;
- uint32_t reserved_11_13:3;
- uint32_t tmrt:5;
- uint32_t tmanlt:5;
- uint32_t tmfcwt:5;
- uint32_t cx_nfunc:3;
-#endif
- } s;
- struct cvmx_pciercx_cfg454_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_29_31:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_11_13:3;
- uint32_t nskps:3;
- uint32_t reserved_4_7:4;
- uint32_t ntss:4;
-#else
- uint32_t ntss:4;
- uint32_t reserved_4_7:4;
- uint32_t nskps:3;
- uint32_t reserved_11_13:3;
- uint32_t tmrt:5;
- uint32_t tmanlt:5;
- uint32_t tmfcwt:5;
- uint32_t reserved_29_31:3;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg454_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg454_cn52xx cn56xx;
- struct cvmx_pciercx_cfg454_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg454_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t cx_nfunc:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_8_13:6;
- uint32_t mfuncn:8;
-#else
- uint32_t mfuncn:8;
- uint32_t reserved_8_13:6;
- uint32_t tmrt:5;
- uint32_t tmanlt:5;
- uint32_t tmfcwt:5;
- uint32_t cx_nfunc:3;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg454_cn52xx cn63xx;
- struct cvmx_pciercx_cfg454_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg454_cn61xx cn66xx;
- struct cvmx_pciercx_cfg454_cn61xx cn68xx;
- struct cvmx_pciercx_cfg454_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg454_cn61xx cnf71xx;
};
union cvmx_pciercx_cfg455 {
uint32_t u32;
struct cvmx_pciercx_cfg455_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t m_cfg0_filt:1;
- uint32_t m_io_filt:1;
- uint32_t msg_ctrl:1;
- uint32_t m_cpl_ecrc_filt:1;
- uint32_t m_ecrc_filt:1;
- uint32_t m_cpl_len_err:1;
- uint32_t m_cpl_attr_err:1;
- uint32_t m_cpl_tc_err:1;
- uint32_t m_cpl_fun_err:1;
- uint32_t m_cpl_rid_err:1;
- uint32_t m_cpl_tag_err:1;
- uint32_t m_lk_filt:1;
- uint32_t m_cfg1_filt:1;
- uint32_t m_bar_match:1;
- uint32_t m_pois_filt:1;
- uint32_t m_fun:1;
- uint32_t dfcwt:1;
- uint32_t reserved_11_14:4;
- uint32_t skpiv:11;
-#else
- uint32_t skpiv:11;
- uint32_t reserved_11_14:4;
- uint32_t dfcwt:1;
- uint32_t m_fun:1;
- uint32_t m_pois_filt:1;
- uint32_t m_bar_match:1;
- uint32_t m_cfg1_filt:1;
- uint32_t m_lk_filt:1;
- uint32_t m_cpl_tag_err:1;
- uint32_t m_cpl_rid_err:1;
- uint32_t m_cpl_fun_err:1;
- uint32_t m_cpl_tc_err:1;
- uint32_t m_cpl_attr_err:1;
- uint32_t m_cpl_len_err:1;
- uint32_t m_ecrc_filt:1;
- uint32_t m_cpl_ecrc_filt:1;
- uint32_t msg_ctrl:1;
- uint32_t m_io_filt:1;
- uint32_t m_cfg0_filt:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg455_s cn52xx;
- struct cvmx_pciercx_cfg455_s cn52xxp1;
- struct cvmx_pciercx_cfg455_s cn56xx;
- struct cvmx_pciercx_cfg455_s cn56xxp1;
- struct cvmx_pciercx_cfg455_s cn61xx;
- struct cvmx_pciercx_cfg455_s cn63xx;
- struct cvmx_pciercx_cfg455_s cn63xxp1;
- struct cvmx_pciercx_cfg455_s cn66xx;
- struct cvmx_pciercx_cfg455_s cn68xx;
- struct cvmx_pciercx_cfg455_s cn68xxp1;
- struct cvmx_pciercx_cfg455_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg456 {
- uint32_t u32;
- struct cvmx_pciercx_cfg456_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_4_31:28;
- uint32_t m_handle_flush:1;
- uint32_t m_dabort_4ucpl:1;
- uint32_t m_vend1_drp:1;
- uint32_t m_vend0_drp:1;
-#else
- uint32_t m_vend0_drp:1;
- uint32_t m_vend1_drp:1;
- uint32_t m_dabort_4ucpl:1;
- uint32_t m_handle_flush:1;
- uint32_t reserved_4_31:28;
-#endif
- } s;
- struct cvmx_pciercx_cfg456_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_2_31:30;
- uint32_t m_vend1_drp:1;
- uint32_t m_vend0_drp:1;
-#else
- uint32_t m_vend0_drp:1;
- uint32_t m_vend1_drp:1;
- uint32_t reserved_2_31:30;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg456_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg456_cn52xx cn56xx;
- struct cvmx_pciercx_cfg456_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg456_s cn61xx;
- struct cvmx_pciercx_cfg456_cn52xx cn63xx;
- struct cvmx_pciercx_cfg456_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg456_s cn66xx;
- struct cvmx_pciercx_cfg456_s cn68xx;
- struct cvmx_pciercx_cfg456_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg456_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg458 {
- uint32_t u32;
- struct cvmx_pciercx_cfg458_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dbg_info_l32:32;
-#else
- uint32_t dbg_info_l32:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg458_s cn52xx;
- struct cvmx_pciercx_cfg458_s cn52xxp1;
- struct cvmx_pciercx_cfg458_s cn56xx;
- struct cvmx_pciercx_cfg458_s cn56xxp1;
- struct cvmx_pciercx_cfg458_s cn61xx;
- struct cvmx_pciercx_cfg458_s cn63xx;
- struct cvmx_pciercx_cfg458_s cn63xxp1;
- struct cvmx_pciercx_cfg458_s cn66xx;
- struct cvmx_pciercx_cfg458_s cn68xx;
- struct cvmx_pciercx_cfg458_s cn68xxp1;
- struct cvmx_pciercx_cfg458_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg459 {
- uint32_t u32;
- struct cvmx_pciercx_cfg459_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dbg_info_u32:32;
-#else
- uint32_t dbg_info_u32:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg459_s cn52xx;
- struct cvmx_pciercx_cfg459_s cn52xxp1;
- struct cvmx_pciercx_cfg459_s cn56xx;
- struct cvmx_pciercx_cfg459_s cn56xxp1;
- struct cvmx_pciercx_cfg459_s cn61xx;
- struct cvmx_pciercx_cfg459_s cn63xx;
- struct cvmx_pciercx_cfg459_s cn63xxp1;
- struct cvmx_pciercx_cfg459_s cn66xx;
- struct cvmx_pciercx_cfg459_s cn68xx;
- struct cvmx_pciercx_cfg459_s cn68xxp1;
- struct cvmx_pciercx_cfg459_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg460 {
- uint32_t u32;
- struct cvmx_pciercx_cfg460_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t tphfcc:8;
- uint32_t tpdfcc:12;
-#else
- uint32_t tpdfcc:12;
- uint32_t tphfcc:8;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg460_s cn52xx;
- struct cvmx_pciercx_cfg460_s cn52xxp1;
- struct cvmx_pciercx_cfg460_s cn56xx;
- struct cvmx_pciercx_cfg460_s cn56xxp1;
- struct cvmx_pciercx_cfg460_s cn61xx;
- struct cvmx_pciercx_cfg460_s cn63xx;
- struct cvmx_pciercx_cfg460_s cn63xxp1;
- struct cvmx_pciercx_cfg460_s cn66xx;
- struct cvmx_pciercx_cfg460_s cn68xx;
- struct cvmx_pciercx_cfg460_s cn68xxp1;
- struct cvmx_pciercx_cfg460_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg461 {
- uint32_t u32;
- struct cvmx_pciercx_cfg461_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
-#else
- uint32_t tcdfcc:12;
- uint32_t tchfcc:8;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg461_s cn52xx;
- struct cvmx_pciercx_cfg461_s cn52xxp1;
- struct cvmx_pciercx_cfg461_s cn56xx;
- struct cvmx_pciercx_cfg461_s cn56xxp1;
- struct cvmx_pciercx_cfg461_s cn61xx;
- struct cvmx_pciercx_cfg461_s cn63xx;
- struct cvmx_pciercx_cfg461_s cn63xxp1;
- struct cvmx_pciercx_cfg461_s cn66xx;
- struct cvmx_pciercx_cfg461_s cn68xx;
- struct cvmx_pciercx_cfg461_s cn68xxp1;
- struct cvmx_pciercx_cfg461_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg462 {
- uint32_t u32;
- struct cvmx_pciercx_cfg462_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
-#else
- uint32_t tcdfcc:12;
- uint32_t tchfcc:8;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg462_s cn52xx;
- struct cvmx_pciercx_cfg462_s cn52xxp1;
- struct cvmx_pciercx_cfg462_s cn56xx;
- struct cvmx_pciercx_cfg462_s cn56xxp1;
- struct cvmx_pciercx_cfg462_s cn61xx;
- struct cvmx_pciercx_cfg462_s cn63xx;
- struct cvmx_pciercx_cfg462_s cn63xxp1;
- struct cvmx_pciercx_cfg462_s cn66xx;
- struct cvmx_pciercx_cfg462_s cn68xx;
- struct cvmx_pciercx_cfg462_s cn68xxp1;
- struct cvmx_pciercx_cfg462_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg463 {
- uint32_t u32;
- struct cvmx_pciercx_cfg463_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_3_31:29;
- uint32_t rqne:1;
- uint32_t trbne:1;
- uint32_t rtlpfccnr:1;
-#else
- uint32_t rtlpfccnr:1;
- uint32_t trbne:1;
- uint32_t rqne:1;
- uint32_t reserved_3_31:29;
-#endif
- } s;
- struct cvmx_pciercx_cfg463_s cn52xx;
- struct cvmx_pciercx_cfg463_s cn52xxp1;
- struct cvmx_pciercx_cfg463_s cn56xx;
- struct cvmx_pciercx_cfg463_s cn56xxp1;
- struct cvmx_pciercx_cfg463_s cn61xx;
- struct cvmx_pciercx_cfg463_s cn63xx;
- struct cvmx_pciercx_cfg463_s cn63xxp1;
- struct cvmx_pciercx_cfg463_s cn66xx;
- struct cvmx_pciercx_cfg463_s cn68xx;
- struct cvmx_pciercx_cfg463_s cn68xxp1;
- struct cvmx_pciercx_cfg463_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg464 {
- uint32_t u32;
- struct cvmx_pciercx_cfg464_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t wrr_vc3:8;
- uint32_t wrr_vc2:8;
- uint32_t wrr_vc1:8;
- uint32_t wrr_vc0:8;
-#else
- uint32_t wrr_vc0:8;
- uint32_t wrr_vc1:8;
- uint32_t wrr_vc2:8;
- uint32_t wrr_vc3:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg464_s cn52xx;
- struct cvmx_pciercx_cfg464_s cn52xxp1;
- struct cvmx_pciercx_cfg464_s cn56xx;
- struct cvmx_pciercx_cfg464_s cn56xxp1;
- struct cvmx_pciercx_cfg464_s cn61xx;
- struct cvmx_pciercx_cfg464_s cn63xx;
- struct cvmx_pciercx_cfg464_s cn63xxp1;
- struct cvmx_pciercx_cfg464_s cn66xx;
- struct cvmx_pciercx_cfg464_s cn68xx;
- struct cvmx_pciercx_cfg464_s cn68xxp1;
- struct cvmx_pciercx_cfg464_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg465 {
- uint32_t u32;
- struct cvmx_pciercx_cfg465_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t wrr_vc7:8;
- uint32_t wrr_vc6:8;
- uint32_t wrr_vc5:8;
- uint32_t wrr_vc4:8;
-#else
- uint32_t wrr_vc4:8;
- uint32_t wrr_vc5:8;
- uint32_t wrr_vc6:8;
- uint32_t wrr_vc7:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg465_s cn52xx;
- struct cvmx_pciercx_cfg465_s cn52xxp1;
- struct cvmx_pciercx_cfg465_s cn56xx;
- struct cvmx_pciercx_cfg465_s cn56xxp1;
- struct cvmx_pciercx_cfg465_s cn61xx;
- struct cvmx_pciercx_cfg465_s cn63xx;
- struct cvmx_pciercx_cfg465_s cn63xxp1;
- struct cvmx_pciercx_cfg465_s cn66xx;
- struct cvmx_pciercx_cfg465_s cn68xx;
- struct cvmx_pciercx_cfg465_s cn68xxp1;
- struct cvmx_pciercx_cfg465_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg466 {
- uint32_t u32;
- struct cvmx_pciercx_cfg466_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rx_queue_order:1;
- uint32_t type_ordering:1;
- uint32_t reserved_24_29:6;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
-#else
- uint32_t data_credits:12;
- uint32_t header_credits:8;
- uint32_t reserved_20_20:1;
- uint32_t queue_mode:3;
- uint32_t reserved_24_29:6;
- uint32_t type_ordering:1;
- uint32_t rx_queue_order:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg466_s cn52xx;
- struct cvmx_pciercx_cfg466_s cn52xxp1;
- struct cvmx_pciercx_cfg466_s cn56xx;
- struct cvmx_pciercx_cfg466_s cn56xxp1;
- struct cvmx_pciercx_cfg466_s cn61xx;
- struct cvmx_pciercx_cfg466_s cn63xx;
- struct cvmx_pciercx_cfg466_s cn63xxp1;
- struct cvmx_pciercx_cfg466_s cn66xx;
- struct cvmx_pciercx_cfg466_s cn68xx;
- struct cvmx_pciercx_cfg466_s cn68xxp1;
- struct cvmx_pciercx_cfg466_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg467 {
- uint32_t u32;
- struct cvmx_pciercx_cfg467_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
-#else
- uint32_t data_credits:12;
- uint32_t header_credits:8;
- uint32_t reserved_20_20:1;
- uint32_t queue_mode:3;
- uint32_t reserved_24_31:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg467_s cn52xx;
- struct cvmx_pciercx_cfg467_s cn52xxp1;
- struct cvmx_pciercx_cfg467_s cn56xx;
- struct cvmx_pciercx_cfg467_s cn56xxp1;
- struct cvmx_pciercx_cfg467_s cn61xx;
- struct cvmx_pciercx_cfg467_s cn63xx;
- struct cvmx_pciercx_cfg467_s cn63xxp1;
- struct cvmx_pciercx_cfg467_s cn66xx;
- struct cvmx_pciercx_cfg467_s cn68xx;
- struct cvmx_pciercx_cfg467_s cn68xxp1;
- struct cvmx_pciercx_cfg467_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg468 {
- uint32_t u32;
- struct cvmx_pciercx_cfg468_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
-#else
- uint32_t data_credits:12;
- uint32_t header_credits:8;
- uint32_t reserved_20_20:1;
- uint32_t queue_mode:3;
- uint32_t reserved_24_31:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg468_s cn52xx;
- struct cvmx_pciercx_cfg468_s cn52xxp1;
- struct cvmx_pciercx_cfg468_s cn56xx;
- struct cvmx_pciercx_cfg468_s cn56xxp1;
- struct cvmx_pciercx_cfg468_s cn61xx;
- struct cvmx_pciercx_cfg468_s cn63xx;
- struct cvmx_pciercx_cfg468_s cn63xxp1;
- struct cvmx_pciercx_cfg468_s cn66xx;
- struct cvmx_pciercx_cfg468_s cn68xx;
- struct cvmx_pciercx_cfg468_s cn68xxp1;
- struct cvmx_pciercx_cfg468_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg490 {
- uint32_t u32;
- struct cvmx_pciercx_cfg490_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
-#else
- uint32_t data_depth:14;
- uint32_t reserved_14_15:2;
- uint32_t header_depth:10;
- uint32_t reserved_26_31:6;
-#endif
+ __BITFIELD_FIELD(uint32_t m_cfg0_filt:1,
+ __BITFIELD_FIELD(uint32_t m_io_filt:1,
+ __BITFIELD_FIELD(uint32_t msg_ctrl:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_ecrc_filt:1,
+ __BITFIELD_FIELD(uint32_t m_ecrc_filt:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_len_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_attr_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_tc_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_fun_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_rid_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_tag_err:1,
+ __BITFIELD_FIELD(uint32_t m_lk_filt:1,
+ __BITFIELD_FIELD(uint32_t m_cfg1_filt:1,
+ __BITFIELD_FIELD(uint32_t m_bar_match:1,
+ __BITFIELD_FIELD(uint32_t m_pois_filt:1,
+ __BITFIELD_FIELD(uint32_t m_fun:1,
+ __BITFIELD_FIELD(uint32_t dfcwt:1,
+ __BITFIELD_FIELD(uint32_t reserved_11_14:4,
+ __BITFIELD_FIELD(uint32_t skpiv:11,
+ ;)))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg490_s cn52xx;
- struct cvmx_pciercx_cfg490_s cn52xxp1;
- struct cvmx_pciercx_cfg490_s cn56xx;
- struct cvmx_pciercx_cfg490_s cn56xxp1;
- struct cvmx_pciercx_cfg490_s cn61xx;
- struct cvmx_pciercx_cfg490_s cn63xx;
- struct cvmx_pciercx_cfg490_s cn63xxp1;
- struct cvmx_pciercx_cfg490_s cn66xx;
- struct cvmx_pciercx_cfg490_s cn68xx;
- struct cvmx_pciercx_cfg490_s cn68xxp1;
- struct cvmx_pciercx_cfg490_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg491 {
- uint32_t u32;
- struct cvmx_pciercx_cfg491_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
-#else
- uint32_t data_depth:14;
- uint32_t reserved_14_15:2;
- uint32_t header_depth:10;
- uint32_t reserved_26_31:6;
-#endif
- } s;
- struct cvmx_pciercx_cfg491_s cn52xx;
- struct cvmx_pciercx_cfg491_s cn52xxp1;
- struct cvmx_pciercx_cfg491_s cn56xx;
- struct cvmx_pciercx_cfg491_s cn56xxp1;
- struct cvmx_pciercx_cfg491_s cn61xx;
- struct cvmx_pciercx_cfg491_s cn63xx;
- struct cvmx_pciercx_cfg491_s cn63xxp1;
- struct cvmx_pciercx_cfg491_s cn66xx;
- struct cvmx_pciercx_cfg491_s cn68xx;
- struct cvmx_pciercx_cfg491_s cn68xxp1;
- struct cvmx_pciercx_cfg491_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg492 {
- uint32_t u32;
- struct cvmx_pciercx_cfg492_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
-#else
- uint32_t data_depth:14;
- uint32_t reserved_14_15:2;
- uint32_t header_depth:10;
- uint32_t reserved_26_31:6;
-#endif
- } s;
- struct cvmx_pciercx_cfg492_s cn52xx;
- struct cvmx_pciercx_cfg492_s cn52xxp1;
- struct cvmx_pciercx_cfg492_s cn56xx;
- struct cvmx_pciercx_cfg492_s cn56xxp1;
- struct cvmx_pciercx_cfg492_s cn61xx;
- struct cvmx_pciercx_cfg492_s cn63xx;
- struct cvmx_pciercx_cfg492_s cn63xxp1;
- struct cvmx_pciercx_cfg492_s cn66xx;
- struct cvmx_pciercx_cfg492_s cn68xx;
- struct cvmx_pciercx_cfg492_s cn68xxp1;
- struct cvmx_pciercx_cfg492_s cnf71xx;
};
union cvmx_pciercx_cfg515 {
uint32_t u32;
struct cvmx_pciercx_cfg515_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t s_d_e:1;
- uint32_t ctcrb:1;
- uint32_t cpyts:1;
- uint32_t dsc:1;
- uint32_t le:9;
- uint32_t n_fts:8;
-#else
- uint32_t n_fts:8;
- uint32_t le:9;
- uint32_t dsc:1;
- uint32_t cpyts:1;
- uint32_t ctcrb:1;
- uint32_t s_d_e:1;
- uint32_t reserved_21_31:11;
-#endif
- } s;
- struct cvmx_pciercx_cfg515_s cn61xx;
- struct cvmx_pciercx_cfg515_s cn63xx;
- struct cvmx_pciercx_cfg515_s cn63xxp1;
- struct cvmx_pciercx_cfg515_s cn66xx;
- struct cvmx_pciercx_cfg515_s cn68xx;
- struct cvmx_pciercx_cfg515_s cn68xxp1;
- struct cvmx_pciercx_cfg515_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg516 {
- uint32_t u32;
- struct cvmx_pciercx_cfg516_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t phy_stat:32;
-#else
- uint32_t phy_stat:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg516_s cn52xx;
- struct cvmx_pciercx_cfg516_s cn52xxp1;
- struct cvmx_pciercx_cfg516_s cn56xx;
- struct cvmx_pciercx_cfg516_s cn56xxp1;
- struct cvmx_pciercx_cfg516_s cn61xx;
- struct cvmx_pciercx_cfg516_s cn63xx;
- struct cvmx_pciercx_cfg516_s cn63xxp1;
- struct cvmx_pciercx_cfg516_s cn66xx;
- struct cvmx_pciercx_cfg516_s cn68xx;
- struct cvmx_pciercx_cfg516_s cn68xxp1;
- struct cvmx_pciercx_cfg516_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg517 {
- uint32_t u32;
- struct cvmx_pciercx_cfg517_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t phy_ctrl:32;
-#else
- uint32_t phy_ctrl:32;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_21_31:11,
+ __BITFIELD_FIELD(uint32_t s_d_e:1,
+ __BITFIELD_FIELD(uint32_t ctcrb:1,
+ __BITFIELD_FIELD(uint32_t cpyts:1,
+ __BITFIELD_FIELD(uint32_t dsc:1,
+ __BITFIELD_FIELD(uint32_t le:9,
+ __BITFIELD_FIELD(uint32_t n_fts:8,
+ ;)))))))
} s;
- struct cvmx_pciercx_cfg517_s cn52xx;
- struct cvmx_pciercx_cfg517_s cn52xxp1;
- struct cvmx_pciercx_cfg517_s cn56xx;
- struct cvmx_pciercx_cfg517_s cn56xxp1;
- struct cvmx_pciercx_cfg517_s cn61xx;
- struct cvmx_pciercx_cfg517_s cn63xx;
- struct cvmx_pciercx_cfg517_s cn63xxp1;
- struct cvmx_pciercx_cfg517_s cn66xx;
- struct cvmx_pciercx_cfg517_s cn68xx;
- struct cvmx_pciercx_cfg517_s cn68xxp1;
- struct cvmx_pciercx_cfg517_s cnf71xx;
};
#endif
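
Expanding one of the new bodies by hand confirms the conversion is purely mechanical. Assuming the macro sketch above, the two-field body added near the top of this file's hunk (rtl/rtltl) unfolds on a big-endian build to:

	uint32_t rtl:16;
	uint32_t rtltl:16;
	;

and on a little-endian build to:

	;
	uint32_t rtltl:16;
	uint32_t rtl:16;

which matches the field order of the removed #ifdef __BIG_ENDIAN_BITFIELD and #else branches respectively; the bare semicolon is the innermost "more" argument and expands to an empty declaration.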
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index e697c2f52a62..52cf96ea43e5 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,3494 +28,101 @@
#ifndef __CVMX_SLI_DEFS_H__
#define __CVMX_SLI_DEFS_H__
-#define CVMX_SLI_BIST_STATUS (0x0000000000000580ull)
-#define CVMX_SLI_CTL_PORTX(offset) (0x0000000000000050ull + ((offset) & 3) * 16)
-#define CVMX_SLI_CTL_STATUS (0x0000000000000570ull)
-#define CVMX_SLI_DATA_OUT_CNT (0x00000000000005F0ull)
-#define CVMX_SLI_DBG_DATA (0x0000000000000310ull)
-#define CVMX_SLI_DBG_SELECT (0x0000000000000300ull)
-#define CVMX_SLI_DMAX_CNT(offset) (0x0000000000000400ull + ((offset) & 1) * 16)
-#define CVMX_SLI_DMAX_INT_LEVEL(offset) (0x00000000000003E0ull + ((offset) & 1) * 16)
-#define CVMX_SLI_DMAX_TIM(offset) (0x0000000000000420ull + ((offset) & 1) * 16)
-#define CVMX_SLI_INT_ENB_CIU (0x0000000000003CD0ull)
-#define CVMX_SLI_INT_ENB_PORTX(offset) (0x0000000000000340ull + ((offset) & 1) * 16)
-#define CVMX_SLI_INT_SUM (0x0000000000000330ull)
-#define CVMX_SLI_LAST_WIN_RDATA0 (0x0000000000000600ull)
-#define CVMX_SLI_LAST_WIN_RDATA1 (0x0000000000000610ull)
-#define CVMX_SLI_LAST_WIN_RDATA2 (0x00000000000006C0ull)
-#define CVMX_SLI_LAST_WIN_RDATA3 (0x00000000000006D0ull)
-#define CVMX_SLI_MAC_CREDIT_CNT (0x0000000000003D70ull)
-#define CVMX_SLI_MAC_CREDIT_CNT2 (0x0000000000003E10ull)
-#define CVMX_SLI_MAC_NUMBER (0x0000000000003E00ull)
-#define CVMX_SLI_MEM_ACCESS_CTL (0x00000000000002F0ull)
-#define CVMX_SLI_MEM_ACCESS_SUBIDX(offset) (0x00000000000000E0ull + ((offset) & 31) * 16 - 16*12)
-#define CVMX_SLI_MSI_ENB0 (0x0000000000003C50ull)
-#define CVMX_SLI_MSI_ENB1 (0x0000000000003C60ull)
-#define CVMX_SLI_MSI_ENB2 (0x0000000000003C70ull)
-#define CVMX_SLI_MSI_ENB3 (0x0000000000003C80ull)
-#define CVMX_SLI_MSI_RCV0 (0x0000000000003C10ull)
-#define CVMX_SLI_MSI_RCV1 (0x0000000000003C20ull)
-#define CVMX_SLI_MSI_RCV2 (0x0000000000003C30ull)
-#define CVMX_SLI_MSI_RCV3 (0x0000000000003C40ull)
-#define CVMX_SLI_MSI_RD_MAP (0x0000000000003CA0ull)
-#define CVMX_SLI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
-#define CVMX_SLI_MSI_W1C_ENB1 (0x0000000000003D00ull)
-#define CVMX_SLI_MSI_W1C_ENB2 (0x0000000000003D10ull)
-#define CVMX_SLI_MSI_W1C_ENB3 (0x0000000000003D20ull)
-#define CVMX_SLI_MSI_W1S_ENB0 (0x0000000000003D30ull)
-#define CVMX_SLI_MSI_W1S_ENB1 (0x0000000000003D40ull)
-#define CVMX_SLI_MSI_W1S_ENB2 (0x0000000000003D50ull)
-#define CVMX_SLI_MSI_W1S_ENB3 (0x0000000000003D60ull)
-#define CVMX_SLI_MSI_WR_MAP (0x0000000000003C90ull)
-#define CVMX_SLI_PCIE_MSI_RCV (0x0000000000003CB0ull)
-#define CVMX_SLI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
-#define CVMX_SLI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
-#define CVMX_SLI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
-#define CVMX_SLI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_OUT_SIZE(offset) (0x0000000000000C00ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKT_CNT_INT (0x0000000000001130ull)
-#define CVMX_SLI_PKT_CNT_INT_ENB (0x0000000000001150ull)
-#define CVMX_SLI_PKT_CTL (0x0000000000001220ull)
-#define CVMX_SLI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
-#define CVMX_SLI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
-#define CVMX_SLI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
-#define CVMX_SLI_PKT_DPADDR (0x0000000000001080ull)
-#define CVMX_SLI_PKT_INPUT_CONTROL (0x0000000000001170ull)
-#define CVMX_SLI_PKT_INSTR_ENB (0x0000000000001000ull)
-#define CVMX_SLI_PKT_INSTR_RD_SIZE (0x00000000000011A0ull)
-#define CVMX_SLI_PKT_INSTR_SIZE (0x0000000000001020ull)
-#define CVMX_SLI_PKT_INT_LEVELS (0x0000000000001120ull)
-#define CVMX_SLI_PKT_IN_BP (0x0000000000001210ull)
-#define CVMX_SLI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKT_IN_INSTR_COUNTS (0x0000000000001200ull)
-#define CVMX_SLI_PKT_IN_PCIE_PORT (0x00000000000011B0ull)
-#define CVMX_SLI_PKT_IPTR (0x0000000000001070ull)
-#define CVMX_SLI_PKT_OUTPUT_WMARK (0x0000000000001180ull)
-#define CVMX_SLI_PKT_OUT_BMODE (0x00000000000010D0ull)
-#define CVMX_SLI_PKT_OUT_BP_EN (0x0000000000001240ull)
-#define CVMX_SLI_PKT_OUT_ENB (0x0000000000001010ull)
-#define CVMX_SLI_PKT_PCIE_PORT (0x00000000000010E0ull)
-#define CVMX_SLI_PKT_PORT_IN_RST (0x00000000000011F0ull)
-#define CVMX_SLI_PKT_SLIST_ES (0x0000000000001050ull)
-#define CVMX_SLI_PKT_SLIST_NS (0x0000000000001040ull)
-#define CVMX_SLI_PKT_SLIST_ROR (0x0000000000001030ull)
-#define CVMX_SLI_PKT_TIME_INT (0x0000000000001140ull)
-#define CVMX_SLI_PKT_TIME_INT_ENB (0x0000000000001160ull)
-#define CVMX_SLI_PORTX_PKIND(offset) (0x0000000000000800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_S2M_PORTX_CTL(offset) (0x0000000000003D80ull + ((offset) & 3) * 16)
-#define CVMX_SLI_SCRATCH_1 (0x00000000000003C0ull)
-#define CVMX_SLI_SCRATCH_2 (0x00000000000003D0ull)
-#define CVMX_SLI_STATE1 (0x0000000000000620ull)
-#define CVMX_SLI_STATE2 (0x0000000000000630ull)
-#define CVMX_SLI_STATE3 (0x0000000000000640ull)
-#define CVMX_SLI_TX_PIPE (0x0000000000001230ull)
-#define CVMX_SLI_WINDOW_CTL (0x00000000000002E0ull)
-#define CVMX_SLI_WIN_RD_ADDR (0x0000000000000010ull)
-#define CVMX_SLI_WIN_RD_DATA (0x0000000000000040ull)
-#define CVMX_SLI_WIN_WR_ADDR (0x0000000000000000ull)
-#define CVMX_SLI_WIN_WR_DATA (0x0000000000000020ull)
-#define CVMX_SLI_WIN_WR_MASK (0x0000000000000030ull)
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_SLI_PCIE_MSI_RCV CVMX_SLI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ return 0x0000000000003CB0ull;
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return 0x0000000000003CB0ull;
+ default:
+ return 0x0000000000023CB0ull;
+ }
+}
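
CVMX_SLI_PCIE_MSI_RCV changes here from a fixed CSR offset (0x3CB0) to a run-time lookup, because on the newer parts handled by this patch (CNF75XX, CN73XX, and CN78XX after pass 1) the register moved to 0x23CB0; note the intentional fall-through from the CN78XX pass-1 check into the default case. Wrapping the function in a same-named macro keeps existing call sites source-compatible, so callers still use it like a constant; a minimal sketch (CVMX_SLI_PCIE_MSI_RCV_FUNC() comes from the patch above, and the usual accessor for such addresses is cvmx_read_csr() from the Octeon cvmx headers):

	/* Still reads like a constant at the call site ... */
	uint64_t addr = CVMX_SLI_PCIE_MSI_RCV;	/* ... but expands to CVMX_SLI_PCIE_MSI_RCV_FUNC() */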
-union cvmx_sli_bist_status {
- uint64_t u64;
- struct cvmx_sli_bist_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ncb_req:1;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p1_o:1;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t reserved_19_24:6;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t reserved_6_8:3;
- uint64_t dsi1_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi0_0:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
-#else
- uint64_t ncb_cmd:1;
- uint64_t msi:1;
- uint64_t dsi0_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi1_1:1;
- uint64_t reserved_6_8:3;
- uint64_t p2n1_p1:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_c0:1;
- uint64_t reserved_19_24:6;
- uint64_t cpl_p1:1;
- uint64_t cpl_p0:1;
- uint64_t n2p1_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p0_c:1;
- uint64_t ncb_req:1;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_bist_status_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t reserved_27_28:2;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t reserved_19_24:6;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t reserved_6_8:3;
- uint64_t dsi1_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi0_0:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
-#else
- uint64_t ncb_cmd:1;
- uint64_t msi:1;
- uint64_t dsi0_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi1_1:1;
- uint64_t reserved_6_8:3;
- uint64_t p2n1_p1:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_c0:1;
- uint64_t reserved_19_24:6;
- uint64_t cpl_p1:1;
- uint64_t cpl_p0:1;
- uint64_t reserved_27_28:2;
- uint64_t n2p0_o:1;
- uint64_t n2p0_c:1;
- uint64_t reserved_31_63:33;
-#endif
- } cn61xx;
- struct cvmx_sli_bist_status_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p1_o:1;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t reserved_19_24:6;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t reserved_6_8:3;
- uint64_t dsi1_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi0_0:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
-#else
- uint64_t ncb_cmd:1;
- uint64_t msi:1;
- uint64_t dsi0_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi1_1:1;
- uint64_t reserved_6_8:3;
- uint64_t p2n1_p1:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_c0:1;
- uint64_t reserved_19_24:6;
- uint64_t cpl_p1:1;
- uint64_t cpl_p0:1;
- uint64_t n2p1_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p0_c:1;
- uint64_t reserved_31_63:33;
-#endif
- } cn63xx;
- struct cvmx_sli_bist_status_cn63xx cn63xxp1;
- struct cvmx_sli_bist_status_cn61xx cn66xx;
- struct cvmx_sli_bist_status_s cn68xx;
- struct cvmx_sli_bist_status_s cn68xxp1;
- struct cvmx_sli_bist_status_cn61xx cnf71xx;
-};
union cvmx_sli_ctl_portx {
uint64_t u64;
struct cvmx_sli_ctl_portx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_22_63:42;
- uint64_t intd:1;
- uint64_t intc:1;
- uint64_t intb:1;
- uint64_t inta:1;
- uint64_t dis_port:1;
- uint64_t waitl_com:1;
- uint64_t intd_map:2;
- uint64_t intc_map:2;
- uint64_t intb_map:2;
- uint64_t inta_map:2;
- uint64_t ctlp_ro:1;
- uint64_t reserved_6_6:1;
- uint64_t ptlp_ro:1;
- uint64_t reserved_1_4:4;
- uint64_t wait_com:1;
-#else
- uint64_t wait_com:1;
- uint64_t reserved_1_4:4;
- uint64_t ptlp_ro:1;
- uint64_t reserved_6_6:1;
- uint64_t ctlp_ro:1;
- uint64_t inta_map:2;
- uint64_t intb_map:2;
- uint64_t intc_map:2;
- uint64_t intd_map:2;
- uint64_t waitl_com:1;
- uint64_t dis_port:1;
- uint64_t inta:1;
- uint64_t intb:1;
- uint64_t intc:1;
- uint64_t intd:1;
- uint64_t reserved_22_63:42;
-#endif
- } s;
- struct cvmx_sli_ctl_portx_s cn61xx;
- struct cvmx_sli_ctl_portx_s cn63xx;
- struct cvmx_sli_ctl_portx_s cn63xxp1;
- struct cvmx_sli_ctl_portx_s cn66xx;
- struct cvmx_sli_ctl_portx_s cn68xx;
- struct cvmx_sli_ctl_portx_s cn68xxp1;
- struct cvmx_sli_ctl_portx_s cnf71xx;
-};
-
-union cvmx_sli_ctl_status {
- uint64_t u64;
- struct cvmx_sli_ctl_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t p1_ntags:6;
- uint64_t p0_ntags:6;
- uint64_t chip_rev:8;
-#else
- uint64_t chip_rev:8;
- uint64_t p0_ntags:6;
- uint64_t p1_ntags:6;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_sli_ctl_status_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t p0_ntags:6;
- uint64_t chip_rev:8;
-#else
- uint64_t chip_rev:8;
- uint64_t p0_ntags:6;
- uint64_t reserved_14_63:50;
-#endif
- } cn61xx;
- struct cvmx_sli_ctl_status_s cn63xx;
- struct cvmx_sli_ctl_status_s cn63xxp1;
- struct cvmx_sli_ctl_status_cn61xx cn66xx;
- struct cvmx_sli_ctl_status_s cn68xx;
- struct cvmx_sli_ctl_status_s cn68xxp1;
- struct cvmx_sli_ctl_status_cn61xx cnf71xx;
-};
-
-union cvmx_sli_data_out_cnt {
- uint64_t u64;
- struct cvmx_sli_data_out_cnt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_44_63:20;
- uint64_t p1_ucnt:16;
- uint64_t p1_fcnt:6;
- uint64_t p0_ucnt:16;
- uint64_t p0_fcnt:6;
-#else
- uint64_t p0_fcnt:6;
- uint64_t p0_ucnt:16;
- uint64_t p1_fcnt:6;
- uint64_t p1_ucnt:16;
- uint64_t reserved_44_63:20;
-#endif
- } s;
- struct cvmx_sli_data_out_cnt_s cn61xx;
- struct cvmx_sli_data_out_cnt_s cn63xx;
- struct cvmx_sli_data_out_cnt_s cn63xxp1;
- struct cvmx_sli_data_out_cnt_s cn66xx;
- struct cvmx_sli_data_out_cnt_s cn68xx;
- struct cvmx_sli_data_out_cnt_s cn68xxp1;
- struct cvmx_sli_data_out_cnt_s cnf71xx;
-};
-
-union cvmx_sli_dbg_data {
- uint64_t u64;
- struct cvmx_sli_dbg_data_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t dsel_ext:1;
- uint64_t data:17;
-#else
- uint64_t data:17;
- uint64_t dsel_ext:1;
- uint64_t reserved_18_63:46;
-#endif
- } s;
- struct cvmx_sli_dbg_data_s cn61xx;
- struct cvmx_sli_dbg_data_s cn63xx;
- struct cvmx_sli_dbg_data_s cn63xxp1;
- struct cvmx_sli_dbg_data_s cn66xx;
- struct cvmx_sli_dbg_data_s cn68xx;
- struct cvmx_sli_dbg_data_s cn68xxp1;
- struct cvmx_sli_dbg_data_s cnf71xx;
-};
-
-union cvmx_sli_dbg_select {
- uint64_t u64;
- struct cvmx_sli_dbg_select_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_33_63:31;
- uint64_t adbg_sel:1;
- uint64_t dbg_sel:32;
-#else
- uint64_t dbg_sel:32;
- uint64_t adbg_sel:1;
- uint64_t reserved_33_63:31;
-#endif
- } s;
- struct cvmx_sli_dbg_select_s cn61xx;
- struct cvmx_sli_dbg_select_s cn63xx;
- struct cvmx_sli_dbg_select_s cn63xxp1;
- struct cvmx_sli_dbg_select_s cn66xx;
- struct cvmx_sli_dbg_select_s cn68xx;
- struct cvmx_sli_dbg_select_s cn68xxp1;
- struct cvmx_sli_dbg_select_s cnf71xx;
-};
-
-union cvmx_sli_dmax_cnt {
- uint64_t u64;
- struct cvmx_sli_dmax_cnt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_dmax_cnt_s cn61xx;
- struct cvmx_sli_dmax_cnt_s cn63xx;
- struct cvmx_sli_dmax_cnt_s cn63xxp1;
- struct cvmx_sli_dmax_cnt_s cn66xx;
- struct cvmx_sli_dmax_cnt_s cn68xx;
- struct cvmx_sli_dmax_cnt_s cn68xxp1;
- struct cvmx_sli_dmax_cnt_s cnf71xx;
-};
-
-union cvmx_sli_dmax_int_level {
- uint64_t u64;
- struct cvmx_sli_dmax_int_level_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t time:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t time:32;
-#endif
- } s;
- struct cvmx_sli_dmax_int_level_s cn61xx;
- struct cvmx_sli_dmax_int_level_s cn63xx;
- struct cvmx_sli_dmax_int_level_s cn63xxp1;
- struct cvmx_sli_dmax_int_level_s cn66xx;
- struct cvmx_sli_dmax_int_level_s cn68xx;
- struct cvmx_sli_dmax_int_level_s cn68xxp1;
- struct cvmx_sli_dmax_int_level_s cnf71xx;
-};
-
-union cvmx_sli_dmax_tim {
- uint64_t u64;
- struct cvmx_sli_dmax_tim_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t tim:32;
-#else
- uint64_t tim:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_dmax_tim_s cn61xx;
- struct cvmx_sli_dmax_tim_s cn63xx;
- struct cvmx_sli_dmax_tim_s cn63xxp1;
- struct cvmx_sli_dmax_tim_s cn66xx;
- struct cvmx_sli_dmax_tim_s cn68xx;
- struct cvmx_sli_dmax_tim_s cn68xxp1;
- struct cvmx_sli_dmax_tim_s cnf71xx;
-};
-
-union cvmx_sli_int_enb_ciu {
- uint64_t u64;
- struct cvmx_sli_int_enb_ciu_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t reserved_18_19:2;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_19:2;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_sli_int_enb_ciu_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t reserved_18_19:2;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_19:2;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn61xx;
- struct cvmx_sli_int_enb_ciu_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_18_31:14;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_31:14;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn63xx;
- struct cvmx_sli_int_enb_ciu_cn63xx cn63xxp1;
- struct cvmx_sli_int_enb_ciu_cn61xx cn66xx;
- struct cvmx_sli_int_enb_ciu_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_18_31:14;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_31:14;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn68xx;
- struct cvmx_sli_int_enb_ciu_cn68xx cn68xxp1;
- struct cvmx_sli_int_enb_ciu_cn61xx cnf71xx;
-};
-
-union cvmx_sli_int_enb_portx {
- uint64_t u64;
- struct cvmx_sli_int_enb_portx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_sli_int_enb_portx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn61xx;
- struct cvmx_sli_int_enb_portx_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn63xx;
- struct cvmx_sli_int_enb_portx_cn63xx cn63xxp1;
- struct cvmx_sli_int_enb_portx_cn61xx cn66xx;
- struct cvmx_sli_int_enb_portx_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn68xx;
- struct cvmx_sli_int_enb_portx_cn68xx cn68xxp1;
- struct cvmx_sli_int_enb_portx_cn61xx cnf71xx;
-};
-
-union cvmx_sli_int_sum {
- uint64_t u64;
- struct cvmx_sli_int_sum_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_sli_int_sum_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn61xx;
- struct cvmx_sli_int_sum_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn63xx;
- struct cvmx_sli_int_sum_cn63xx cn63xxp1;
- struct cvmx_sli_int_sum_cn61xx cn66xx;
- struct cvmx_sli_int_sum_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn68xx;
- struct cvmx_sli_int_sum_cn68xx cn68xxp1;
- struct cvmx_sli_int_sum_cn61xx cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata0 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_22_63:42,
+ __BITFIELD_FIELD(uint64_t intd:1,
+ __BITFIELD_FIELD(uint64_t intc:1,
+ __BITFIELD_FIELD(uint64_t intb:1,
+ __BITFIELD_FIELD(uint64_t inta:1,
+ __BITFIELD_FIELD(uint64_t dis_port:1,
+ __BITFIELD_FIELD(uint64_t waitl_com:1,
+ __BITFIELD_FIELD(uint64_t intd_map:2,
+ __BITFIELD_FIELD(uint64_t intc_map:2,
+ __BITFIELD_FIELD(uint64_t intb_map:2,
+ __BITFIELD_FIELD(uint64_t inta_map:2,
+ __BITFIELD_FIELD(uint64_t ctlp_ro:1,
+ __BITFIELD_FIELD(uint64_t reserved_6_6:1,
+ __BITFIELD_FIELD(uint64_t ptlp_ro:1,
+ __BITFIELD_FIELD(uint64_t reserved_1_4:4,
+ __BITFIELD_FIELD(uint64_t wait_com:1,
+ ;))))))))))))))))
} s;
- struct cvmx_sli_last_win_rdata0_s cn61xx;
- struct cvmx_sli_last_win_rdata0_s cn63xx;
- struct cvmx_sli_last_win_rdata0_s cn63xxp1;
- struct cvmx_sli_last_win_rdata0_s cn66xx;
- struct cvmx_sli_last_win_rdata0_s cn68xx;
- struct cvmx_sli_last_win_rdata0_s cn68xxp1;
- struct cvmx_sli_last_win_rdata0_s cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata1 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_last_win_rdata1_s cn61xx;
- struct cvmx_sli_last_win_rdata1_s cn63xx;
- struct cvmx_sli_last_win_rdata1_s cn63xxp1;
- struct cvmx_sli_last_win_rdata1_s cn66xx;
- struct cvmx_sli_last_win_rdata1_s cn68xx;
- struct cvmx_sli_last_win_rdata1_s cn68xxp1;
- struct cvmx_sli_last_win_rdata1_s cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata2 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_last_win_rdata2_s cn61xx;
- struct cvmx_sli_last_win_rdata2_s cn66xx;
- struct cvmx_sli_last_win_rdata2_s cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata3 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_last_win_rdata3_s cn61xx;
- struct cvmx_sli_last_win_rdata3_s cn66xx;
- struct cvmx_sli_last_win_rdata3_s cnf71xx;
-};
-
-union cvmx_sli_mac_credit_cnt {
- uint64_t u64;
- struct cvmx_sli_mac_credit_cnt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t p1_c_d:1;
- uint64_t p1_n_d:1;
- uint64_t p1_p_d:1;
- uint64_t p0_c_d:1;
- uint64_t p0_n_d:1;
- uint64_t p0_p_d:1;
- uint64_t p1_ccnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_pcnt:8;
-#else
- uint64_t p0_pcnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_ccnt:8;
- uint64_t p0_p_d:1;
- uint64_t p0_n_d:1;
- uint64_t p0_c_d:1;
- uint64_t p1_p_d:1;
- uint64_t p1_n_d:1;
- uint64_t p1_c_d:1;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_mac_credit_cnt_s cn61xx;
- struct cvmx_sli_mac_credit_cnt_s cn63xx;
- struct cvmx_sli_mac_credit_cnt_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t p1_ccnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_pcnt:8;
-#else
- uint64_t p0_pcnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_ccnt:8;
- uint64_t reserved_48_63:16;
-#endif
- } cn63xxp1;
- struct cvmx_sli_mac_credit_cnt_s cn66xx;
- struct cvmx_sli_mac_credit_cnt_s cn68xx;
- struct cvmx_sli_mac_credit_cnt_s cn68xxp1;
- struct cvmx_sli_mac_credit_cnt_s cnf71xx;
-};
-
-union cvmx_sli_mac_credit_cnt2 {
- uint64_t u64;
- struct cvmx_sli_mac_credit_cnt2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t p3_c_d:1;
- uint64_t p3_n_d:1;
- uint64_t p3_p_d:1;
- uint64_t p2_c_d:1;
- uint64_t p2_n_d:1;
- uint64_t p2_p_d:1;
- uint64_t p3_ccnt:8;
- uint64_t p3_ncnt:8;
- uint64_t p3_pcnt:8;
- uint64_t p2_ccnt:8;
- uint64_t p2_ncnt:8;
- uint64_t p2_pcnt:8;
-#else
- uint64_t p2_pcnt:8;
- uint64_t p2_ncnt:8;
- uint64_t p2_ccnt:8;
- uint64_t p3_pcnt:8;
- uint64_t p3_ncnt:8;
- uint64_t p3_ccnt:8;
- uint64_t p2_p_d:1;
- uint64_t p2_n_d:1;
- uint64_t p2_c_d:1;
- uint64_t p3_p_d:1;
- uint64_t p3_n_d:1;
- uint64_t p3_c_d:1;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_mac_credit_cnt2_s cn61xx;
- struct cvmx_sli_mac_credit_cnt2_s cn66xx;
- struct cvmx_sli_mac_credit_cnt2_s cnf71xx;
-};
-
-union cvmx_sli_mac_number {
- uint64_t u64;
- struct cvmx_sli_mac_number_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t a_mode:1;
- uint64_t num:8;
-#else
- uint64_t num:8;
- uint64_t a_mode:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_sli_mac_number_s cn61xx;
- struct cvmx_sli_mac_number_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t num:8;
-#else
- uint64_t num:8;
- uint64_t reserved_8_63:56;
-#endif
- } cn63xx;
- struct cvmx_sli_mac_number_s cn66xx;
- struct cvmx_sli_mac_number_cn63xx cn68xx;
- struct cvmx_sli_mac_number_cn63xx cn68xxp1;
- struct cvmx_sli_mac_number_s cnf71xx;
};
union cvmx_sli_mem_access_ctl {
uint64_t u64;
struct cvmx_sli_mem_access_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t max_word:4;
- uint64_t timer:10;
-#else
- uint64_t timer:10;
- uint64_t max_word:4;
- uint64_t reserved_14_63:50;
-#endif
- } s;
- struct cvmx_sli_mem_access_ctl_s cn61xx;
- struct cvmx_sli_mem_access_ctl_s cn63xx;
- struct cvmx_sli_mem_access_ctl_s cn63xxp1;
- struct cvmx_sli_mem_access_ctl_s cn66xx;
- struct cvmx_sli_mem_access_ctl_s cn68xx;
- struct cvmx_sli_mem_access_ctl_s cn68xxp1;
- struct cvmx_sli_mem_access_ctl_s cnf71xx;
-};
-
-union cvmx_sli_mem_access_subidx {
- uint64_t u64;
- struct cvmx_sli_mem_access_subidx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t zero:1;
- uint64_t port:3;
- uint64_t nmerge:1;
- uint64_t esr:2;
- uint64_t esw:2;
- uint64_t wtype:2;
- uint64_t rtype:2;
- uint64_t reserved_0_29:30;
-#else
- uint64_t reserved_0_29:30;
- uint64_t rtype:2;
- uint64_t wtype:2;
- uint64_t esw:2;
- uint64_t esr:2;
- uint64_t nmerge:1;
- uint64_t port:3;
- uint64_t zero:1;
- uint64_t reserved_43_63:21;
-#endif
- } s;
- struct cvmx_sli_mem_access_subidx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t zero:1;
- uint64_t port:3;
- uint64_t nmerge:1;
- uint64_t esr:2;
- uint64_t esw:2;
- uint64_t wtype:2;
- uint64_t rtype:2;
- uint64_t ba:30;
-#else
- uint64_t ba:30;
- uint64_t rtype:2;
- uint64_t wtype:2;
- uint64_t esw:2;
- uint64_t esr:2;
- uint64_t nmerge:1;
- uint64_t port:3;
- uint64_t zero:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn61xx;
- struct cvmx_sli_mem_access_subidx_cn61xx cn63xx;
- struct cvmx_sli_mem_access_subidx_cn61xx cn63xxp1;
- struct cvmx_sli_mem_access_subidx_cn61xx cn66xx;
- struct cvmx_sli_mem_access_subidx_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t zero:1;
- uint64_t port:3;
- uint64_t nmerge:1;
- uint64_t esr:2;
- uint64_t esw:2;
- uint64_t wtype:2;
- uint64_t rtype:2;
- uint64_t ba:28;
- uint64_t reserved_0_1:2;
-#else
- uint64_t reserved_0_1:2;
- uint64_t ba:28;
- uint64_t rtype:2;
- uint64_t wtype:2;
- uint64_t esw:2;
- uint64_t esr:2;
- uint64_t nmerge:1;
- uint64_t port:3;
- uint64_t zero:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn68xx;
- struct cvmx_sli_mem_access_subidx_cn68xx cn68xxp1;
- struct cvmx_sli_mem_access_subidx_cn61xx cnf71xx;
-};
-
-union cvmx_sli_msi_enb0 {
- uint64_t u64;
- struct cvmx_sli_msi_enb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb0_s cn61xx;
- struct cvmx_sli_msi_enb0_s cn63xx;
- struct cvmx_sli_msi_enb0_s cn63xxp1;
- struct cvmx_sli_msi_enb0_s cn66xx;
- struct cvmx_sli_msi_enb0_s cn68xx;
- struct cvmx_sli_msi_enb0_s cn68xxp1;
- struct cvmx_sli_msi_enb0_s cnf71xx;
-};
-
-union cvmx_sli_msi_enb1 {
- uint64_t u64;
- struct cvmx_sli_msi_enb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb1_s cn61xx;
- struct cvmx_sli_msi_enb1_s cn63xx;
- struct cvmx_sli_msi_enb1_s cn63xxp1;
- struct cvmx_sli_msi_enb1_s cn66xx;
- struct cvmx_sli_msi_enb1_s cn68xx;
- struct cvmx_sli_msi_enb1_s cn68xxp1;
- struct cvmx_sli_msi_enb1_s cnf71xx;
-};
-
-union cvmx_sli_msi_enb2 {
- uint64_t u64;
- struct cvmx_sli_msi_enb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb2_s cn61xx;
- struct cvmx_sli_msi_enb2_s cn63xx;
- struct cvmx_sli_msi_enb2_s cn63xxp1;
- struct cvmx_sli_msi_enb2_s cn66xx;
- struct cvmx_sli_msi_enb2_s cn68xx;
- struct cvmx_sli_msi_enb2_s cn68xxp1;
- struct cvmx_sli_msi_enb2_s cnf71xx;
-};
-
-union cvmx_sli_msi_enb3 {
- uint64_t u64;
- struct cvmx_sli_msi_enb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb3_s cn61xx;
- struct cvmx_sli_msi_enb3_s cn63xx;
- struct cvmx_sli_msi_enb3_s cn63xxp1;
- struct cvmx_sli_msi_enb3_s cn66xx;
- struct cvmx_sli_msi_enb3_s cn68xx;
- struct cvmx_sli_msi_enb3_s cn68xxp1;
- struct cvmx_sli_msi_enb3_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv0 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv0_s cn61xx;
- struct cvmx_sli_msi_rcv0_s cn63xx;
- struct cvmx_sli_msi_rcv0_s cn63xxp1;
- struct cvmx_sli_msi_rcv0_s cn66xx;
- struct cvmx_sli_msi_rcv0_s cn68xx;
- struct cvmx_sli_msi_rcv0_s cn68xxp1;
- struct cvmx_sli_msi_rcv0_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv1 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv1_s cn61xx;
- struct cvmx_sli_msi_rcv1_s cn63xx;
- struct cvmx_sli_msi_rcv1_s cn63xxp1;
- struct cvmx_sli_msi_rcv1_s cn66xx;
- struct cvmx_sli_msi_rcv1_s cn68xx;
- struct cvmx_sli_msi_rcv1_s cn68xxp1;
- struct cvmx_sli_msi_rcv1_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv2 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv2_s cn61xx;
- struct cvmx_sli_msi_rcv2_s cn63xx;
- struct cvmx_sli_msi_rcv2_s cn63xxp1;
- struct cvmx_sli_msi_rcv2_s cn66xx;
- struct cvmx_sli_msi_rcv2_s cn68xx;
- struct cvmx_sli_msi_rcv2_s cn68xxp1;
- struct cvmx_sli_msi_rcv2_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv3 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv3_s cn61xx;
- struct cvmx_sli_msi_rcv3_s cn63xx;
- struct cvmx_sli_msi_rcv3_s cn63xxp1;
- struct cvmx_sli_msi_rcv3_s cn66xx;
- struct cvmx_sli_msi_rcv3_s cn68xx;
- struct cvmx_sli_msi_rcv3_s cn68xxp1;
- struct cvmx_sli_msi_rcv3_s cnf71xx;
-};
-
-union cvmx_sli_msi_rd_map {
- uint64_t u64;
- struct cvmx_sli_msi_rd_map_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t rd_int:8;
- uint64_t msi_int:8;
-#else
- uint64_t msi_int:8;
- uint64_t rd_int:8;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_sli_msi_rd_map_s cn61xx;
- struct cvmx_sli_msi_rd_map_s cn63xx;
- struct cvmx_sli_msi_rd_map_s cn63xxp1;
- struct cvmx_sli_msi_rd_map_s cn66xx;
- struct cvmx_sli_msi_rd_map_s cn68xx;
- struct cvmx_sli_msi_rd_map_s cn68xxp1;
- struct cvmx_sli_msi_rd_map_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb0 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb0_s cn61xx;
- struct cvmx_sli_msi_w1c_enb0_s cn63xx;
- struct cvmx_sli_msi_w1c_enb0_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb0_s cn66xx;
- struct cvmx_sli_msi_w1c_enb0_s cn68xx;
- struct cvmx_sli_msi_w1c_enb0_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb0_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb1 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb1_s cn61xx;
- struct cvmx_sli_msi_w1c_enb1_s cn63xx;
- struct cvmx_sli_msi_w1c_enb1_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb1_s cn66xx;
- struct cvmx_sli_msi_w1c_enb1_s cn68xx;
- struct cvmx_sli_msi_w1c_enb1_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb1_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb2 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb2_s cn61xx;
- struct cvmx_sli_msi_w1c_enb2_s cn63xx;
- struct cvmx_sli_msi_w1c_enb2_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb2_s cn66xx;
- struct cvmx_sli_msi_w1c_enb2_s cn68xx;
- struct cvmx_sli_msi_w1c_enb2_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb2_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb3 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb3_s cn61xx;
- struct cvmx_sli_msi_w1c_enb3_s cn63xx;
- struct cvmx_sli_msi_w1c_enb3_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb3_s cn66xx;
- struct cvmx_sli_msi_w1c_enb3_s cn68xx;
- struct cvmx_sli_msi_w1c_enb3_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb3_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb0 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb0_s cn61xx;
- struct cvmx_sli_msi_w1s_enb0_s cn63xx;
- struct cvmx_sli_msi_w1s_enb0_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb0_s cn66xx;
- struct cvmx_sli_msi_w1s_enb0_s cn68xx;
- struct cvmx_sli_msi_w1s_enb0_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb0_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb1 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb1_s cn61xx;
- struct cvmx_sli_msi_w1s_enb1_s cn63xx;
- struct cvmx_sli_msi_w1s_enb1_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb1_s cn66xx;
- struct cvmx_sli_msi_w1s_enb1_s cn68xx;
- struct cvmx_sli_msi_w1s_enb1_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb1_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb2 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb2_s cn61xx;
- struct cvmx_sli_msi_w1s_enb2_s cn63xx;
- struct cvmx_sli_msi_w1s_enb2_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb2_s cn66xx;
- struct cvmx_sli_msi_w1s_enb2_s cn68xx;
- struct cvmx_sli_msi_w1s_enb2_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb2_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb3 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb3_s cn61xx;
- struct cvmx_sli_msi_w1s_enb3_s cn63xx;
- struct cvmx_sli_msi_w1s_enb3_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb3_s cn66xx;
- struct cvmx_sli_msi_w1s_enb3_s cn68xx;
- struct cvmx_sli_msi_w1s_enb3_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb3_s cnf71xx;
-};
-
-union cvmx_sli_msi_wr_map {
- uint64_t u64;
- struct cvmx_sli_msi_wr_map_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t ciu_int:8;
- uint64_t msi_int:8;
-#else
- uint64_t msi_int:8;
- uint64_t ciu_int:8;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_sli_msi_wr_map_s cn61xx;
- struct cvmx_sli_msi_wr_map_s cn63xx;
- struct cvmx_sli_msi_wr_map_s cn63xxp1;
- struct cvmx_sli_msi_wr_map_s cn66xx;
- struct cvmx_sli_msi_wr_map_s cn68xx;
- struct cvmx_sli_msi_wr_map_s cn68xxp1;
- struct cvmx_sli_msi_wr_map_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t intr:8;
-#else
- uint64_t intr:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv_b1 {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_b1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t intr:8;
- uint64_t reserved_0_7:8;
-#else
- uint64_t reserved_0_7:8;
- uint64_t intr:8;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_b1_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv_b2 {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_b2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t intr:8;
- uint64_t reserved_0_15:16;
-#else
- uint64_t reserved_0_15:16;
- uint64_t intr:8;
- uint64_t reserved_24_63:40;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_b2_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv_b3 {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_b3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t intr:8;
- uint64_t reserved_0_23:24;
-#else
- uint64_t reserved_0_23:24;
- uint64_t intr:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_b3_s cnf71xx;
-};
-
-union cvmx_sli_pktx_cnts {
- uint64_t u64;
- struct cvmx_sli_pktx_cnts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t timer:22;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t timer:22;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_pktx_cnts_s cn61xx;
- struct cvmx_sli_pktx_cnts_s cn63xx;
- struct cvmx_sli_pktx_cnts_s cn63xxp1;
- struct cvmx_sli_pktx_cnts_s cn66xx;
- struct cvmx_sli_pktx_cnts_s cn68xx;
- struct cvmx_sli_pktx_cnts_s cn68xxp1;
- struct cvmx_sli_pktx_cnts_s cnf71xx;
-};
-
-union cvmx_sli_pktx_in_bp {
- uint64_t u64;
- struct cvmx_sli_pktx_in_bp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wmark:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t wmark:32;
-#endif
- } s;
- struct cvmx_sli_pktx_in_bp_s cn61xx;
- struct cvmx_sli_pktx_in_bp_s cn63xx;
- struct cvmx_sli_pktx_in_bp_s cn63xxp1;
- struct cvmx_sli_pktx_in_bp_s cn66xx;
- struct cvmx_sli_pktx_in_bp_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_baddr {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_baddr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t addr:61;
- uint64_t reserved_0_2:3;
-#else
- uint64_t reserved_0_2:3;
- uint64_t addr:61;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_14_63:50,
+ __BITFIELD_FIELD(uint64_t max_word:4,
+ __BITFIELD_FIELD(uint64_t timer:10,
+ ;)))
} s;
- struct cvmx_sli_pktx_instr_baddr_s cn61xx;
- struct cvmx_sli_pktx_instr_baddr_s cn63xx;
- struct cvmx_sli_pktx_instr_baddr_s cn63xxp1;
- struct cvmx_sli_pktx_instr_baddr_s cn66xx;
- struct cvmx_sli_pktx_instr_baddr_s cn68xx;
- struct cvmx_sli_pktx_instr_baddr_s cn68xxp1;
- struct cvmx_sli_pktx_instr_baddr_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_baoff_dbell {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_baoff_dbell_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t aoff:32;
- uint64_t dbell:32;
-#else
- uint64_t dbell:32;
- uint64_t aoff:32;
-#endif
- } s;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn61xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn63xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn63xxp1;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn66xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn68xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn68xxp1;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_fifo_rsize {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_fifo_rsize_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t max:9;
- uint64_t rrp:9;
- uint64_t wrp:9;
- uint64_t fcnt:5;
- uint64_t rsize:32;
-#else
- uint64_t rsize:32;
- uint64_t fcnt:5;
- uint64_t wrp:9;
- uint64_t rrp:9;
- uint64_t max:9;
-#endif
- } s;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn61xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn63xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn63xxp1;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn66xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn68xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn68xxp1;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_header {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_header_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_44_63:20;
- uint64_t pbp:1;
- uint64_t reserved_38_42:5;
- uint64_t rparmode:2;
- uint64_t reserved_35_35:1;
- uint64_t rskp_len:7;
- uint64_t rngrpext:2;
- uint64_t rnqos:1;
- uint64_t rngrp:1;
- uint64_t rntt:1;
- uint64_t rntag:1;
- uint64_t use_ihdr:1;
- uint64_t reserved_16_20:5;
- uint64_t par_mode:2;
- uint64_t reserved_13_13:1;
- uint64_t skp_len:7;
- uint64_t ngrpext:2;
- uint64_t nqos:1;
- uint64_t ngrp:1;
- uint64_t ntt:1;
- uint64_t ntag:1;
-#else
- uint64_t ntag:1;
- uint64_t ntt:1;
- uint64_t ngrp:1;
- uint64_t nqos:1;
- uint64_t ngrpext:2;
- uint64_t skp_len:7;
- uint64_t reserved_13_13:1;
- uint64_t par_mode:2;
- uint64_t reserved_16_20:5;
- uint64_t use_ihdr:1;
- uint64_t rntag:1;
- uint64_t rntt:1;
- uint64_t rngrp:1;
- uint64_t rnqos:1;
- uint64_t rngrpext:2;
- uint64_t rskp_len:7;
- uint64_t reserved_35_35:1;
- uint64_t rparmode:2;
- uint64_t reserved_38_42:5;
- uint64_t pbp:1;
- uint64_t reserved_44_63:20;
-#endif
- } s;
- struct cvmx_sli_pktx_instr_header_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_44_63:20;
- uint64_t pbp:1;
- uint64_t reserved_38_42:5;
- uint64_t rparmode:2;
- uint64_t reserved_35_35:1;
- uint64_t rskp_len:7;
- uint64_t reserved_26_27:2;
- uint64_t rnqos:1;
- uint64_t rngrp:1;
- uint64_t rntt:1;
- uint64_t rntag:1;
- uint64_t use_ihdr:1;
- uint64_t reserved_16_20:5;
- uint64_t par_mode:2;
- uint64_t reserved_13_13:1;
- uint64_t skp_len:7;
- uint64_t reserved_4_5:2;
- uint64_t nqos:1;
- uint64_t ngrp:1;
- uint64_t ntt:1;
- uint64_t ntag:1;
-#else
- uint64_t ntag:1;
- uint64_t ntt:1;
- uint64_t ngrp:1;
- uint64_t nqos:1;
- uint64_t reserved_4_5:2;
- uint64_t skp_len:7;
- uint64_t reserved_13_13:1;
- uint64_t par_mode:2;
- uint64_t reserved_16_20:5;
- uint64_t use_ihdr:1;
- uint64_t rntag:1;
- uint64_t rntt:1;
- uint64_t rngrp:1;
- uint64_t rnqos:1;
- uint64_t reserved_26_27:2;
- uint64_t rskp_len:7;
- uint64_t reserved_35_35:1;
- uint64_t rparmode:2;
- uint64_t reserved_38_42:5;
- uint64_t pbp:1;
- uint64_t reserved_44_63:20;
-#endif
- } cn61xx;
- struct cvmx_sli_pktx_instr_header_cn61xx cn63xx;
- struct cvmx_sli_pktx_instr_header_cn61xx cn63xxp1;
- struct cvmx_sli_pktx_instr_header_cn61xx cn66xx;
- struct cvmx_sli_pktx_instr_header_s cn68xx;
- struct cvmx_sli_pktx_instr_header_cn61xx cn68xxp1;
- struct cvmx_sli_pktx_instr_header_cn61xx cnf71xx;
-};
-
-union cvmx_sli_pktx_out_size {
- uint64_t u64;
- struct cvmx_sli_pktx_out_size_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t isize:7;
- uint64_t bsize:16;
-#else
- uint64_t bsize:16;
- uint64_t isize:7;
- uint64_t reserved_23_63:41;
-#endif
- } s;
- struct cvmx_sli_pktx_out_size_s cn61xx;
- struct cvmx_sli_pktx_out_size_s cn63xx;
- struct cvmx_sli_pktx_out_size_s cn63xxp1;
- struct cvmx_sli_pktx_out_size_s cn66xx;
- struct cvmx_sli_pktx_out_size_s cn68xx;
- struct cvmx_sli_pktx_out_size_s cn68xxp1;
- struct cvmx_sli_pktx_out_size_s cnf71xx;
-};
-
-union cvmx_sli_pktx_slist_baddr {
- uint64_t u64;
- struct cvmx_sli_pktx_slist_baddr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t addr:60;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t addr:60;
-#endif
- } s;
- struct cvmx_sli_pktx_slist_baddr_s cn61xx;
- struct cvmx_sli_pktx_slist_baddr_s cn63xx;
- struct cvmx_sli_pktx_slist_baddr_s cn63xxp1;
- struct cvmx_sli_pktx_slist_baddr_s cn66xx;
- struct cvmx_sli_pktx_slist_baddr_s cn68xx;
- struct cvmx_sli_pktx_slist_baddr_s cn68xxp1;
- struct cvmx_sli_pktx_slist_baddr_s cnf71xx;
-};
-
-union cvmx_sli_pktx_slist_baoff_dbell {
- uint64_t u64;
- struct cvmx_sli_pktx_slist_baoff_dbell_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t aoff:32;
- uint64_t dbell:32;
-#else
- uint64_t dbell:32;
- uint64_t aoff:32;
-#endif
- } s;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn61xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn63xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn63xxp1;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn66xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn68xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn68xxp1;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cnf71xx;
-};
-
-union cvmx_sli_pktx_slist_fifo_rsize {
- uint64_t u64;
- struct cvmx_sli_pktx_slist_fifo_rsize_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t rsize:32;
-#else
- uint64_t rsize:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn61xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn63xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn63xxp1;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn66xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn68xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn68xxp1;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cnf71xx;
-};
-
-union cvmx_sli_pkt_cnt_int {
- uint64_t u64;
- struct cvmx_sli_pkt_cnt_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_cnt_int_s cn61xx;
- struct cvmx_sli_pkt_cnt_int_s cn63xx;
- struct cvmx_sli_pkt_cnt_int_s cn63xxp1;
- struct cvmx_sli_pkt_cnt_int_s cn66xx;
- struct cvmx_sli_pkt_cnt_int_s cn68xx;
- struct cvmx_sli_pkt_cnt_int_s cn68xxp1;
- struct cvmx_sli_pkt_cnt_int_s cnf71xx;
-};
-
-union cvmx_sli_pkt_cnt_int_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_cnt_int_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_cnt_int_enb_s cn61xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn63xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn63xxp1;
- struct cvmx_sli_pkt_cnt_int_enb_s cn66xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn68xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn68xxp1;
- struct cvmx_sli_pkt_cnt_int_enb_s cnf71xx;
-};
-
-union cvmx_sli_pkt_ctl {
- uint64_t u64;
- struct cvmx_sli_pkt_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t ring_en:1;
- uint64_t pkt_bp:4;
-#else
- uint64_t pkt_bp:4;
- uint64_t ring_en:1;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_sli_pkt_ctl_s cn61xx;
- struct cvmx_sli_pkt_ctl_s cn63xx;
- struct cvmx_sli_pkt_ctl_s cn63xxp1;
- struct cvmx_sli_pkt_ctl_s cn66xx;
- struct cvmx_sli_pkt_ctl_s cn68xx;
- struct cvmx_sli_pkt_ctl_s cn68xxp1;
- struct cvmx_sli_pkt_ctl_s cnf71xx;
-};
-
-union cvmx_sli_pkt_data_out_es {
- uint64_t u64;
- struct cvmx_sli_pkt_data_out_es_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t es:64;
-#else
- uint64_t es:64;
-#endif
- } s;
- struct cvmx_sli_pkt_data_out_es_s cn61xx;
- struct cvmx_sli_pkt_data_out_es_s cn63xx;
- struct cvmx_sli_pkt_data_out_es_s cn63xxp1;
- struct cvmx_sli_pkt_data_out_es_s cn66xx;
- struct cvmx_sli_pkt_data_out_es_s cn68xx;
- struct cvmx_sli_pkt_data_out_es_s cn68xxp1;
- struct cvmx_sli_pkt_data_out_es_s cnf71xx;
-};
-
-union cvmx_sli_pkt_data_out_ns {
- uint64_t u64;
- struct cvmx_sli_pkt_data_out_ns_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nsr:32;
-#else
- uint64_t nsr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_data_out_ns_s cn61xx;
- struct cvmx_sli_pkt_data_out_ns_s cn63xx;
- struct cvmx_sli_pkt_data_out_ns_s cn63xxp1;
- struct cvmx_sli_pkt_data_out_ns_s cn66xx;
- struct cvmx_sli_pkt_data_out_ns_s cn68xx;
- struct cvmx_sli_pkt_data_out_ns_s cn68xxp1;
- struct cvmx_sli_pkt_data_out_ns_s cnf71xx;
-};
-
-union cvmx_sli_pkt_data_out_ror {
- uint64_t u64;
- struct cvmx_sli_pkt_data_out_ror_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ror:32;
-#else
- uint64_t ror:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_data_out_ror_s cn61xx;
- struct cvmx_sli_pkt_data_out_ror_s cn63xx;
- struct cvmx_sli_pkt_data_out_ror_s cn63xxp1;
- struct cvmx_sli_pkt_data_out_ror_s cn66xx;
- struct cvmx_sli_pkt_data_out_ror_s cn68xx;
- struct cvmx_sli_pkt_data_out_ror_s cn68xxp1;
- struct cvmx_sli_pkt_data_out_ror_s cnf71xx;
-};
-
-union cvmx_sli_pkt_dpaddr {
- uint64_t u64;
- struct cvmx_sli_pkt_dpaddr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dptr:32;
-#else
- uint64_t dptr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_dpaddr_s cn61xx;
- struct cvmx_sli_pkt_dpaddr_s cn63xx;
- struct cvmx_sli_pkt_dpaddr_s cn63xxp1;
- struct cvmx_sli_pkt_dpaddr_s cn66xx;
- struct cvmx_sli_pkt_dpaddr_s cn68xx;
- struct cvmx_sli_pkt_dpaddr_s cn68xxp1;
- struct cvmx_sli_pkt_dpaddr_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_bp {
- uint64_t u64;
- struct cvmx_sli_pkt_in_bp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bp:32;
-#else
- uint64_t bp:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_in_bp_s cn61xx;
- struct cvmx_sli_pkt_in_bp_s cn63xx;
- struct cvmx_sli_pkt_in_bp_s cn63xxp1;
- struct cvmx_sli_pkt_in_bp_s cn66xx;
- struct cvmx_sli_pkt_in_bp_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_donex_cnts {
- uint64_t u64;
- struct cvmx_sli_pkt_in_donex_cnts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_in_donex_cnts_s cn61xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn63xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn63xxp1;
- struct cvmx_sli_pkt_in_donex_cnts_s cn66xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn68xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn68xxp1;
- struct cvmx_sli_pkt_in_donex_cnts_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_instr_counts {
- uint64_t u64;
- struct cvmx_sli_pkt_in_instr_counts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wr_cnt:32;
- uint64_t rd_cnt:32;
-#else
- uint64_t rd_cnt:32;
- uint64_t wr_cnt:32;
-#endif
- } s;
- struct cvmx_sli_pkt_in_instr_counts_s cn61xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn63xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn63xxp1;
- struct cvmx_sli_pkt_in_instr_counts_s cn66xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn68xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn68xxp1;
- struct cvmx_sli_pkt_in_instr_counts_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_pcie_port {
- uint64_t u64;
- struct cvmx_sli_pkt_in_pcie_port_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t pp:64;
-#else
- uint64_t pp:64;
-#endif
- } s;
- struct cvmx_sli_pkt_in_pcie_port_s cn61xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn63xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn63xxp1;
- struct cvmx_sli_pkt_in_pcie_port_s cn66xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn68xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn68xxp1;
- struct cvmx_sli_pkt_in_pcie_port_s cnf71xx;
-};
-
-union cvmx_sli_pkt_input_control {
- uint64_t u64;
- struct cvmx_sli_pkt_input_control_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t prd_erst:1;
- uint64_t prd_rds:7;
- uint64_t gii_erst:1;
- uint64_t gii_rds:7;
- uint64_t reserved_41_47:7;
- uint64_t prc_idle:1;
- uint64_t reserved_24_39:16;
- uint64_t pin_rst:1;
- uint64_t pkt_rr:1;
- uint64_t pbp_dhi:13;
- uint64_t d_nsr:1;
- uint64_t d_esr:2;
- uint64_t d_ror:1;
- uint64_t use_csr:1;
- uint64_t nsr:1;
- uint64_t esr:2;
- uint64_t ror:1;
-#else
- uint64_t ror:1;
- uint64_t esr:2;
- uint64_t nsr:1;
- uint64_t use_csr:1;
- uint64_t d_ror:1;
- uint64_t d_esr:2;
- uint64_t d_nsr:1;
- uint64_t pbp_dhi:13;
- uint64_t pkt_rr:1;
- uint64_t pin_rst:1;
- uint64_t reserved_24_39:16;
- uint64_t prc_idle:1;
- uint64_t reserved_41_47:7;
- uint64_t gii_rds:7;
- uint64_t gii_erst:1;
- uint64_t prd_rds:7;
- uint64_t prd_erst:1;
-#endif
- } s;
- struct cvmx_sli_pkt_input_control_s cn61xx;
- struct cvmx_sli_pkt_input_control_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t pkt_rr:1;
- uint64_t pbp_dhi:13;
- uint64_t d_nsr:1;
- uint64_t d_esr:2;
- uint64_t d_ror:1;
- uint64_t use_csr:1;
- uint64_t nsr:1;
- uint64_t esr:2;
- uint64_t ror:1;
-#else
- uint64_t ror:1;
- uint64_t esr:2;
- uint64_t nsr:1;
- uint64_t use_csr:1;
- uint64_t d_ror:1;
- uint64_t d_esr:2;
- uint64_t d_nsr:1;
- uint64_t pbp_dhi:13;
- uint64_t pkt_rr:1;
- uint64_t reserved_23_63:41;
-#endif
- } cn63xx;
- struct cvmx_sli_pkt_input_control_cn63xx cn63xxp1;
- struct cvmx_sli_pkt_input_control_s cn66xx;
- struct cvmx_sli_pkt_input_control_s cn68xx;
- struct cvmx_sli_pkt_input_control_s cn68xxp1;
- struct cvmx_sli_pkt_input_control_s cnf71xx;
-};
-
-union cvmx_sli_pkt_instr_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_instr_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t enb:32;
-#else
- uint64_t enb:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_instr_enb_s cn61xx;
- struct cvmx_sli_pkt_instr_enb_s cn63xx;
- struct cvmx_sli_pkt_instr_enb_s cn63xxp1;
- struct cvmx_sli_pkt_instr_enb_s cn66xx;
- struct cvmx_sli_pkt_instr_enb_s cn68xx;
- struct cvmx_sli_pkt_instr_enb_s cn68xxp1;
- struct cvmx_sli_pkt_instr_enb_s cnf71xx;
-};
-
-union cvmx_sli_pkt_instr_rd_size {
- uint64_t u64;
- struct cvmx_sli_pkt_instr_rd_size_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rdsize:64;
-#else
- uint64_t rdsize:64;
-#endif
- } s;
- struct cvmx_sli_pkt_instr_rd_size_s cn61xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn63xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn63xxp1;
- struct cvmx_sli_pkt_instr_rd_size_s cn66xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn68xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn68xxp1;
- struct cvmx_sli_pkt_instr_rd_size_s cnf71xx;
-};
-
-union cvmx_sli_pkt_instr_size {
- uint64_t u64;
- struct cvmx_sli_pkt_instr_size_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t is_64b:32;
-#else
- uint64_t is_64b:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_instr_size_s cn61xx;
- struct cvmx_sli_pkt_instr_size_s cn63xx;
- struct cvmx_sli_pkt_instr_size_s cn63xxp1;
- struct cvmx_sli_pkt_instr_size_s cn66xx;
- struct cvmx_sli_pkt_instr_size_s cn68xx;
- struct cvmx_sli_pkt_instr_size_s cn68xxp1;
- struct cvmx_sli_pkt_instr_size_s cnf71xx;
-};
-
-union cvmx_sli_pkt_int_levels {
- uint64_t u64;
- struct cvmx_sli_pkt_int_levels_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t time:22;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t time:22;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_pkt_int_levels_s cn61xx;
- struct cvmx_sli_pkt_int_levels_s cn63xx;
- struct cvmx_sli_pkt_int_levels_s cn63xxp1;
- struct cvmx_sli_pkt_int_levels_s cn66xx;
- struct cvmx_sli_pkt_int_levels_s cn68xx;
- struct cvmx_sli_pkt_int_levels_s cn68xxp1;
- struct cvmx_sli_pkt_int_levels_s cnf71xx;
-};
-
-union cvmx_sli_pkt_iptr {
- uint64_t u64;
- struct cvmx_sli_pkt_iptr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t iptr:32;
-#else
- uint64_t iptr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_iptr_s cn61xx;
- struct cvmx_sli_pkt_iptr_s cn63xx;
- struct cvmx_sli_pkt_iptr_s cn63xxp1;
- struct cvmx_sli_pkt_iptr_s cn66xx;
- struct cvmx_sli_pkt_iptr_s cn68xx;
- struct cvmx_sli_pkt_iptr_s cn68xxp1;
- struct cvmx_sli_pkt_iptr_s cnf71xx;
-};
-
-union cvmx_sli_pkt_out_bmode {
- uint64_t u64;
- struct cvmx_sli_pkt_out_bmode_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bmode:32;
-#else
- uint64_t bmode:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_out_bmode_s cn61xx;
- struct cvmx_sli_pkt_out_bmode_s cn63xx;
- struct cvmx_sli_pkt_out_bmode_s cn63xxp1;
- struct cvmx_sli_pkt_out_bmode_s cn66xx;
- struct cvmx_sli_pkt_out_bmode_s cn68xx;
- struct cvmx_sli_pkt_out_bmode_s cn68xxp1;
- struct cvmx_sli_pkt_out_bmode_s cnf71xx;
-};
-
-union cvmx_sli_pkt_out_bp_en {
- uint64_t u64;
- struct cvmx_sli_pkt_out_bp_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bp_en:32;
-#else
- uint64_t bp_en:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_out_bp_en_s cn68xx;
- struct cvmx_sli_pkt_out_bp_en_s cn68xxp1;
-};
-
-union cvmx_sli_pkt_out_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_out_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t enb:32;
-#else
- uint64_t enb:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_out_enb_s cn61xx;
- struct cvmx_sli_pkt_out_enb_s cn63xx;
- struct cvmx_sli_pkt_out_enb_s cn63xxp1;
- struct cvmx_sli_pkt_out_enb_s cn66xx;
- struct cvmx_sli_pkt_out_enb_s cn68xx;
- struct cvmx_sli_pkt_out_enb_s cn68xxp1;
- struct cvmx_sli_pkt_out_enb_s cnf71xx;
-};
-
-union cvmx_sli_pkt_output_wmark {
- uint64_t u64;
- struct cvmx_sli_pkt_output_wmark_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wmark:32;
-#else
- uint64_t wmark:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_output_wmark_s cn61xx;
- struct cvmx_sli_pkt_output_wmark_s cn63xx;
- struct cvmx_sli_pkt_output_wmark_s cn63xxp1;
- struct cvmx_sli_pkt_output_wmark_s cn66xx;
- struct cvmx_sli_pkt_output_wmark_s cn68xx;
- struct cvmx_sli_pkt_output_wmark_s cn68xxp1;
- struct cvmx_sli_pkt_output_wmark_s cnf71xx;
-};
-
-union cvmx_sli_pkt_pcie_port {
- uint64_t u64;
- struct cvmx_sli_pkt_pcie_port_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t pp:64;
-#else
- uint64_t pp:64;
-#endif
- } s;
- struct cvmx_sli_pkt_pcie_port_s cn61xx;
- struct cvmx_sli_pkt_pcie_port_s cn63xx;
- struct cvmx_sli_pkt_pcie_port_s cn63xxp1;
- struct cvmx_sli_pkt_pcie_port_s cn66xx;
- struct cvmx_sli_pkt_pcie_port_s cn68xx;
- struct cvmx_sli_pkt_pcie_port_s cn68xxp1;
- struct cvmx_sli_pkt_pcie_port_s cnf71xx;
-};
-
-union cvmx_sli_pkt_port_in_rst {
- uint64_t u64;
- struct cvmx_sli_pkt_port_in_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t in_rst:32;
- uint64_t out_rst:32;
-#else
- uint64_t out_rst:32;
- uint64_t in_rst:32;
-#endif
- } s;
- struct cvmx_sli_pkt_port_in_rst_s cn61xx;
- struct cvmx_sli_pkt_port_in_rst_s cn63xx;
- struct cvmx_sli_pkt_port_in_rst_s cn63xxp1;
- struct cvmx_sli_pkt_port_in_rst_s cn66xx;
- struct cvmx_sli_pkt_port_in_rst_s cn68xx;
- struct cvmx_sli_pkt_port_in_rst_s cn68xxp1;
- struct cvmx_sli_pkt_port_in_rst_s cnf71xx;
-};
-
-union cvmx_sli_pkt_slist_es {
- uint64_t u64;
- struct cvmx_sli_pkt_slist_es_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t es:64;
-#else
- uint64_t es:64;
-#endif
- } s;
- struct cvmx_sli_pkt_slist_es_s cn61xx;
- struct cvmx_sli_pkt_slist_es_s cn63xx;
- struct cvmx_sli_pkt_slist_es_s cn63xxp1;
- struct cvmx_sli_pkt_slist_es_s cn66xx;
- struct cvmx_sli_pkt_slist_es_s cn68xx;
- struct cvmx_sli_pkt_slist_es_s cn68xxp1;
- struct cvmx_sli_pkt_slist_es_s cnf71xx;
-};
-
-union cvmx_sli_pkt_slist_ns {
- uint64_t u64;
- struct cvmx_sli_pkt_slist_ns_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nsr:32;
-#else
- uint64_t nsr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_slist_ns_s cn61xx;
- struct cvmx_sli_pkt_slist_ns_s cn63xx;
- struct cvmx_sli_pkt_slist_ns_s cn63xxp1;
- struct cvmx_sli_pkt_slist_ns_s cn66xx;
- struct cvmx_sli_pkt_slist_ns_s cn68xx;
- struct cvmx_sli_pkt_slist_ns_s cn68xxp1;
- struct cvmx_sli_pkt_slist_ns_s cnf71xx;
-};
-
-union cvmx_sli_pkt_slist_ror {
- uint64_t u64;
- struct cvmx_sli_pkt_slist_ror_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ror:32;
-#else
- uint64_t ror:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_slist_ror_s cn61xx;
- struct cvmx_sli_pkt_slist_ror_s cn63xx;
- struct cvmx_sli_pkt_slist_ror_s cn63xxp1;
- struct cvmx_sli_pkt_slist_ror_s cn66xx;
- struct cvmx_sli_pkt_slist_ror_s cn68xx;
- struct cvmx_sli_pkt_slist_ror_s cn68xxp1;
- struct cvmx_sli_pkt_slist_ror_s cnf71xx;
-};
-
-union cvmx_sli_pkt_time_int {
- uint64_t u64;
- struct cvmx_sli_pkt_time_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_time_int_s cn61xx;
- struct cvmx_sli_pkt_time_int_s cn63xx;
- struct cvmx_sli_pkt_time_int_s cn63xxp1;
- struct cvmx_sli_pkt_time_int_s cn66xx;
- struct cvmx_sli_pkt_time_int_s cn68xx;
- struct cvmx_sli_pkt_time_int_s cn68xxp1;
- struct cvmx_sli_pkt_time_int_s cnf71xx;
-};
-
-union cvmx_sli_pkt_time_int_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_time_int_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_time_int_enb_s cn61xx;
- struct cvmx_sli_pkt_time_int_enb_s cn63xx;
- struct cvmx_sli_pkt_time_int_enb_s cn63xxp1;
- struct cvmx_sli_pkt_time_int_enb_s cn66xx;
- struct cvmx_sli_pkt_time_int_enb_s cn68xx;
- struct cvmx_sli_pkt_time_int_enb_s cn68xxp1;
- struct cvmx_sli_pkt_time_int_enb_s cnf71xx;
-};
-
-union cvmx_sli_portx_pkind {
- uint64_t u64;
- struct cvmx_sli_portx_pkind_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t rpk_enb:1;
- uint64_t reserved_22_23:2;
- uint64_t pkindr:6;
- uint64_t reserved_14_15:2;
- uint64_t bpkind:6;
- uint64_t reserved_6_7:2;
- uint64_t pkind:6;
-#else
- uint64_t pkind:6;
- uint64_t reserved_6_7:2;
- uint64_t bpkind:6;
- uint64_t reserved_14_15:2;
- uint64_t pkindr:6;
- uint64_t reserved_22_23:2;
- uint64_t rpk_enb:1;
- uint64_t reserved_25_63:39;
-#endif
- } s;
- struct cvmx_sli_portx_pkind_s cn68xx;
- struct cvmx_sli_portx_pkind_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t bpkind:6;
- uint64_t reserved_6_7:2;
- uint64_t pkind:6;
-#else
- uint64_t pkind:6;
- uint64_t reserved_6_7:2;
- uint64_t bpkind:6;
- uint64_t reserved_14_63:50;
-#endif
- } cn68xxp1;
};
union cvmx_sli_s2m_portx_ctl {
uint64_t u64;
struct cvmx_sli_s2m_portx_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t wind_d:1;
- uint64_t bar0_d:1;
- uint64_t mrrs:3;
-#else
- uint64_t mrrs:3;
- uint64_t bar0_d:1;
- uint64_t wind_d:1;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_sli_s2m_portx_ctl_s cn61xx;
- struct cvmx_sli_s2m_portx_ctl_s cn63xx;
- struct cvmx_sli_s2m_portx_ctl_s cn63xxp1;
- struct cvmx_sli_s2m_portx_ctl_s cn66xx;
- struct cvmx_sli_s2m_portx_ctl_s cn68xx;
- struct cvmx_sli_s2m_portx_ctl_s cn68xxp1;
- struct cvmx_sli_s2m_portx_ctl_s cnf71xx;
-};
-
-union cvmx_sli_scratch_1 {
- uint64_t u64;
- struct cvmx_sli_scratch_1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_scratch_1_s cn61xx;
- struct cvmx_sli_scratch_1_s cn63xx;
- struct cvmx_sli_scratch_1_s cn63xxp1;
- struct cvmx_sli_scratch_1_s cn66xx;
- struct cvmx_sli_scratch_1_s cn68xx;
- struct cvmx_sli_scratch_1_s cn68xxp1;
- struct cvmx_sli_scratch_1_s cnf71xx;
-};
-
-union cvmx_sli_scratch_2 {
- uint64_t u64;
- struct cvmx_sli_scratch_2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_scratch_2_s cn61xx;
- struct cvmx_sli_scratch_2_s cn63xx;
- struct cvmx_sli_scratch_2_s cn63xxp1;
- struct cvmx_sli_scratch_2_s cn66xx;
- struct cvmx_sli_scratch_2_s cn68xx;
- struct cvmx_sli_scratch_2_s cn68xxp1;
- struct cvmx_sli_scratch_2_s cnf71xx;
-};
-
-union cvmx_sli_state1 {
- uint64_t u64;
- struct cvmx_sli_state1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cpl1:12;
- uint64_t cpl0:12;
- uint64_t arb:1;
- uint64_t csr:39;
-#else
- uint64_t csr:39;
- uint64_t arb:1;
- uint64_t cpl0:12;
- uint64_t cpl1:12;
-#endif
- } s;
- struct cvmx_sli_state1_s cn61xx;
- struct cvmx_sli_state1_s cn63xx;
- struct cvmx_sli_state1_s cn63xxp1;
- struct cvmx_sli_state1_s cn66xx;
- struct cvmx_sli_state1_s cn68xx;
- struct cvmx_sli_state1_s cn68xxp1;
- struct cvmx_sli_state1_s cnf71xx;
-};
-
-union cvmx_sli_state2 {
- uint64_t u64;
- struct cvmx_sli_state2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t nnp1:8;
- uint64_t reserved_47_47:1;
- uint64_t rac:1;
- uint64_t csm1:15;
- uint64_t csm0:15;
- uint64_t nnp0:8;
- uint64_t nnd:8;
-#else
- uint64_t nnd:8;
- uint64_t nnp0:8;
- uint64_t csm0:15;
- uint64_t csm1:15;
- uint64_t rac:1;
- uint64_t reserved_47_47:1;
- uint64_t nnp1:8;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_sli_state2_s cn61xx;
- struct cvmx_sli_state2_s cn63xx;
- struct cvmx_sli_state2_s cn63xxp1;
- struct cvmx_sli_state2_s cn66xx;
- struct cvmx_sli_state2_s cn68xx;
- struct cvmx_sli_state2_s cn68xxp1;
- struct cvmx_sli_state2_s cnf71xx;
-};
-
-union cvmx_sli_state3 {
- uint64_t u64;
- struct cvmx_sli_state3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t psm1:15;
- uint64_t psm0:15;
- uint64_t nsm1:13;
- uint64_t nsm0:13;
-#else
- uint64_t nsm0:13;
- uint64_t nsm1:13;
- uint64_t psm0:15;
- uint64_t psm1:15;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_sli_state3_s cn61xx;
- struct cvmx_sli_state3_s cn63xx;
- struct cvmx_sli_state3_s cn63xxp1;
- struct cvmx_sli_state3_s cn66xx;
- struct cvmx_sli_state3_s cn68xx;
- struct cvmx_sli_state3_s cn68xxp1;
- struct cvmx_sli_state3_s cnf71xx;
-};
-
-union cvmx_sli_tx_pipe {
- uint64_t u64;
- struct cvmx_sli_tx_pipe_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t nump:8;
- uint64_t reserved_7_15:9;
- uint64_t base:7;
-#else
- uint64_t base:7;
- uint64_t reserved_7_15:9;
- uint64_t nump:8;
- uint64_t reserved_24_63:40;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_5_63:59,
+ __BITFIELD_FIELD(uint64_t wind_d:1,
+ __BITFIELD_FIELD(uint64_t bar0_d:1,
+ __BITFIELD_FIELD(uint64_t mrrs:3,
+ ;))))
} s;
- struct cvmx_sli_tx_pipe_s cn68xx;
- struct cvmx_sli_tx_pipe_s cn68xxp1;
};
-union cvmx_sli_win_rd_addr {
- uint64_t u64;
- struct cvmx_sli_win_rd_addr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_51_63:13;
- uint64_t ld_cmd:2;
- uint64_t iobit:1;
- uint64_t rd_addr:48;
-#else
- uint64_t rd_addr:48;
- uint64_t iobit:1;
- uint64_t ld_cmd:2;
- uint64_t reserved_51_63:13;
-#endif
- } s;
- struct cvmx_sli_win_rd_addr_s cn61xx;
- struct cvmx_sli_win_rd_addr_s cn63xx;
- struct cvmx_sli_win_rd_addr_s cn63xxp1;
- struct cvmx_sli_win_rd_addr_s cn66xx;
- struct cvmx_sli_win_rd_addr_s cn68xx;
- struct cvmx_sli_win_rd_addr_s cn68xxp1;
- struct cvmx_sli_win_rd_addr_s cnf71xx;
-};
-
-union cvmx_sli_win_rd_data {
- uint64_t u64;
- struct cvmx_sli_win_rd_data_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rd_data:64;
-#else
- uint64_t rd_data:64;
-#endif
- } s;
- struct cvmx_sli_win_rd_data_s cn61xx;
- struct cvmx_sli_win_rd_data_s cn63xx;
- struct cvmx_sli_win_rd_data_s cn63xxp1;
- struct cvmx_sli_win_rd_data_s cn66xx;
- struct cvmx_sli_win_rd_data_s cn68xx;
- struct cvmx_sli_win_rd_data_s cn68xxp1;
- struct cvmx_sli_win_rd_data_s cnf71xx;
-};
-
-union cvmx_sli_win_wr_addr {
- uint64_t u64;
- struct cvmx_sli_win_wr_addr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t iobit:1;
- uint64_t wr_addr:45;
- uint64_t reserved_0_2:3;
-#else
- uint64_t reserved_0_2:3;
- uint64_t wr_addr:45;
- uint64_t iobit:1;
- uint64_t reserved_49_63:15;
-#endif
- } s;
- struct cvmx_sli_win_wr_addr_s cn61xx;
- struct cvmx_sli_win_wr_addr_s cn63xx;
- struct cvmx_sli_win_wr_addr_s cn63xxp1;
- struct cvmx_sli_win_wr_addr_s cn66xx;
- struct cvmx_sli_win_wr_addr_s cn68xx;
- struct cvmx_sli_win_wr_addr_s cn68xxp1;
- struct cvmx_sli_win_wr_addr_s cnf71xx;
-};
-
-union cvmx_sli_win_wr_data {
- uint64_t u64;
- struct cvmx_sli_win_wr_data_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wr_data:64;
-#else
- uint64_t wr_data:64;
-#endif
- } s;
- struct cvmx_sli_win_wr_data_s cn61xx;
- struct cvmx_sli_win_wr_data_s cn63xx;
- struct cvmx_sli_win_wr_data_s cn63xxp1;
- struct cvmx_sli_win_wr_data_s cn66xx;
- struct cvmx_sli_win_wr_data_s cn68xx;
- struct cvmx_sli_win_wr_data_s cn68xxp1;
- struct cvmx_sli_win_wr_data_s cnf71xx;
-};
-
-union cvmx_sli_win_wr_mask {
- uint64_t u64;
- struct cvmx_sli_win_wr_mask_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t wr_mask:8;
-#else
- uint64_t wr_mask:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_sli_win_wr_mask_s cn61xx;
- struct cvmx_sli_win_wr_mask_s cn63xx;
- struct cvmx_sli_win_wr_mask_s cn63xxp1;
- struct cvmx_sli_win_wr_mask_s cn66xx;
- struct cvmx_sli_win_wr_mask_s cn68xx;
- struct cvmx_sli_win_wr_mask_s cn68xxp1;
- struct cvmx_sli_win_wr_mask_s cnf71xx;
-};
-
-union cvmx_sli_window_ctl {
+union cvmx_sli_mem_access_subidx {
uint64_t u64;
- struct cvmx_sli_window_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t time:32;
-#else
- uint64_t time:32;
- uint64_t reserved_32_63:32;
-#endif
+ struct cvmx_sli_mem_access_subidx_s {
+ __BITFIELD_FIELD(uint64_t reserved_43_63:21,
+ __BITFIELD_FIELD(uint64_t zero:1,
+ __BITFIELD_FIELD(uint64_t port:3,
+ __BITFIELD_FIELD(uint64_t nmerge:1,
+ __BITFIELD_FIELD(uint64_t esr:2,
+ __BITFIELD_FIELD(uint64_t esw:2,
+ __BITFIELD_FIELD(uint64_t wtype:2,
+ __BITFIELD_FIELD(uint64_t rtype:2,
+ __BITFIELD_FIELD(uint64_t ba:30,
+ ;)))))))))
} s;
- struct cvmx_sli_window_ctl_s cn61xx;
- struct cvmx_sli_window_ctl_s cn63xx;
- struct cvmx_sli_window_ctl_s cn63xxp1;
- struct cvmx_sli_window_ctl_s cn66xx;
- struct cvmx_sli_window_ctl_s cn68xx;
- struct cvmx_sli_window_ctl_s cn68xxp1;
- struct cvmx_sli_window_ctl_s cnf71xx;
+ struct cvmx_sli_mem_access_subidx_cn68xx {
+ __BITFIELD_FIELD(uint64_t reserved_43_63:21,
+ __BITFIELD_FIELD(uint64_t zero:1,
+ __BITFIELD_FIELD(uint64_t port:3,
+ __BITFIELD_FIELD(uint64_t nmerge:1,
+ __BITFIELD_FIELD(uint64_t esr:2,
+ __BITFIELD_FIELD(uint64_t esw:2,
+ __BITFIELD_FIELD(uint64_t wtype:2,
+ __BITFIELD_FIELD(uint64_t rtype:2,
+ __BITFIELD_FIELD(uint64_t ba:28,
+ __BITFIELD_FIELD(uint64_t reserved_0_1:2,
+ ;))))))))))
+ } cn68xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 2530e8731c8a..9742202f2a32 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -62,7 +62,6 @@ enum cvmx_mips_space {
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
-#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a8705f6c8180..a1bdb1ea5234 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -110,6 +110,32 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pud_t *pud;
+
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
+ if (pud)
+ pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+ return pud;
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ free_pages((unsigned long)pud, PUD_ORDER);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ set_pgd(pgd, __pgd((unsigned long)pud));
+}
+
+#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
+
#define check_pgt_cache() do { } while (0)
extern void pagetable_init(void);
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 130a2a6c1531..67fe6dc5211c 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -20,7 +20,7 @@
#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
-#else
+#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
#include <asm-generic/pgtable-nopud.h>
#endif
@@ -54,9 +54,18 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
+# ifdef __PAGETABLE_PUD_FOLDED
+# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+# endif
+#endif
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif
+
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -79,8 +88,13 @@
* of virtual address space.
*/
#ifdef CONFIG_PAGE_SIZE_4KB
-#define PGD_ORDER 1
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
+# ifdef CONFIG_MIPS_VA_BITS_48
+# define PGD_ORDER 0
+# define PUD_ORDER 0
+# else
+# define PGD_ORDER 1
+# define PUD_ORDER aieeee_attempt_to_allocate_pud
+# endif
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
@@ -118,6 +132,9 @@
#endif
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
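
As a quick sanity check of the new constants for the 4KB-page, 48-bit-VA configuration (PAGE_SHIFT = 12, all orders 0, 8-byte table entries, so each level resolves PAGE_SHIFT - 3 = 9 bits; PMD_SHIFT comes from the unchanged part of this file):

	PMD_SHIFT   = 12 + (12 + 0 - 3) = 21	/* a pmd entry maps 2 MB   */
	PUD_SHIFT   = 21 + (12 + 0 - 3) = 30	/* a pud entry maps 1 GB   */
	PGDIR_SHIFT = 30 + (12 + 0 - 3) = 39	/* a pgd entry maps 512 GB */
	PTRS_PER_PGD = 4096 / 8 = 512, and 39 + 9 = 48 VA bits exactly
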
@@ -134,7 +151,7 @@
#define VMALLOC_START (MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END \
(MAP_BASE + \
- min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
(1UL << cpu_vmbits)) - (1UL << 32))
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
@@ -150,12 +167,72 @@
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
+#ifndef __PAGETABLE_PUD_FOLDED
+/*
+ * For 4-level pagetables we define these ourselves; for 3-level the
+ * definitions are below, and for 2-level they are supplied by
+ * <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+
+/*
+ * Empty pgd entries point to the invalid_pud_table.
+ */
+static inline int pgd_none(pgd_t pgd)
+{
+ return pgd_val(pgd) == (unsigned long)invalid_pud_table;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+ return pgd_val(pgd) != (unsigned long)invalid_pud_table;
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
+}
+
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return pgd_val(pgd);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
+{
+ *pgd = pgdval;
+}
+
+#endif
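
With these helpers in place, a software walk of the new 4-level layout reads like the generic one. A sketch (pgd_offset, pmd_offset and pte_offset_map are the pre-existing generic/MIPS helpers, assumed here for illustration):

	pgd_t *pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))			/* entry still == invalid_pud_table */
		return NULL;
	pud_t *pud = pud_offset(pgd, addr);	/* new: index into the pud page */
	pmd_t *pmd = pmd_offset(pud, addr);	/* unchanged third level */
	pte_t *pte = pte_offset_map(pmd, addr);
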
#ifndef __PAGETABLE_PMD_FOLDED
/*
@@ -281,6 +358,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
* Initialize a new pgd / pmd table with invalid pointers.
*/
extern void pgd_init(unsigned long page);
+extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
/*
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index e9a9e2ade1d2..3748f4d120a5 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -21,77 +21,46 @@
#define UASM_EXPORT_SYMBOL(sym)
#endif
-#define _UASM_ISA_CLASSIC 0
-#define _UASM_ISA_MICROMIPS 1
-
-#ifndef UASM_ISA
-#ifdef CONFIG_CPU_MICROMIPS
-#define UASM_ISA _UASM_ISA_MICROMIPS
-#else
-#define UASM_ISA _UASM_ISA_CLASSIC
-#endif
-#endif
-
-#if (UASM_ISA == _UASM_ISA_CLASSIC)
-#ifdef CONFIG_CPU_MICROMIPS
-#define ISAOPC(op) CL_uasm_i##op
-#define ISAFUNC(x) CL_##x
-#else
-#define ISAOPC(op) uasm_i##op
-#define ISAFUNC(x) x
-#endif
-#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
-#ifdef CONFIG_CPU_MICROMIPS
-#define ISAOPC(op) uasm_i##op
-#define ISAFUNC(x) x
-#else
-#define ISAOPC(op) MM_uasm_i##op
-#define ISAFUNC(x) MM_##x
-#endif
-#else
-#error Unsupported micro-assembler ISA!!!
-#endif
-
#define Ip_u1u2u3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u2u1(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_s3s1s2(op) \
-void ISAOPC(op)(u32 **buf, int a, int b, int c)
+void uasm_i##op(u32 **buf, int a, int b, int c)
#define Ip_u2u1s3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2u1msbu3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
unsigned int d)
#define Ip_u1u2(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u2u1(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
+void uasm_i##op(u32 **buf, unsigned int a, signed int b)
-#define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
+#define Ip_u1(op) void uasm_i##op(u32 **buf, unsigned int a)
-#define Ip_0(op) void ISAOPC(op)(u32 **buf)
+#define Ip_0(op) void uasm_i##op(u32 **buf)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
@@ -138,6 +107,7 @@ Ip_u2s3u1(_lb);
Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx);
Ip_u2s3u1(_lh);
+Ip_u2s3u1(_lhu);
Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
@@ -190,20 +160,20 @@ struct uasm_label {
int lab;
};
-void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+void uasm_build_label(struct uasm_label **lab, u32 *addr,
int lid);
#ifdef CONFIG_64BIT
-int ISAFUNC(uasm_in_compat_space_p)(long addr);
+int uasm_in_compat_space_p(long addr);
#endif
-int ISAFUNC(uasm_rel_hi)(long val);
-int ISAFUNC(uasm_rel_lo)(long val);
-void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
-void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \
-static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
+static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \
{ \
- ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
+ uasm_build_label(lab, addr, label##lb); \
}
/* convenience macros for instructions */
@@ -255,27 +225,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- ISAOPC(_drotr)(p, a1, a2, a3);
+ uasm_i_drotr(p, a1, a2, a3);
else
- ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
+ uasm_i_drotr32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- ISAOPC(_dsll)(p, a1, a2, a3);
+ uasm_i_dsll(p, a1, a2, a3);
else
- ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
+ uasm_i_dsll32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- ISAOPC(_dsrl)(p, a1, a2, a3);
+ uasm_i_dsrl(p, a1, a2, a3);
else
- ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
+ uasm_i_dsrl32(p, a1, a2, a3 - 32);
}
/* Handle relocations. */
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index f2cf41461146..a0266feba9e6 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -2,40 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += ipcbuf.h
-
-header-y += auxvec.h
-header-y += bitfield.h
-header-y += bitsperlong.h
-header-y += break.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += inst.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += sgidefs.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += sysmips.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 3382892544f0..1aba27786bd5 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -34,6 +34,7 @@
/* Hardware capabilities */
unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
/*
* Get the FPU Implementation/Revision.
@@ -1955,6 +1956,12 @@ void cpu_probe(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
+ /*
+ * Set a default ELF platform; the CPU probe may later
+ * overwrite it with a more precise value.
+ */
+ set_elf_platform(cpu, "mips");
+
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 550e7d03090a..ae64c8f56a8c 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1096,10 +1096,20 @@ repeat:
}
break;
- case beql_op:
- case bnel_op:
case blezl_op:
case bgtzl_op:
+ /*
+ * For BLEZL and BGTZL, the rt field must be set to 0. If it
+ * is not, this may be an encoding of a MIPS R6 instruction,
+ * so return to CPU execution in that case.
+ */
+ if (MIPSInst_RT(inst)) {
+ err = SIGILL;
+ break;
+ }
+ /* fall through */
+ case beql_op:
+ case bnel_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
@@ -2329,6 +2339,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
__this_cpu_write((mipsr2bremustats).bgezl, 0);
__this_cpu_write((mipsr2bremustats).bltzll, 0);
__this_cpu_write((mipsr2bremustats).bgezll, 0);
+ __this_cpu_write((mipsr2bremustats).bltzall, 0);
+ __this_cpu_write((mipsr2bremustats).bgezall, 0);
__this_cpu_write((mipsr2bremustats).bltzal, 0);
__this_cpu_write((mipsr2bremustats).bgezal, 0);
__this_cpu_write((mipsr2bremustats).beql, 0);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b68e10fc453d..918d4c73e951 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -114,8 +114,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Copy architecture-specific thread state
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
@@ -176,7 +176,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
if (clone_flags & CLONE_SETTLS)
- ti->tp_value = regs->regs[7];
+ ti->tp_value = tls;
return 0;
}
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 758577861523..7b386d54fd65 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -25,12 +25,6 @@
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp
-/*
- * Offset to the current process status flags, the first 32 bytes of the
- * stack are not used.
- */
-#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
-
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 795b4aaf8927..36954ddd0b9f 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -8,6 +8,7 @@
* option) any later version.
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
@@ -408,7 +409,6 @@ static int cps_cpu_disable(void)
return 0;
}
-static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
CPU_DEATH_HALT,
@@ -443,7 +443,7 @@ void play_dead(void)
}
/* This CPU has chosen its way out */
- complete(&cpu_death_chosen);
+ (void)cpu_report_death();
if (cpu_death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
@@ -492,8 +492,7 @@ static void cps_cpu_die(unsigned int cpu)
int err;
/* Wait for the cpu to choose its way out */
- if (!wait_for_completion_timeout(&cpu_death_chosen,
- msecs_to_jiffies(5000))) {
+ if (!cpu_wait_death(cpu, 5)) {
pr_err("CPU%u: didn't offline\n", cpu);
return;
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e398cbc3d776..ed6b4df583ea 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -83,6 +83,8 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
if (tc != 0)
smvp_copy_vpe_config();
+ cpu_data[ncpu].vpe_id = tc;
+
return ncpu;
}
@@ -114,49 +116,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
write_tc_c0_tchalt(TCHALT_H);
}
-static void vsmp_send_ipi_single(int cpu, unsigned int action)
-{
- int i;
- unsigned long flags;
- int vpflags;
-
-#ifdef CONFIG_MIPS_GIC
- if (gic_present) {
- mips_smp_send_ipi_single(cpu, action);
- return;
- }
-#endif
- local_irq_save(flags);
-
- vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
-
- switch (action) {
- case SMP_CALL_FUNCTION:
- i = C_SW1;
- break;
-
- case SMP_RESCHEDULE_YOURSELF:
- default:
- i = C_SW0;
- break;
- }
-
- /* 1:1 mapping of vpe and tc... */
- settc(cpu);
- write_vpe_c0_cause(read_vpe_c0_cause() | i);
- evpe(vpflags);
-
- local_irq_restore(flags);
-}
-
-static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
- unsigned int i;
-
- for_each_cpu(i, mask)
- vsmp_send_ipi_single(i, action);
-}
-
static void vsmp_init_secondary(void)
{
#ifdef CONFIG_MIPS_GIC
@@ -281,8 +240,8 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus)
}
struct plat_smp_ops vsmp_smp_ops = {
- .send_ipi_single = vsmp_send_ipi_single,
- .send_ipi_mask = vsmp_send_ipi_mask,
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
.boot_secondary = vsmp_boot_secondary,
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e71130549ea..aba1afb64b62 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -261,16 +261,20 @@ int mips_smp_ipi_allocate(const struct cpumask *mask)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
/*
- * There are systems which only use IPI domains some of the time,
- * depending upon configuration we don't know until runtime. An
- * example is Malta where we may compile in support for GIC & the
- * MT ASE, but run on a system which has multiple VPEs in a single
- * core and doesn't include a GIC. Until all IPI implementations
- * have been converted to use IPI domains the best we can do here
- * is to return & hope some other code sets up the IPIs.
+ * There are systems which use IPI IRQ domains, but only have one
+ * registered when some runtime condition is met. For example a Malta
+ * kernel may include support for GIC & CPU interrupt controller IPI
+ * IRQ domains, but if run on a system with no GIC & no MT ASE then
+ * neither will be supported or registered.
+ *
+ * We only have a problem if we're actually using multiple CPUs, so
+ * fail loudly in that case. Otherwise simply return, skipping IPI
+ * setup, when running with a single CPU.
*/
- if (!ipidomain)
+ if (!ipidomain) {
+ BUG_ON(num_present_cpus() > 1);
return 0;
+ }
virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!virq);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 0ddf3698b85d..33728b7af426 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -274,47 +274,6 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}
-#ifdef CONFIG_MIPS_MT_SMP
-void __init arch_init_ipiirq(int irq, struct irqaction *action)
-{
- setup_irq(irq, action);
- irq_set_handler(irq, handle_percpu_irq);
-}
-
-static void ltq_sw0_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ltq_sw1_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
- scheduler_ipi();
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- generic_smp_call_function_interrupt();
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_call"
-};
-#endif
-
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -402,17 +361,6 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
-#if defined(CONFIG_MIPS_MT_SMP)
- if (cpu_has_vint) {
- pr_info("Setting up IPI vectored interrupts\n");
- set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
- set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
- }
- arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
- &irq_resched);
- arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
-#endif
-
#ifndef CONFIG_MIPS_MT_SMP
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index a298ac93edcc..f12fde10c8ad 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -439,6 +439,8 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
unsigned int bit = 0;
+ unsigned int bit0;
+ union fpureg *fpr;
switch (insn.i_format.opcode) {
case spec_op:
@@ -706,14 +708,14 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
((insn.i_format.rs == bc1eqz_op) ||
(insn.i_format.rs == bc1nez_op))) {
bit = 0;
+ fpr = &current->thread.fpu.fpr[insn.i_format.rt];
+ bit0 = get_fpr32(fpr, 0) & 0x1;
switch (insn.i_format.rs) {
case bc1eqz_op:
- if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
- bit = 1;
+ bit = bit0 == 0;
break;
case bc1nez_op:
- if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
- bit = 1;
+ bit = bit0 != 0;
break;
}
if (bit)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 3bef306cdfdb..4f8f5bf46977 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -267,19 +267,19 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
- else
+
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
- printk("do_page_fault() #3: sending SIGBUS to %s for "
- "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
- tsk->comm,
- write ? "write access to" : "read access from",
- field, address,
- field, (unsigned long) regs->cp0_epc,
- field, (unsigned long) regs->regs[31]);
+ printk("do_page_fault() #3: sending SIGBUS to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
#endif
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
tsk->thread.cp0_badvaddr = address;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3ca20283b31e..8ce2983a7015 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -537,6 +537,9 @@ unsigned long pgd_current[NR_CPUS];
* it in the linker script.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 0ae7b28b4db5..6fd6e96fdebb 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -19,10 +19,12 @@ void pgd_init(unsigned long page)
unsigned long *p, *end;
unsigned long entry;
-#ifdef __PAGETABLE_PMD_FOLDED
- entry = (unsigned long)invalid_pte_table;
-#else
+#if !defined(__PAGETABLE_PUD_FOLDED)
+ entry = (unsigned long)invalid_pud_table;
+#elif !defined(__PAGETABLE_PMD_FOLDED)
entry = (unsigned long)invalid_pmd_table;
+#else
+ entry = (unsigned long)invalid_pte_table;
#endif
p = (unsigned long *) page;
@@ -64,6 +66,28 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
EXPORT_SYMBOL_GPL(pmd_init);
#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PUD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+#endif
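
pud_init() uses the same eight-entries-per-iteration unrolling as pgd_init() above: five stores, a single pointer bump, then three stores at negative offsets, which assumes PTRS_PER_PUD is a multiple of eight. Functionally it is just (a plain-C equivalent, not a replacement):

	for (i = 0; i < PTRS_PER_PUD; i++)
		p[i] = pagetable;
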
+
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
@@ -87,6 +111,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
#endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4f642e07c2b1..ed1c5297547a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -865,6 +865,13 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PUD_FOLDED
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
+ uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
@@ -1184,6 +1191,21 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
}
+#ifndef __PAGETABLE_PUD_FOLDED
+ /* get pud offset in bytes */
+ uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
+ uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
+
+ if (use_lwx_insns()) {
+ UASM_i_LWX(p, ptr, scratch, ptr);
+ } else {
+ uasm_i_daddu(p, ptr, ptr, scratch); /* add in pud offset */
+ UASM_i_LW(p, ptr, 0, ptr);
+ }
+ /* ptr now points at the pmd table */
+ /* tmp contains the address */
+#endif
+
#ifndef __PAGETABLE_PMD_FOLDED
/* get pmd offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
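
In C terms, the pud step added above (either the LWX or the daddu/LW variant) computes roughly the following, with tmp holding the faulting address and ptr the pgd entry loaded earlier:

	off = (tmp >> (PUD_SHIFT - 3)) & ((PTRS_PER_PUD - 1) << 3);	/* byte offset */
	ptr = *(u64 *)(ptr + off);	/* pud entry = pmd table base */
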
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 763d3f1edb8a..2277499fe6ae 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -103,6 +103,7 @@ static struct insn insn_table[] = {
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
{ insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lhu, M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#ifndef CONFIG_CPU_MIPSR6
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index a82970442b8a..730363b59bac 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -61,7 +61,7 @@ enum opcode {
insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
- insn_xori, insn_yield, insn_lddir, insn_ldpte,
+ insn_xori, insn_yield, insn_lddir, insn_ldpte, insn_lhu,
};
struct insn {
@@ -297,6 +297,7 @@ I_u1(_jr)
I_u2s3u1(_lb)
I_u2s3u1(_ld)
I_u2s3u1(_lh)
+I_u2s3u1(_lhu)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
@@ -349,7 +350,7 @@ I_u2u1u3(_lddir)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
-void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
+void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
@@ -361,26 +362,26 @@ void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
else
build_insn(buf, insn_pref, c, a, b);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
+UASM_EXPORT_SYMBOL(uasm_i_pref);
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
-void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
+void uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
+UASM_EXPORT_SYMBOL(uasm_build_label);
-int ISAFUNC(uasm_in_compat_space_p)(long addr)
+int uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
return addr == (int)addr;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
+UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
static int uasm_rel_highest(long val)
{
@@ -400,64 +401,64 @@ static int uasm_rel_higher(long val)
#endif
}
-int ISAFUNC(uasm_rel_hi)(long val)
+int uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
+UASM_EXPORT_SYMBOL(uasm_rel_hi);
-int ISAFUNC(uasm_rel_lo)(long val)
+int uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
+UASM_EXPORT_SYMBOL(uasm_rel_lo);
-void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
- if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
- ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
+ if (!uasm_in_compat_space_p(addr)) {
+ uasm_i_lui(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
- ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
- if (ISAFUNC(uasm_rel_hi(addr))) {
- ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
- ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
- ISAFUNC(uasm_rel_hi)(addr));
- ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+ uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
+ if (uasm_rel_hi(addr)) {
+ uasm_i_dsll(buf, rs, rs, 16);
+ uasm_i_daddiu(buf, rs, rs,
+ uasm_rel_hi(addr));
+ uasm_i_dsll(buf, rs, rs, 16);
} else
- ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
+ uasm_i_dsll32(buf, rs, rs, 0);
} else
- ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
+ uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
-UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
+UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
-void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
- ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
- if (ISAFUNC(uasm_rel_lo(addr))) {
- if (!ISAFUNC(uasm_in_compat_space_p)(addr))
- ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
- ISAFUNC(uasm_rel_lo(addr)));
+ UASM_i_LA_mostly(buf, rs, addr);
+ if (uasm_rel_lo(addr)) {
+ if (!uasm_in_compat_space_p(addr))
+ uasm_i_daddiu(buf, rs, rs,
+ uasm_rel_lo(addr));
else
- ISAFUNC(uasm_i_addiu)(buf, rs, rs,
- ISAFUNC(uasm_rel_lo(addr)));
+ uasm_i_addiu(buf, rs, rs,
+ uasm_rel_lo(addr));
}
}
-UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
+UASM_EXPORT_SYMBOL(UASM_i_LA);
/* Handle relocations. */
-void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
+UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
static inline void __resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab);
-void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
+void uasm_resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab)
{
struct uasm_label *l;
@@ -467,39 +468,39 @@ void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
+UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
-void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end,
long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
+UASM_EXPORT_SYMBOL(uasm_move_relocs);
-void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end,
long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
+UASM_EXPORT_SYMBOL(uasm_move_labels);
-void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
- ISAFUNC(uasm_move_relocs(rel, first, end, off));
- ISAFUNC(uasm_move_labels(lab, first, end, off));
+ uasm_move_relocs(rel, first, end, off);
+ uasm_move_labels(lab, first, end, off);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
+UASM_EXPORT_SYMBOL(uasm_copy_handler);
-int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -510,92 +511,92 @@ int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
return 0;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
+UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
/* Convenience functions for labeled branches. */
-void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bltz)(p, reg, 0);
+ uasm_i_bltz(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
+UASM_EXPORT_SYMBOL(uasm_il_bltz);
-void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_b)(p, 0);
+ uasm_i_b(p, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
+UASM_EXPORT_SYMBOL(uasm_il_b);
-void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
unsigned int r2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
+ uasm_i_beq(p, r1, r2, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));
+UASM_EXPORT_SYMBOL(uasm_il_beq);
-void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_beqz)(p, reg, 0);
+ uasm_i_beqz(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
+UASM_EXPORT_SYMBOL(uasm_il_beqz);
-void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_beqzl)(p, reg, 0);
+ uasm_i_beqzl(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
+UASM_EXPORT_SYMBOL(uasm_il_beqzl);
-void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
+ uasm_i_bne(p, reg1, reg2, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
+UASM_EXPORT_SYMBOL(uasm_il_bne);
-void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bnez)(p, reg, 0);
+ uasm_i_bnez(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
+UASM_EXPORT_SYMBOL(uasm_il_bnez);
-void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bgezl)(p, reg, 0);
+ uasm_i_bgezl(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
+UASM_EXPORT_SYMBOL(uasm_il_bgezl);
-void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bgez)(p, reg, 0);
+ uasm_i_bgez(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
+UASM_EXPORT_SYMBOL(uasm_il_bgez);
-void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
+ uasm_i_bbit0(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
+UASM_EXPORT_SYMBOL(uasm_il_bbit0);
-void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
+ uasm_i_bbit1(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
+UASM_EXPORT_SYMBOL(uasm_il_bbit1);
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 54f56d5a96c4..b0f9b188e833 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -145,56 +145,6 @@ static irqreturn_t corehi_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_MIPS_MT_SMP
-
-#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
-#define C_RESCHED C_SW0
-#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for resched */
-#define C_CALL C_SW1
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-static void ipi_resched_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
- if (aprp_hook)
- aprp_hook();
-#endif
-
- scheduler_ipi();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- generic_smp_call_function_interrupt();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_call"
-};
-#endif /* CONFIG_MIPS_MT_SMP */
-
static struct irqaction corehi_irqaction = {
.handler = corehi_handler,
.name = "CoreHi",
@@ -222,12 +172,6 @@ static msc_irqmap_t msc_eicirqmap[] __initdata = {
static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
-void __init arch_init_ipiirq(int irq, struct irqaction *action)
-{
- setup_irq(irq, action);
- irq_set_handler(irq, handle_percpu_irq);
-}
-
void __init arch_init_irq(void)
{
int corehi_irq;
@@ -273,30 +217,11 @@ void __init arch_init_irq(void)
if (gic_present) {
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
+ } else if (cpu_has_veic) {
+ set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
+ corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
} else {
-#if defined(CONFIG_MIPS_MT_SMP)
- /* set up ipi interrupts */
- if (cpu_has_veic) {
- set_vi_handler (MSC01E_INT_SW0, ipi_resched_dispatch);
- set_vi_handler (MSC01E_INT_SW1, ipi_call_dispatch);
- cpu_ipi_resched_irq = MSC01E_INT_SW0;
- cpu_ipi_call_irq = MSC01E_INT_SW1;
- } else {
- cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE +
- MIPS_CPU_IPI_RESCHED_IRQ;
- cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE +
- MIPS_CPU_IPI_CALL_IRQ;
- }
- arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
- arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
-#endif
- if (cpu_has_veic) {
- set_vi_handler(MSC01E_INT_COREHI,
- corehi_irqdispatch);
- corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
- } else {
- corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
- }
+ corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
}
setup_irq(corehi_irq, &corehi_irqaction);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 49a2e2226fee..44b925005dd3 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -365,6 +365,12 @@ static inline void emit_half_load(unsigned int reg, unsigned int base,
emit_instr(ctx, lh, reg, offset, base);
}
+static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lhu, reg, offset, base);
+}
+
static inline void emit_mul(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
@@ -526,7 +532,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
u32 sflags, tmp_flags;
/* Adjust the stack pointer */
- emit_stack_offset(-align_sp(offset), ctx);
+ if (offset)
+ emit_stack_offset(-align_sp(offset), ctx);
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is essentially a bitmap */
@@ -578,7 +585,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
/* Restore the sp and discard the scratch memory */
- emit_stack_offset(align_sp(offset), ctx);
+ if (offset)
+ emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
@@ -625,8 +633,14 @@ static void build_prologue(struct jit_ctx *ctx)
if (ctx->flags & SEEN_X)
emit_jit_reg_move(r_X, r_zero, ctx);
- /* Do not leak kernel data to userspace */
- if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ /*
+ * Do not leak kernel data to userspace; we only need to clear
+ * r_A if it is ever used. If it is never used we do not
+ * save/restore it, so clearing it in that case would corrupt
+ * the caller's state.
+ */
+ if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
+ (ctx->flags & SEEN_A))
emit_jit_reg_move(r_A, r_zero, ctx);
}
@@ -1112,6 +1126,8 @@ jmp_cmp:
break;
case BPF_ANC | SKF_AD_IFINDEX:
/* A = skb->dev->ifindex */
+ case BPF_ANC | SKF_AD_HATYPE:
+ /* A = skb->dev->type */
ctx->flags |= SEEN_SKB | SEEN_A;
off = offsetof(struct sk_buff, dev);
/* Load *dev pointer */
@@ -1120,10 +1136,15 @@ jmp_cmp:
emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
b_imm(prog->len, ctx), ctx);
emit_reg_move(r_ret, r_zero, ctx);
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
- ifindex) != 4);
- off = offsetof(struct net_device, ifindex);
- emit_load(r_A, r_s0, off, ctx);
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+ off = offsetof(struct net_device, ifindex);
+ emit_load(r_A, r_s0, off, ctx);
+ } else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
+ off = offsetof(struct net_device, type);
+ emit_half_load_unsigned(r_A, r_s0, off, ctx);
+ }
break;
case BPF_ANC | SKF_AD_MARK:
ctx->flags |= SEEN_SKB | SEEN_A;
@@ -1143,7 +1164,7 @@ jmp_cmp:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
vlan_tci) != 2);
off = offsetof(struct sk_buff, vlan_tci);
- emit_half_load(r_s0, r_skb, off, ctx);
+ emit_half_load_unsigned(r_s0, r_skb, off, ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
} else {
@@ -1170,7 +1191,7 @@ jmp_cmp:
BUILD_BUG_ON(offsetof(struct sk_buff,
queue_mapping) > 0xff);
off = offsetof(struct sk_buff, queue_mapping);
- emit_half_load(r_A, r_skb, off, ctx);
+ emit_half_load_unsigned(r_A, r_skb, off, ctx);
break;
default:
pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index 5d2e0c8d29c0..88a2075305d1 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive)
is_offset_in_header(2, half)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
- .set reorder
- lh $r_A, 0(t1)
- .set noreorder
+ lhu $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- wsbh t0, $r_A
- seh $r_A, t0
+ wsbh $r_A, $r_A
# else
- sll t0, $r_A, 24
- andi t1, $r_A, 0xff00
- sra t0, t0, 16
- srl t1, t1, 8
+ sll t0, $r_A, 8
+ srl t1, $r_A, 8
+ andi t0, t0, 0xff00
or $r_A, t0, t1
# endif
#endif
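
Because the load is now lhu, $r_A holds a zero-extended 16-bit value, so the pre-R2 little-endian fixup reduces to a plain halfword byte swap. The four instructions compute, in C terms:

	a = ((a << 8) & 0xff00) | (a >> 8);	/* swap the two bytes of a u16 */
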
@@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive)
is_offset_in_header(1, byte)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
- lb $r_A, 0(t1)
+ lbu $r_A, 0(t1)
jr $r_ra
move $r_ret, zero
END(sk_load_byte)
@@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive)
* (void *to) is returned in r_s0
*
*/
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define DS_OFFSET(SIZE) (4 * SZREG)
+#else
+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
+#endif
#define bpf_slow_path_common(SIZE) \
/* Quick check. Are we within reasonable boundaries? */ \
LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
@@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive)
PTR_LA t0, skb_copy_bits; \
PTR_S $r_ra, (5 * SZREG)($r_sp); \
/* Assign low slot to a2 */ \
- move a2, $r_sp; \
+ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
jalr t0; \
/* Reset our destination slot (DS but it's ok) */ \
INT_S zero, (4 * SZREG)($r_sp); \
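
The new destination points at the slot the following INT_S actually zeroes (4 * SZREG off the stack pointer rather than the stack base), and on big-endian kernels DS_OFFSET() biases it by a further 4 - SIZE so the copied bytes land in the low-order end of the word when it is read back. A hedged rendering (variable names illustrative):

	void *dest = sp + 4 * SZREG;		/* the scratch slot */
	#ifdef __BIG_ENDIAN
	dest += 4 - size;			/* right-justify within the word */
	#endif
	skb_copy_bits(skb, offset, dest, size);
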
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 9f672ceb089b..ad3584dbc9d7 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -679,7 +679,7 @@ static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
pmas->cn68xx.ba++;
else
- pmas->cn63xx.ba++;
+ pmas->s.ba++;
}
/**
@@ -1351,7 +1351,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
mem_access_subid.cn68xx.ba = 0;
else
- mem_access_subid.cn63xx.ba = 0;
+ mem_access_subid.s.ba = 0;
/*
* Setup mem access 12-15 for port 0, 16-19 for port 1,
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index a05246cbf54c..2035aaec8514 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -36,6 +36,7 @@ unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
+EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 90e43782342b..aa7713adfa58 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -34,6 +34,7 @@ unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
+EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild
index 040178cdb3eb..b15bf6bc0e94 100644
--- a/arch/mn10300/include/uapi/asm/Kbuild
+++ b/arch/mn10300/include/uapi/asm/Kbuild
@@ -1,34 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 51a56c8b04b4..a72d5f0de692 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -6,6 +6,8 @@ config NIOS2
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
select IRQ_DOMAIN
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index 2fd08cbfdddb..55105220370c 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -18,7 +18,6 @@ config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
select SERIAL_CORE_CONSOLE
- depends on SERIAL_ALTERA_JTAGUART_CONSOLE || SERIAL_ALTERA_UART_CONSOLE
help
Enable early printk on console
This is useful for kernel debugging when your machine crashes very
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
index e74afc12d516..8673a79dca9c 100644
--- a/arch/nios2/Makefile
+++ b/arch/nios2/Makefile
@@ -22,10 +22,15 @@ export MMU
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
+KBUILD_AFLAGS += -march=r$(CONFIG_NIOS2_ARCH_REVISION)
+
KBUILD_CFLAGS += -pipe -D__linux__ -D__ELF__
+KBUILD_CFLAGS += -march=r$(CONFIG_NIOS2_ARCH_REVISION)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MUL_SUPPORT),-mhw-mul,-mno-hw-mul)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MULX_SUPPORT),-mhw-mulx,-mno-hw-mulx)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_DIV_SUPPORT),-mhw-div,-mno-hw-div)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_BMX_SUPPORT),-mbmx,-mno-bmx)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_CDX_SUPPORT),-mcdx,-mno-cdx)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_FPU_SUPPORT),-mcustom-fpu-cfg=60-1,)
KBUILD_CFLAGS += -fno-optimize-sibling-calls
diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore
new file mode 100644
index 000000000000..109279ca5a4d
--- /dev/null
+++ b/arch/nios2/boot/.gitignore
@@ -0,0 +1,2 @@
+*.dtb
+vmImage
diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts
index f362b2224ee7..4bb4dc1b52e9 100644
--- a/arch/nios2/boot/dts/10m50_devboard.dts
+++ b/arch/nios2/boot/dts/10m50_devboard.dts
@@ -244,6 +244,7 @@
};
chosen {
- bootargs = "debug console=ttyS0,115200";
+ bootargs = "debug earlycon console=ttyS0,115200";
+ stdout-path = &a_16550_uart_0;
};
};
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 87e70f2b463f..727dbb333f60 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += bitsperlong.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
+generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 52abba973dc2..55e383c173f7 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -46,7 +46,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void invalidate_dcache_range(unsigned long start, unsigned long end);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
#endif /* _ASM_NIOS2_CACHEFLUSH_H */
diff --git a/arch/nios2/include/asm/cmpxchg.h b/arch/nios2/include/asm/cmpxchg.h
deleted file mode 100644
index a7978f14d157..000000000000
--- a/arch/nios2/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2004 Microtronix Datacom Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_NIOS2_CMPXCHG_H
-#define _ASM_NIOS2_CMPXCHG_H
-
-#include <asm-generic/cmpxchg.h>
-
-#endif /* _ASM_NIOS2_CMPXCHG_H */
diff --git a/arch/nios2/include/asm/cpuinfo.h b/arch/nios2/include/asm/cpuinfo.h
index 348bb228fec9..dbdaf96f28d4 100644
--- a/arch/nios2/include/asm/cpuinfo.h
+++ b/arch/nios2/include/asm/cpuinfo.h
@@ -29,6 +29,8 @@ struct cpuinfo {
bool has_div;
bool has_mul;
bool has_mulx;
+ bool has_bmx;
+ bool has_cdx;
/* CPU caches */
u32 icache_line_size;
diff --git a/arch/nios2/include/asm/prom.h b/arch/nios2/include/asm/prom.h
deleted file mode 100644
index 75fffb42cfa5..000000000000
--- a/arch/nios2/include/asm/prom.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright Altera Corporation (C) <2015>. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ASM_NIOS2_PROM_H__
-#define __ASM_NIOS2_PROM_H__
-
-extern unsigned long __init of_early_console(void);
-
-#endif
diff --git a/arch/nios2/include/asm/setup.h b/arch/nios2/include/asm/setup.h
index dcbf8cf1a344..ac9bff248e6d 100644
--- a/arch/nios2/include/asm/setup.h
+++ b/arch/nios2/include/asm/setup.h
@@ -30,8 +30,6 @@ extern char fast_handler_end[];
extern void pagetable_init(void);
-extern void setup_early_printk(void);
-
#endif/* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index 727bd9504899..dfa3c7cb30b4 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -42,6 +42,8 @@
# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+
/*
* Zero Userspace
*/
@@ -81,8 +83,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
#define INLINE_COPY_TO_USER
extern long strncpy_from_user(char *__to, const char __user *__from,
- long __len);
-extern long strnlen_user(const char __user *s, long n);
+ long __len);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *s, long n);
/* Optimized macros */
#define __get_user_asm(val, insn, addr, err) \
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index e0bb972a50d7..374bd123329f 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,5 +1,5 @@
+# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += elf.h
-
+generic-y += setup.h
generic-y += ucontext.h
diff --git a/arch/nios2/kernel/.gitignore b/arch/nios2/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/nios2/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/nios2/kernel/Makefile b/arch/nios2/kernel/Makefile
index 1aae25703657..06d07432b38d 100644
--- a/arch/nios2/kernel/Makefile
+++ b/arch/nios2/kernel/Makefile
@@ -20,7 +20,6 @@ obj-y += syscall_table.o
obj-y += time.o
obj-y += traps.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_NIOS2_ALIGNMENT_TRAP) += misaligned.o
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 1cccc36877bc..93207718bb22 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -67,6 +67,8 @@ void __init setup_cpuinfo(void)
cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
cpuinfo.has_mulx = of_property_read_bool(cpu, "altr,has-mulx");
+ cpuinfo.has_bmx = of_property_read_bool(cpu, "altr,has-bmx");
+ cpuinfo.has_cdx = of_property_read_bool(cpu, "altr,has-cdx");
cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
@@ -78,6 +80,12 @@ void __init setup_cpuinfo(void)
if (IS_ENABLED(CONFIG_NIOS2_HW_MULX_SUPPORT) && !cpuinfo.has_mulx)
err_cpu("MULX");
+ if (IS_ENABLED(CONFIG_NIOS2_BMX_SUPPORT) && !cpuinfo.has_bmx)
+ err_cpu("BMX");
+
+ if (IS_ENABLED(CONFIG_NIOS2_CDX_SUPPORT) && !cpuinfo.has_cdx)
+ err_cpu("CDX");
+
cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways");
if (!cpuinfo.tlb_num_ways)
panic("altr,tlb-num-ways can't be 0. Please check your hardware "
@@ -125,12 +133,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m,
"CPU:\t\tNios II/%s\n"
+ "REV:\t\t%i\n"
"MMU:\t\t%s\n"
"FPU:\t\tnone\n"
"Clocking:\t%u.%02u MHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpuinfo.cpu_impl,
+ CONFIG_NIOS2_ARCH_REVISION,
cpuinfo.mmu ? "present" : "none",
clockfreq / 1000000, (clockfreq / 100000) % 10,
(loops_per_jiffy * HZ) / 500000,
@@ -141,10 +151,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"HW:\n"
" MUL:\t\t%s\n"
" MULX:\t\t%s\n"
- " DIV:\t\t%s\n",
+ " DIV:\t\t%s\n"
+ " BMX:\t\t%s\n"
+ " CDX:\t\t%s\n",
cpuinfo.has_mul ? "yes" : "no",
cpuinfo.has_mulx ? "yes" : "no",
- cpuinfo.has_div ? "yes" : "no");
+ cpuinfo.has_div ? "yes" : "no",
+ cpuinfo.has_bmx ? "yes" : "no",
+ cpuinfo.has_cdx ? "yes" : "no");
seq_printf(m,
"Icache:\t\t%ukB, line length: %u\n",
diff --git a/arch/nios2/kernel/early_printk.c b/arch/nios2/kernel/early_printk.c
deleted file mode 100644
index c08e4c1486fc..000000000000
--- a/arch/nios2/kernel/early_printk.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Early printk for Nios2.
- *
- * Copyright (C) 2015, Altera Corporation
- * Copyright (C) 2010, Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2009, Wind River Systems Inc
- * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-
-#include <asm/prom.h>
-
-static unsigned long base_addr;
-
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
-
-#define ALTERA_JTAGUART_DATA_REG 0
-#define ALTERA_JTAGUART_CONTROL_REG 4
-#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
-#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
-
-#define JUART_GET_CR() \
- __builtin_ldwio((void *)(base_addr + ALTERA_JTAGUART_CONTROL_REG))
-#define JUART_SET_CR(v) \
- __builtin_stwio((void *)(base_addr + ALTERA_JTAGUART_CONTROL_REG), v)
-#define JUART_SET_TX(v) \
- __builtin_stwio((void *)(base_addr + ALTERA_JTAGUART_DATA_REG), v)
-
-static void early_console_write(struct console *con, const char *s, unsigned n)
-{
- unsigned long status;
-
- while (n-- && *s) {
- while (((status = JUART_GET_CR())
- & ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
- if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0)
- return; /* no connection activity */
-#endif
- }
- JUART_SET_TX(*s);
- s++;
- }
-}
-
-#elif defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
-
-#define ALTERA_UART_TXDATA_REG 4
-#define ALTERA_UART_STATUS_REG 8
-#define ALTERA_UART_STATUS_TRDY 0x0040
-
-#define UART_GET_SR() \
- __builtin_ldwio((void *)(base_addr + ALTERA_UART_STATUS_REG))
-#define UART_SET_TX(v) \
- __builtin_stwio((void *)(base_addr + ALTERA_UART_TXDATA_REG), v)
-
-static void early_console_putc(char c)
-{
- while (!(UART_GET_SR() & ALTERA_UART_STATUS_TRDY))
- ;
-
- UART_SET_TX(c);
-}
-
-static void early_console_write(struct console *con, const char *s, unsigned n)
-{
- while (n-- && *s) {
- early_console_putc(*s);
- if (*s == '\n')
- early_console_putc('\r');
- s++;
- }
-}
-
-#else
-# error Neither SERIAL_ALTERA_JTAGUART_CONSOLE nor SERIAL_ALTERA_UART_CONSOLE \
-selected
-#endif
-
-static struct console early_console_prom = {
- .name = "early",
- .write = early_console_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1
-};
-
-void __init setup_early_printk(void)
-{
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) || \
- defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
- base_addr = of_early_console();
-#else
- base_addr = 0;
-#endif
-
- if (!base_addr)
- return;
-
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
- /* Clear activity bit so BYPASS doesn't stall if we've used JTAG for
- * downloading the kernel. This might cause early data to be lost even
- * if the JTAG terminal is running.
- */
- JUART_SET_CR(JUART_GET_CR() | ALTERA_JTAGUART_CONTROL_AC_MSK);
-#endif
-
- early_console = &early_console_prom;
- register_console(early_console);
- pr_info("early_console initialized at 0x%08lx\n", base_addr);
-}
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index f5b74ae69b5b..6c833a9d4eab 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -67,7 +67,7 @@ static int irq_map(struct irq_domain *h, unsigned int virq,
return 0;
}
-static struct irq_domain_ops irq_ops = {
+static const struct irq_domain_ops irq_ops = {
.map = irq_map,
.xlate = irq_domain_xlate_onecell,
};
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 3901b80d4420..6688576b3a47 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -30,7 +30,6 @@
#include <linux/of_fdt.h>
#include <linux/io.h>
-#include <asm/prom.h>
#include <asm/sections.h>
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
@@ -71,51 +70,3 @@ void __init early_init_devtree(void *params)
early_init_dt_scan(params);
}
-
-#ifdef CONFIG_EARLY_PRINTK
-static int __init early_init_dt_scan_serial(unsigned long node,
- const char *uname, int depth, void *data)
-{
- u64 *addr64 = (u64 *) data;
- const char *p;
-
- /* only consider serial nodes */
- if (strncmp(uname, "serial", 6) != 0)
- return 0;
-
- p = of_get_flat_dt_prop(node, "compatible", NULL);
- if (!p)
- return 0;
-
- /*
- * We found an altera_jtaguart but it wasn't configured for console, so
- * skip it.
- */
-#ifndef CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE
- if (strncmp(p, "altr,juart", 10) == 0)
- return 0;
-#endif
-
- /*
- * Same for altera_uart.
- */
-#ifndef CONFIG_SERIAL_ALTERA_UART_CONSOLE
- if (strncmp(p, "altr,uart", 9) == 0)
- return 0;
-#endif
-
- *addr64 = of_flat_dt_translate_address(node);
-
- return *addr64 == OF_BAD_ADDR ? 0 : 1;
-}
-
-unsigned long __init of_early_console(void)
-{
- u64 base = 0;
-
- if (of_scan_flat_dt(early_init_dt_scan_serial, &base))
- return (u32)ioremap(base, 32);
- else
- return 0;
-}
-#endif /* CONFIG_EARLY_PRINTK */
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6044d9be28b4..926a02b17b31 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -137,6 +137,8 @@ asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6,
strncpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif
+
+ parse_early_param();
}
void __init setup_arch(char **cmdline_p)
@@ -145,10 +147,6 @@ void __init setup_arch(char **cmdline_p)
console_verbose();
-#ifdef CONFIG_EARLY_PRINTK
- setup_early_printk();
-#endif
-
memory_start = PAGE_ALIGN((unsigned long)__pa(_end));
memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size;
diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c
index 804983317766..34f10af8ea40 100644
--- a/arch/nios2/mm/uaccess.c
+++ b/arch/nios2/mm/uaccess.c
@@ -128,36 +128,3 @@ asm(
".word 12b,13b\n"
".previous\n");
EXPORT_SYMBOL(raw_copy_to_user);
-
-long strncpy_from_user(char *__to, const char __user *__from, long __len)
-{
- int l = strnlen_user(__from, __len);
- int is_zt = 1;
-
- if (l > __len) {
- is_zt = 0;
- l = __len;
- }
-
- if (l == 0 || copy_from_user(__to, __from, l))
- return -EFAULT;
-
- if (is_zt)
- l--;
- return l;
-}
-
-long strnlen_user(const char __user *s, long n)
-{
- long i;
-
- for (i = 0; i < n; i++) {
- char c;
-
- if (get_user(c, s + i) == -EFAULT)
- return 0;
- if (c == 0)
- return i + 1;
- }
- return n + 1;
-}
diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform
index d3e5df9fb36b..74c1aaf588b8 100644
--- a/arch/nios2/platform/Kconfig.platform
+++ b/arch/nios2/platform/Kconfig.platform
@@ -52,6 +52,14 @@ config NIOS2_DTB_SOURCE
comment "Nios II instructions"
+config NIOS2_ARCH_REVISION
+ int "Select Nios II architecture revision"
+ range 1 2
+ default 1
+ help
+ Select between Nios II R1 and Nios II R2. The architectures
+ are binary incompatible. Default is R1.
+
config NIOS2_HW_MUL_SUPPORT
bool "Enable MUL instruction"
default n
@@ -73,6 +81,24 @@ config NIOS2_HW_DIV_SUPPORT
Set to true if you configured the Nios II to include the DIV
instruction. Enables the -mhw-div compiler flag.
+config NIOS2_BMX_SUPPORT
+ bool "Enable BMX instructions"
+ depends on NIOS2_ARCH_REVISION = 2
+ default n
+ help
+ Set to true if you configured the Nios II R2 to include
+ the BMX Bit Manipulation Extension instructions. Enables
+ the -mbmx compiler flag.
+
+config NIOS2_CDX_SUPPORT
+ bool "Enable CDX instructions"
+ depends on NIOS2_ARCH_REVISION = 2
+ default n
+ help
+ Set to true if you configured the Nios II R2 to include
+ the CDX Bit Manipulation Extension instructions. Enables
+ the -mcdx compiler flag.
+
config NIOS2_FPU_SUPPORT
bool "Custom floating point instr support"
default n
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index df8e2f7bc7dd..fdbcf0bf44a4 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y += ucontext.h
-
generic-y += auxvec.h
generic-y += barrier.h
generic-y += bitsperlong.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 80761eb82b5f..b15bf6bc0e94 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,10 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += byteorder.h
-header-y += elf.h
-header-y += kvm_para.h
-header-y += param.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += unistd.h
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 348356c99514..3971c60a7e7f 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -2,31 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += resource.h
-
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += pdc.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d8834e8bfb05..f7c8f9972f61 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -146,6 +146,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
@@ -379,6 +380,22 @@ source "arch/powerpc/platforms/Kconfig"
menu "Kernel options"
+config PPC_DT_CPU_FTRS
+ bool "Device-tree based CPU feature discovery & setup"
+ depends on PPC_BOOK3S_64
+ default n
+ help
+ This enables code to use a new device tree binding for describing CPU
+ compatibility and features. Saying Y here will attempt to use the new
+ binding if the firmware provides it. Currently only the skiboot
+ firmware provides this binding.
+ If you're not sure, say Y.
+
+config PPC_CPUFEATURES_ENABLE_UNKNOWN
+ bool "cpufeatures pass through unknown features to guest/userspace"
+ depends on PPC_DT_CPU_FTRS
+ default y
+
config HIGHMEM
bool "High memory support"
depends on PPC32
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 3c22d64b2de9..eccfcc88afae 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -7,7 +7,7 @@
PHONY := __archpost
__archpost:
-include include/config/auto.conf
+-include include/config/auto.conf
include scripts/Kbuild.include
quiet_cmd_relocs_check = CHKREL $@
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 214219dff87c..9732837aaae8 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -2,9 +2,9 @@
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define H_PTE_INDEX_SIZE 8
-#define H_PMD_INDEX_SIZE 5
-#define H_PUD_INDEX_SIZE 5
-#define H_PGD_INDEX_SIZE 15
+#define H_PMD_INDEX_SIZE 10
+#define H_PUD_INDEX_SIZE 7
+#define H_PGD_INDEX_SIZE 8
/*
* 64k aligned address free up few of the lower bits of RPN for us
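The index-size change above keeps the total mapped span constant: with 64K pages (page shift 16), the old split covers 16 + 8 + 5 + 5 + 15 = 49 bits and the new split covers 16 + 8 + 10 + 7 + 8 = 49 bits, i.e. 512 TB either way; only the per-level table sizes move. A compile-time sketch of that invariant (H_PAGE_SHIFT is a stand-in name):

    #define H_PAGE_SHIFT 16 /* 64K pages */
    _Static_assert(H_PAGE_SHIFT + 8 + 10 + 7 + 8 == 49, "new split: 49 VA bits");
    _Static_assert(H_PAGE_SHIFT + 8 + 5 + 5 + 15 == 49, "old split: 49 VA bits");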
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 8ee4211ca0c6..14ad37865000 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -560,6 +560,8 @@ typedef struct risc_timer_pram {
#define CPM_PIN_SECONDARY 2
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
+#define CPM_PIN_FALLEDGE 16
+#define CPM_PIN_ANYEDGE 0
enum cpm_port {
CPM_PORTA,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 6e834caa3720..0d1df02bf99d 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_POWERPC_CPUFEATURES_H
-#define __ASM_POWERPC_CPUFEATURES_H
+#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H
+#define __ASM_POWERPC_CPU_HAS_FEATURE_H
#ifndef __ASSEMBLY__
@@ -52,4 +52,4 @@ static inline bool cpu_has_feature(unsigned long feature)
#endif
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_POWERPC_CPUFEATURE_H */
+#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1f6847b107e4..c2d509584a98 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -118,7 +118,9 @@ extern struct cpu_spec *cur_cpu_spec;
extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+extern void set_cur_cpu_spec(struct cpu_spec *s);
extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void identify_cpu_name(unsigned int pvr);
extern void do_feature_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
new file mode 100644
index 000000000000..7a34fc11bf63
--- /dev/null
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_POWERPC_DT_CPU_FTRS_H
+#define __ASM_POWERPC_DT_CPU_FTRS_H
+
+/*
+ * Copyright 2017, IBM Corporation
+ * cpufeatures is the new way to discover CPU features via the
+ * /cpus/features device tree node. This supersedes PVR-based discovery
+ * ("cputable") and the older device-tree feature advertisement.
+ */
+
+#include <linux/types.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+#include <uapi/asm/cputable.h>
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+bool dt_cpu_ftrs_init(void *fdt);
+void dt_cpu_ftrs_scan(void);
+bool dt_cpu_ftrs_in_use(void);
+#else
+static inline bool dt_cpu_ftrs_init(void *fdt) { return false; }
+static inline void dt_cpu_ftrs_scan(void) { }
+static inline bool dt_cpu_ftrs_in_use(void) { return false; }
+#endif
+
+#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */
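The fallback contract for these entry points shows up later in this diff, in the setup_64.c hunk: device-tree feature discovery is tried first, and the PVR-based cputable remains the fallback. A sketch of that call pattern, taken from the hunk below:

    /* From early_setup(), per the setup_64.c hunk later in this diff. */
    if (!dt_cpu_ftrs_init(__va(dt_ptr)))
            identify_cpu(0, mfspr(SPRN_PVR));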
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0593d9479f74..b148496ffe36 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -111,6 +111,8 @@ struct kvmppc_host_state {
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
void __iomem *xics_phys;
+ void __iomem *xive_tima_phys;
+ void __iomem *xive_tima_virt;
u32 saved_xirr;
u64 dabr;
u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 77c60826d145..9c51ac4b8f36 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -210,6 +210,12 @@ struct kvmppc_spapr_tce_table {
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;
struct kvmppc_passthru_irqmap;
@@ -298,6 +304,7 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
+ struct kvmppc_xive *xive;
struct kvmppc_passthru_irqmap *pimap;
#endif
struct kvmppc_ops *kvm_ops;
@@ -427,7 +434,7 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
-#define KVMPPC_IRQ_XICS 2
+#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */
#define MMIO_HPTE_CACHE_SIZE 4
@@ -454,6 +461,21 @@ struct mmio_hpte_cache {
struct openpic;
+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+ struct {
+ u8 nsr;
+ u8 cppr;
+ u8 ipb;
+ u8 lsmfb;
+ u8 ack;
+ u8 inc;
+ u8 age;
+ u8 pipr;
+ };
+ __be64 w01;
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -714,6 +736,10 @@ struct kvm_vcpu_arch {
struct openpic *mpic; /* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
struct kvmppc_icp *icp; /* XICS presentation controller */
+ struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+ __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */
+ u32 xive_pushed; /* Is the VP pushed on the physical CPU? */
+ union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
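The xive_tma_w01 union added above gives two views of the same eight bytes: named byte fields and a single big-endian doubleword. That lets the whole W0/W1 thread-context state move with one 64-bit access while fields such as the CPPR stay individually addressable. A hedged sketch; save_w01() and set_cppr() are hypothetical helpers:

    static void save_w01(union xive_tma_w01 *tma, __be64 *save_area)
    {
            *save_area = tma->w01;  /* one doubleword copies all of W0/W1 */
    }

    static void set_cppr(union xive_tma_w01 *tma, u8 cppr)
    {
            tma->cppr = cppr;       /* byte view of the same storage */
    }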
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 76e940a3c145..e0d88c38602b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -240,6 +240,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
@@ -428,6 +429,14 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
}
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{
+ paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+ paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
+}
+
static inline u32 kvmppc_get_xics_latch(void)
{
u32 xirr;
@@ -458,6 +467,11 @@ static inline void __init kvm_cma_reserve(void)
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{}
+
static inline u32 kvmppc_get_xics_latch(void)
{
return 0;
@@ -508,6 +522,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
struct kvmppc_irq_map *irq_map,
struct kvmppc_passthru_irqmap *pimap,
bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -525,6 +543,60 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
#endif
+#ifdef CONFIG_KVM_XIVE
+/*
+ * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
+ * i.e. the new POWER9 interrupt controller, while the second "xive" is the
+ * legacy "eXternal Interrupt Vector Entry", which is the configuration of an
+ * interrupt on the "xics" interrupt controller on P8 and earlier. These
+ * functions consume or produce a legacy "XIVE" state for the new "XIVE"
+ * interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_xive_init_module(void);
+extern void kvmppc_xive_exit_module(void);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+static inline void kvmppc_xive_init_module(void) { }
+static inline void kvmppc_xive_exit_module(void) { }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status) { return -ENODEV; }
+#endif /* CONFIG_KVM_XIVE */
+
/*
* Prototypes for functions called only from assembler code.
* Having prototypes reduces sparse errors.
@@ -562,6 +634,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
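With both backends built in, the same operation can be routed to the XICS emulation or to the new XIVE one. A hedged sketch, assuming kvm->arch.xive (added in the kvm_host.h hunk above) is non-NULL only while the XIVE device is active; set_xive_for_irq() is a hypothetical helper:

    static int set_xive_for_irq(struct kvm *kvm, u32 irq, u32 server, u32 prio)
    {
            if (kvm->arch.xive)
                    return kvmppc_xive_set_xive(kvm, irq, server, prio);
            return kvmppc_xics_set_xive(kvm, irq, server, prio);
    }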
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a4b1d8d6b793..a2123f291ab0 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -151,8 +151,13 @@ void release_thread(struct task_struct *);
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3S_64
/* Limit stack to 128TB */
#define STACK_TOP_USER64 TASK_SIZE_128TB
+#else
+#define STACK_TOP_USER64 TASK_SIZE_USER64
+#endif
+
#define STACK_TOP_USER32 TASK_SIZE_USER32
#define STACK_TOP (is_32bit_task() ? \
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d4f653c9259a..7e50e47375d6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1229,6 +1229,7 @@
#define PVR_POWER8E 0x004B
#define PVR_POWER8NVL 0x004C
#define PVR_POWER8 0x004D
+#define PVR_POWER9 0x004E
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 3cdbeaeac397..c8a822acf962 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -99,7 +99,6 @@ struct xive_q {
#define XIVE_ESB_SET_PQ_01 0xd00
#define XIVE_ESB_SET_PQ_10 0xe00
#define XIVE_ESB_SET_PQ_11 0xf00
-#define XIVE_ESB_MASK XIVE_ESB_SET_PQ_01
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
@@ -136,11 +135,11 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order, bool can_escalate);
extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
-extern bool __xive_irq_trigger(struct xive_irq_data *xd);
-extern bool __xive_irq_retrigger(struct xive_irq_data *xd);
-extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd);
-
+extern void xive_native_sync_source(u32 hw_irq);
extern bool is_xive_irq(struct irq_chip *chip);
+extern int xive_native_enable_vp(u32 vp_id);
+extern int xive_native_disable_vp(u32 vp_id);
+extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
#else
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index dab3717e3ea0..b15bf6bc0e94 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,47 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += bootx.h
-header-y += byteorder.h
-header-y += cputable.h
-header-y += eeh.h
-header-y += elf.h
-header-y += epapr_hcalls.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += nvram.h
-header-y += opal-prd.h
-header-y += param.h
-header-y += perf_event.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ps3fb.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += spu_info.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += tm.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index f63c96cd3608..3e7ce86d5c13 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -47,4 +47,11 @@
#define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */
#define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */
+/*
+ * IMPORTANT!
+ * All future PPC_FEATURE definitions should be allocated in cooperation with
+ * OPAL / skiboot firmware, in accordance with the ibm,powerpc-cpu-features
+ * device tree binding.
+ */
+
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b9db46ae545b..e132902e1f14 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
+obj-$(CONFIG_PPC_DT_CPU_FTRS) += dt_cpu_ftrs.o
obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 439c257dec4a..709e23425317 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -634,6 +634,8 @@ int main(void)
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
+ HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
@@ -719,6 +721,14 @@ int main(void)
OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif
+#ifdef CONFIG_KVM_XICS
+ DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
+ arch.xive_saved_state));
+ DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
+ arch.xive_cam_word));
+ DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e79b9daa873c..9b3e88b1a9c8 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -23,7 +23,9 @@
#include <asm/mmu.h>
#include <asm/setup.h>
-struct cpu_spec* cur_cpu_spec = NULL;
+static struct cpu_spec the_cpu_spec __read_mostly;
+
+struct cpu_spec* cur_cpu_spec __read_mostly = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
/* The platform string corresponding to the real PVR */
@@ -2179,7 +2181,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif /* CONFIG_E500 */
};
-static struct cpu_spec the_cpu_spec;
+void __init set_cur_cpu_spec(struct cpu_spec *s)
+{
+ struct cpu_spec *t = &the_cpu_spec;
+
+ t = PTRRELOC(t);
+ *t = *s;
+
+ *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
+}
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
struct cpu_spec *s)
@@ -2266,6 +2276,29 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
return NULL;
}
+/*
+ * Used by cpufeatures to get the name for CPUs with a PVR table.
+ * If they don't have a PVR table, cpufeatures gets the name from the
+ * CPU device-tree node.
+ */
+void __init identify_cpu_name(unsigned int pvr)
+{
+ struct cpu_spec *s = cpu_specs;
+ struct cpu_spec *t = &the_cpu_spec;
+ int i;
+
+ s = PTRRELOC(s);
+ t = PTRRELOC(t);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_specs); i++, s++) {
+ if ((pvr & s->pvr_mask) == s->pvr_value) {
+ t->cpu_name = s->cpu_name;
+ return;
+ }
+ }
+}
+
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
new file mode 100644
index 000000000000..fcc7588a96d6
--- /dev/null
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -0,0 +1,1031 @@
+/*
+ * Copyright 2017, Nicholas Piggin, IBM Corporation
+ * Licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/memblock.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+
+#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
+#include <asm/mmu.h>
+#include <asm/oprofile_impl.h>
+#include <asm/prom.h>
+#include <asm/setup.h>
+
+
+/* Device-tree visible constants follow */
+#define ISA_V2_07B 2070
+#define ISA_V3_0B 3000
+
+#define USABLE_PR (1U << 0)
+#define USABLE_OS (1U << 1)
+#define USABLE_HV (1U << 2)
+
+#define HV_SUPPORT_HFSCR (1U << 0)
+#define OS_SUPPORT_FSCR (1U << 0)
+
+/* For parsing, we define all bits set as "NONE" case */
+#define HV_SUPPORT_NONE 0xffffffffU
+#define OS_SUPPORT_NONE 0xffffffffU
+
+struct dt_cpu_feature {
+ const char *name;
+ uint32_t isa;
+ uint32_t usable_privilege;
+ uint32_t hv_support;
+ uint32_t os_support;
+ uint32_t hfscr_bit_nr;
+ uint32_t fscr_bit_nr;
+ uint32_t hwcap_bit_nr;
+ /* fdt parsing */
+ unsigned long node;
+ int enabled;
+ int disabled;
+};
+
+#define CPU_FTRS_BASE \
+ (CPU_FTR_USE_TB | \
+ CPU_FTR_LWSYNC | \
+ CPU_FTR_FPU_UNAVAILABLE |\
+ CPU_FTR_NODSISRALIGN |\
+ CPU_FTR_NOEXECUTE |\
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_STCX_CHECKS_ADDRESS |\
+ CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_206 |\
+ CPU_FTR_ARCH_207S)
+
+#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)
+
+#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \
+ PPC_FEATURE_ARCH_2_06 |\
+ PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER2_BASE (PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_ISEL)
+/*
+ * Set up the base CPU
+ */
+
+extern void __flush_tlb_power8(unsigned int action);
+extern void __flush_tlb_power9(unsigned int action);
+extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
+
+static int hv_mode;
+
+static struct {
+ u64 lpcr;
+ u64 hfscr;
+ u64 fscr;
+} system_registers;
+
+static void (*init_pmu_registers)(void);
+
+static void cpufeatures_flush_tlb(void)
+{
+ unsigned long rb;
+ unsigned int i, num_sets;
+
+ /*
+ * This is a temporary measure to keep equivalent TLB flush as the
+ * cputable based setup code.
+ */
+ switch (PVR_VER(mfspr(SPRN_PVR))) {
+ case PVR_POWER8:
+ case PVR_POWER8E:
+ case PVR_POWER8NVL:
+ num_sets = POWER8_TLB_SETS;
+ break;
+ case PVR_POWER9:
+ num_sets = POWER9_TLB_SETS_HASH;
+ break;
+ default:
+ num_sets = 1;
+ pr_err("unknown CPU version for boot TLB flush\n");
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+ rb = TLBIEL_INVAL_SET;
+ for (i = 0; i < num_sets; i++) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+ asm volatile("ptesync" : : : "memory");
+}
+
+static void __restore_cpu_cpufeatures(void)
+{
+ /*
+ * LPCR is restored by the power on engine already. It can be changed
+ * after early init e.g., by radix enable, and we have no unified API
+ * for saving and restoring such SPRs.
+ *
+ * This ->restore hook should really be removed from idle and register
+ * restore moved directly into the idle restore code, because this code
+ * doesn't know how idle is implemented or what it needs restored here.
+ *
+ * The best we can do to accommodate secondary boot and idle restore
+ * for now is "or" LPCR with existing.
+ */
+
+ mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
+ if (hv_mode) {
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_HFSCR, system_registers.hfscr);
+ }
+ mtspr(SPRN_FSCR, system_registers.fscr);
+
+ if (init_pmu_registers)
+ init_pmu_registers();
+
+ cpufeatures_flush_tlb();
+}
+
+static char dt_cpu_name[64];
+
+static struct cpu_spec __initdata base_cpu_spec = {
+ .cpu_name = NULL,
+ .cpu_features = CPU_FTRS_BASE,
+ .cpu_user_features = COMMON_USER_BASE,
+ .cpu_user_features2 = COMMON_USER2_BASE,
+ .mmu_features = 0,
+ .icache_bsize = 32, /* minimum block size, fixed by */
+ .dcache_bsize = 32, /* cache info init. */
+ .num_pmcs = 0,
+ .pmc_type = PPC_PMC_DEFAULT,
+ .oprofile_cpu_type = NULL,
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = NULL,
+ .cpu_restore = __restore_cpu_cpufeatures,
+ .flush_tlb = NULL,
+ .machine_check_early = NULL,
+ .platform = NULL,
+};
+
+static void __init cpufeatures_setup_cpu(void)
+{
+ set_cur_cpu_spec(&base_cpu_spec);
+
+ cur_cpu_spec->pvr_mask = -1;
+ cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);
+
+ /* Initialize the base environment -- clear FSCR/HFSCR. */
+ hv_mode = !!(mfmsr() & MSR_HV);
+ if (hv_mode) {
+ /* CPU_FTR_HVMODE is used early in PACA setup */
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ mtspr(SPRN_HFSCR, 0);
+ }
+ mtspr(SPRN_FSCR, 0);
+
+ /*
+ * LPCR does not get cleared, to match behaviour with secondaries
+ * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
+ * could clear LPCR too.
+ */
+}
+
+static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
+{
+ if (f->hv_support == HV_SUPPORT_NONE) {
+ } else if (f->hv_support & HV_SUPPORT_HFSCR) {
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= 1UL << f->hfscr_bit_nr;
+ mtspr(SPRN_HFSCR, hfscr);
+ } else {
+ /* Does not have a known recipe */
+ return 0;
+ }
+
+ if (f->os_support == OS_SUPPORT_NONE) {
+ } else if (f->os_support & OS_SUPPORT_FSCR) {
+ u64 fscr = mfspr(SPRN_FSCR);
+ fscr |= 1UL << f->fscr_bit_nr;
+ mtspr(SPRN_FSCR, fscr);
+ } else {
+ /* Does not have a known recipe */
+ return 0;
+ }
+
+ if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
+ uint32_t word = f->hwcap_bit_nr / 32;
+ uint32_t bit = f->hwcap_bit_nr % 32;
+
+ if (word == 0)
+ cur_cpu_spec->cpu_user_features |= 1U << bit;
+ else if (word == 1)
+ cur_cpu_spec->cpu_user_features2 |= 1U << bit;
+ else
+ pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
+ }
+
+ return 1;
+}
+
+static int __init feat_enable(struct dt_cpu_feature *f)
+{
+ if (f->hv_support != HV_SUPPORT_NONE) {
+ if (f->hfscr_bit_nr != -1) {
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= 1UL << f->hfscr_bit_nr;
+ mtspr(SPRN_HFSCR, hfscr);
+ }
+ }
+
+ if (f->os_support != OS_SUPPORT_NONE) {
+ if (f->fscr_bit_nr != -1) {
+ u64 fscr = mfspr(SPRN_FSCR);
+ fscr |= 1UL << f->fscr_bit_nr;
+ mtspr(SPRN_FSCR, fscr);
+ }
+ }
+
+ if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
+ uint32_t word = f->hwcap_bit_nr / 32;
+ uint32_t bit = f->hwcap_bit_nr % 32;
+
+ if (word == 0)
+ cur_cpu_spec->cpu_user_features |= 1U << bit;
+ else if (word == 1)
+ cur_cpu_spec->cpu_user_features2 |= 1U << bit;
+ else
+ pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
+ }
+
+ return 1;
+}
+
+static int __init feat_disable(struct dt_cpu_feature *f)
+{
+ return 0;
+}
+
+static int __init feat_enable_hv(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ if (!hv_mode) {
+ pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
+ return 0;
+ }
+
+ mtspr(SPRN_LPID, 0);
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_LPES0; /* HV external interrupts */
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+
+ return 1;
+}
+
+static int __init feat_enable_le(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
+ return 1;
+}
+
+static int __init feat_enable_smt(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
+ return 1;
+}
+
+static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* Set PECE wakeup modes for ISA 207 */
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECE0;
+ lpcr |= LPCR_PECE1;
+ lpcr |= LPCR_PECE2;
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;
+
+ return 1;
+}
+
+static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* Set PECE wakeup modes for ISAv3.0B */
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECE0;
+ lpcr |= LPCR_PECE1;
+ lpcr |= LPCR_PECE2;
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_ISL;
+
+ /* VRMASD */
+ lpcr |= LPCR_VPM0;
+ lpcr &= ~LPCR_VPM1;
+ lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_ISL;
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+}
+
+
+static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_PPC_RADIX_MMU
+ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_dscr(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ feat_enable(f);
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_DPFD;
+ lpcr |= (4UL << LPCR_DPFD_SH);
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static void hfscr_pmu_enable(void)
+{
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= PPC_BIT(60);
+ mtspr(SPRN_HFSCR, hfscr);
+}
+
+static void init_pmu_power8(void)
+{
+ if (hv_mode) {
+ mtspr(SPRN_MMCRC, 0);
+ mtspr(SPRN_MMCRH, 0);
+ }
+
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+ mtspr(SPRN_MMCRS, 0);
+}
+
+static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power8";
+ cur_cpu_spec->flush_tlb = __flush_tlb_power8;
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
+
+ return 1;
+}
+
+static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power8();
+ init_pmu_registers = init_pmu_power8;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+ if (pvr_version_is(PVR_POWER8E))
+ cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+ cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
+
+ return 1;
+}
+
+static void init_pmu_power9(void)
+{
+ if (hv_mode)
+ mtspr(SPRN_MMCRC, 0);
+
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+}
+
+static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power9";
+ cur_cpu_spec->flush_tlb = __flush_tlb_power9;
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
+
+ return 1;
+}
+
+static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power9();
+ init_pmu_registers = init_pmu_power9;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+ cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
+
+ return 1;
+}
+
+static int __init feat_enable_tm(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ feat_enable(f);
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_fp(struct dt_cpu_feature *f)
+{
+ feat_enable(f);
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;
+
+ return 1;
+}
+
+static int __init feat_enable_vector(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_ALTIVEC
+ feat_enable(f);
+ cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
+ cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_vsx(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_VSX
+ feat_enable(f);
+ cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_purr(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;
+
+ return 1;
+}
+
+static int __init feat_enable_ebb(struct dt_cpu_feature *f)
+{
+ /*
+ * PPC_FEATURE2_EBB is enabled in PMU init code because it has
+ * historically been related to the PMU facility. This may have
+ * to be decoupled if EBB becomes more generic. For now, follow
+ * existing convention.
+ */
+ f->hwcap_bit_nr = -1;
+ feat_enable(f);
+
+ return 1;
+}
+
+static int __init feat_enable_dbell(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* P9 has an HFSCR for privileged state */
+ feat_enable(f);
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_hvi(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /*
+ * POWER9 XIVE interrupts, including those in OPAL XICS compatibility
+ * mode, are always delivered as hypervisor virtualization interrupts
+ * (HVI) rather than EE.
+ *
+ * However, LPES0 is not set here; on the chance that an EE does get
+ * delivered to the host somehow, the EE handler would not expect it
+ * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
+ * happen if there is a bug in the interrupt controller code, or the
+ * IC is misconfigured in systemsim.
+ */
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_HVICE; /* enable hvi interrupts */
+ lpcr |= LPCR_HEIC; /* disable ee interrupts when MSR_HV */
+ lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;
+
+ return 1;
+}
+
+struct dt_cpu_feature_match {
+ const char *name;
+ int (*enable)(struct dt_cpu_feature *f);
+ u64 cpu_ftr_bit_mask;
+};
+
+static struct dt_cpu_feature_match __initdata
+ dt_cpu_feature_match_table[] = {
+ {"hypervisor", feat_enable_hv, 0},
+ {"big-endian", feat_enable, 0},
+ {"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
+ {"smt", feat_enable_smt, 0},
+ {"interrupt-facilities", feat_enable, 0},
+ {"timer-facilities", feat_enable, 0},
+ {"timer-facilities-v3", feat_enable, 0},
+ {"debug-facilities", feat_enable, 0},
+ {"come-from-address-register", feat_enable, CPU_FTR_CFAR},
+ {"branch-tracing", feat_enable, 0},
+ {"floating-point", feat_enable_fp, 0},
+ {"vector", feat_enable_vector, 0},
+ {"vector-scalar", feat_enable_vsx, 0},
+ {"vector-scalar-v3", feat_enable, 0},
+ {"decimal-floating-point", feat_enable, 0},
+ {"decimal-integer", feat_enable, 0},
+ {"quadword-load-store", feat_enable, 0},
+ {"vector-crypto", feat_enable, 0},
+ {"mmu-hash", feat_enable_mmu_hash, 0},
+ {"mmu-radix", feat_enable_mmu_radix, 0},
+ {"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
+ {"virtual-page-class-key-protection", feat_enable, 0},
+ {"transactional-memory", feat_enable_tm, CPU_FTR_TM},
+ {"transactional-memory-v3", feat_enable_tm, 0},
+ {"idle-nap", feat_enable_idle_nap, 0},
+ {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
+ {"idle-stop", feat_enable_idle_stop, 0},
+ {"machine-check-power8", feat_enable_mce_power8, 0},
+ {"performance-monitor-power8", feat_enable_pmu_power8, 0},
+ {"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
+ {"event-based-branch", feat_enable_ebb, 0},
+ {"target-address-register", feat_enable, 0},
+ {"branch-history-rolling-buffer", feat_enable, 0},
+ {"control-register", feat_enable, CPU_FTR_CTRL},
+ {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
+ {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
+ {"processor-utilization-of-resources-register", feat_enable_purr, 0},
+ {"subcore", feat_enable, CPU_FTR_SUBCORE},
+ {"no-execute", feat_enable, 0},
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
+ {"cache-inhibited-large-page", feat_enable_large_ci, 0},
+ {"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
+ {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
+ {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
+ {"wait", feat_enable, 0},
+ {"atomic-memory-operations", feat_enable, 0},
+ {"branch-v3", feat_enable, 0},
+ {"copy-paste", feat_enable, 0},
+ {"decimal-floating-point-v3", feat_enable, 0},
+ {"decimal-integer-v3", feat_enable, 0},
+ {"fixed-point-v3", feat_enable, 0},
+ {"floating-point-v3", feat_enable, 0},
+ {"group-start-register", feat_enable, 0},
+ {"pc-relative-addressing", feat_enable, 0},
+ {"machine-check-power9", feat_enable_mce_power9, 0},
+ {"performance-monitor-power9", feat_enable_pmu_power9, 0},
+ {"event-based-branch-v3", feat_enable, 0},
+ {"random-number-generator", feat_enable, 0},
+ {"system-call-vectored", feat_disable, 0},
+ {"trace-interrupt-v3", feat_enable, 0},
+ {"vector-v3", feat_enable, 0},
+ {"vector-binary128", feat_enable, 0},
+ {"vector-binary16", feat_enable, 0},
+ {"wait-v3", feat_enable, 0},
+};
+
+/* XXX: how to configure this? Default + boot time? */
+#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
+#define CPU_FEATURE_ENABLE_UNKNOWN 1
+#else
+#define CPU_FEATURE_ENABLE_UNKNOWN 0
+#endif
+
+static void __init cpufeatures_setup_start(u32 isa)
+{
+ pr_info("setup for ISA %d\n", isa);
+
+ if (isa >= 3000) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
+ }
+}
+
+static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+{
+ const struct dt_cpu_feature_match *m;
+ bool known = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
+ m = &dt_cpu_feature_match_table[i];
+ if (!strcmp(f->name, m->name)) {
+ known = true;
+ if (m->enable(f))
+ break;
+
+ pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
+ f->name);
+ return false;
+ }
+ }
+
+ if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+ if (!feat_try_enable_unknown(f)) {
+ pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
+ f->name);
+ return false;
+ }
+ }
+
+ if (m->cpu_ftr_bit_mask)
+ cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+
+ if (known)
+ pr_debug("enabling: %s\n", f->name);
+ else
+ pr_debug("enabling: %s (unknown)\n", f->name);
+
+ return true;
+}
+
+static __init void cpufeatures_cpu_quirks(void)
+{
+ int version = mfspr(SPRN_PVR);
+
+ /*
+ * Not all quirks can be derived from the cpufeatures device tree.
+ */
+ if ((version & 0xffffff00) == 0x004e0100)
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
+}
+
+static void __init cpufeatures_setup_finished(void)
+{
+ cpufeatures_cpu_quirks();
+
+ if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
+ pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ }
+
+ system_registers.lpcr = mfspr(SPRN_LPCR);
+ system_registers.hfscr = mfspr(SPRN_HFSCR);
+ system_registers.fscr = mfspr(SPRN_FSCR);
+
+ cpufeatures_flush_tlb();
+
+ pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
+ cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
+}
+
+static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
+ && of_get_flat_dt_prop(node, "isa", NULL))
+ return 1;
+
+ return 0;
+}
+
+static bool __initdata using_dt_cpu_ftrs = false;
+
+bool __init dt_cpu_ftrs_in_use(void)
+{
+ return using_dt_cpu_ftrs;
+}
+
+bool __init dt_cpu_ftrs_init(void *fdt)
+{
+ /* Setup and verify the FDT, if it fails we just bail */
+ if (!early_init_dt_verify(fdt))
+ return false;
+
+ if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
+ return false;
+
+ cpufeatures_setup_cpu();
+
+ using_dt_cpu_ftrs = true;
+ return true;
+}
+
+static int nr_dt_cpu_features;
+static struct dt_cpu_feature *dt_cpu_features;
+
+static int __init process_cpufeatures_node(unsigned long node,
+ const char *uname, int i)
+{
+ const __be32 *prop;
+ struct dt_cpu_feature *f;
+ int len;
+
+ f = &dt_cpu_features[i];
+ memset(f, 0, sizeof(struct dt_cpu_feature));
+
+ f->node = node;
+
+ f->name = uname;
+
+ prop = of_get_flat_dt_prop(node, "isa", &len);
+ if (!prop) {
+ pr_warn("%s: missing isa property\n", uname);
+ return 0;
+ }
+ f->isa = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
+ if (!prop) {
+ pr_warn("%s: missing usable-privilege property", uname);
+ return 0;
+ }
+ f->usable_privilege = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "hv-support", &len);
+ if (prop)
+ f->hv_support = be32_to_cpup(prop);
+ else
+ f->hv_support = HV_SUPPORT_NONE;
+
+ prop = of_get_flat_dt_prop(node, "os-support", &len);
+ if (prop)
+ f->os_support = be32_to_cpup(prop);
+ else
+ f->os_support = OS_SUPPORT_NONE;
+
+ prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
+ if (prop)
+ f->hfscr_bit_nr = be32_to_cpup(prop);
+ else
+ f->hfscr_bit_nr = -1;
+ prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
+ if (prop)
+ f->fscr_bit_nr = be32_to_cpup(prop);
+ else
+ f->fscr_bit_nr = -1;
+ prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
+ if (prop)
+ f->hwcap_bit_nr = be32_to_cpup(prop);
+ else
+ f->hwcap_bit_nr = -1;
+
+ if (f->usable_privilege & USABLE_HV) {
+ if (!(mfmsr() & MSR_HV)) {
+ pr_warn("%s: HV feature passed to guest\n", uname);
+ return 0;
+ }
+
+ if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
+ pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
+ return 0;
+ }
+
+ if (f->hv_support == HV_SUPPORT_HFSCR) {
+ if (f->hfscr_bit_nr == -1) {
+ pr_warn("%s: missing hfscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+ } else {
+ if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
+ pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ if (f->usable_privilege & USABLE_OS) {
+ if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
+ pr_warn("%s: unwanted fscr_bit_nr\n", uname);
+ return 0;
+ }
+
+ if (f->os_support == OS_SUPPORT_FSCR) {
+ if (f->fscr_bit_nr == -1) {
+ pr_warn("%s: missing fscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+ } else {
+ if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
+ pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ if (!(f->usable_privilege & USABLE_PR)) {
+ if (f->hwcap_bit_nr != -1) {
+ pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ /* Do all the independent features in the first pass */
+ if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
+ if (cpufeatures_process_feature(f))
+ f->enabled = 1;
+ else
+ f->disabled = 1;
+ }
+
+ return 0;
+}
+
+static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
+{
+ const __be32 *prop;
+ int len;
+ int nr_deps;
+ int i;
+
+ if (f->enabled || f->disabled)
+ return;
+
+ prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
+ if (!prop) {
+ pr_warn("%s: missing dependencies property", f->name);
+ return;
+ }
+
+ nr_deps = len / sizeof(int);
+
+ for (i = 0; i < nr_deps; i++) {
+ unsigned long phandle = be32_to_cpu(prop[i]);
+ int j;
+
+ for (j = 0; j < nr_dt_cpu_features; j++) {
+ struct dt_cpu_feature *d = &dt_cpu_features[j];
+
+ if (of_get_flat_dt_phandle(d->node) == phandle) {
+ cpufeatures_deps_enable(d);
+ if (d->disabled) {
+ f->disabled = 1;
+ return;
+ }
+ }
+ }
+ }
+
+ if (cpufeatures_process_feature(f))
+ f->enabled = 1;
+ else
+ f->disabled = 1;
+}
+
+static int __init scan_cpufeatures_subnodes(unsigned long node,
+ const char *uname,
+ void *data)
+{
+ int *count = data;
+
+ process_cpufeatures_node(node, uname, *count);
+
+ (*count)++;
+
+ return 0;
+}
+
+static int __init count_cpufeatures_subnodes(unsigned long node,
+ const char *uname,
+ void *data)
+{
+ int *count = data;
+
+ (*count)++;
+
+ return 0;
+}
+
+static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
+ *uname, int depth, void *data)
+{
+ const __be32 *prop;
+ int count, i;
+ u32 isa;
+
+ /* We are scanning "ibm,powerpc-cpu-features" nodes only */
+ if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "isa", NULL);
+ if (!prop)
+ /* We checked before, "can't happen" */
+ return 0;
+
+ isa = be32_to_cpup(prop);
+
+ /* Count and allocate space for cpu features */
+ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
+ &nr_dt_cpu_features);
+ dt_cpu_features = __va(
+ memblock_alloc(sizeof(struct dt_cpu_feature)*
+ nr_dt_cpu_features, PAGE_SIZE));
+
+ cpufeatures_setup_start(isa);
+
+ /* Scan nodes into dt_cpu_features and enable those without deps */
+ count = 0;
+ of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);
+
+ /* Recursive enable remaining features with dependencies */
+ for (i = 0; i < nr_dt_cpu_features; i++) {
+ struct dt_cpu_feature *f = &dt_cpu_features[i];
+
+ cpufeatures_deps_enable(f);
+ }
+
+ prop = of_get_flat_dt_prop(node, "display-name", NULL);
+ if (prop && strlen((char *)prop) != 0) {
+ strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
+ cur_cpu_spec->cpu_name = dt_cpu_name;
+ }
+
+ cpufeatures_setup_finished();
+
+ memblock_free(__pa(dt_cpu_features),
+ sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);
+
+ return 0;
+}
+
+void __init dt_cpu_ftrs_scan(void)
+{
+ of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
+}
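feat_enable() above splits hwcap-bit-nr into a word index and a bit index: bits 0-31 set cpu_user_features and bits 32-63 set cpu_user_features2, which userspace later sees via AT_HWCAP/AT_HWCAP2. A standalone sketch of that mapping; advertise() is a hypothetical stand-in:

    #include <stdint.h>
    #include <stdio.h>

    static void advertise(uint32_t hwcap_bit_nr, uint32_t *w0, uint32_t *w1)
    {
            uint32_t word = hwcap_bit_nr / 32;
            uint32_t bit = hwcap_bit_nr % 32;

            if (word == 0)
                    *w0 |= 1U << bit;       /* AT_HWCAP word */
            else if (word == 1)
                    *w1 |= 1U << bit;       /* AT_HWCAP2 word */
            else
                    fprintf(stderr, "no hwcap word for bit %u\n", hwcap_bit_nr);
    }

    int main(void)
    {
            uint32_t features = 0, features2 = 0;

            advertise(40, &features, &features2);   /* bit 8 of word 1 */
            printf("hwcap=%08x hwcap2=%08x\n", features, features2);
            return 0;
    }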
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 45b453e4d0c8..acd8ca76233e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@@ -799,8 +805,14 @@ kernel_dbg_exc:
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a9312b52fe6f..ae418b85c17c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -391,9 +391,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*/
BEGIN_FTR_SECTION
rlwinm. r11,r12,47-31,30,31
- beq- 4f
- BRANCH_TO_COMMON(r10, machine_check_idle_common)
-4:
+ bne machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d2f0afeae5a0..40c4887c27b6 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -57,6 +57,7 @@
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
+#include <asm/dt_cpu_ftrs.h>
#include <mm/mmu_decl.h>
@@ -375,23 +376,31 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* A POWER6 partition in "POWER6 architected" mode
* uses the 0x0f000002 PVR value; in POWER5+ mode
* it uses 0x0f000001.
+ *
+ * If we're using device tree CPU feature discovery then we don't
+ * support the cpu-version property, and it's the responsibility of the
+ * firmware/hypervisor to provide the correct feature set for the
+ * architecture level via the ibm,powerpc-cpu-features binding.
*/
- prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
- if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
- identify_cpu(0, be32_to_cpup(prop));
+ if (!dt_cpu_ftrs_in_use()) {
+ prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+ identify_cpu(0, be32_to_cpup(prop));
- identical_pvr_fixup(node);
+ check_cpu_feature_properties(node);
+ check_cpu_pa_features(node);
+ }
- check_cpu_feature_properties(node);
- check_cpu_pa_features(node);
+ identical_pvr_fixup(node);
init_mmu_slb_size(node);
#ifdef CONFIG_PPC64
- if (nthreads > 1)
- cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
- else
+ if (nthreads == 1)
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
+ else if (!dt_cpu_ftrs_in_use())
+ cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif
+
return 0;
}
@@ -721,6 +730,8 @@ void __init early_init_devtree(void *params)
DBG("Scanning CPUs ...\n");
+ dt_cpu_ftrs_scan();
+
/* Retrieve CPU related informations from the flat tree
* (altivec support, boot CPU ID, ...)
*/
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 69e077180db6..71dcda91755d 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -261,7 +261,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "cpu\t\t: ");
- if (cur_cpu_spec->pvr_mask)
+ if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
seq_printf(m, "%s", cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0d4dcaeaafcb..f35ff9dea4fb 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -49,6 +49,7 @@
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
@@ -274,8 +275,10 @@ void __init early_setup(unsigned long dt_ptr)
/* -------- printk is _NOT_ safe to use here ! ------- */
- /* Identify CPU type */
- identify_cpu(0, mfspr(SPRN_PVR));
+ /* Try new device tree based feature discovery ... */
+ if (!dt_cpu_ftrs_init(__va(dt_ptr)))
+ /* Otherwise use the old style CPU table */
+ identify_cpu(0, mfspr(SPRN_PVR));
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
initialise_paca(&boot_paca, 0);
@@ -541,6 +544,9 @@ void __init initialize_cache_info(void)
dcache_bsize = ppc64_caches.l1d.block_size;
icache_bsize = ppc64_caches.l1i.block_size;
+ cur_cpu_spec->dcache_bsize = dcache_bsize;
+ cur_cpu_spec->icache_bsize = icache_bsize;
+
DBG(" <- initialize_cache_info()\n");
}
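The early_setup() hunk above gives the new binding precedence: device-tree feature discovery runs first, and the PVR-driven CPU table is consulted only when it fails. A small sketch of that try-then-fallback shape; both stub functions are hypothetical stand-ins for dt_cpu_ftrs_init() and identify_cpu():

#include <stdbool.h>
#include <stddef.h>

/* Pretend success means the ibm,powerpc-cpu-features binding was found
 * and fully described the CPU. */
static bool dt_cpu_ftrs_init_stub(void *fdt)
{
	return fdt != NULL;
}

static void identify_cpu_stub(unsigned int pvr) { }	/* legacy table lookup */

static void early_cpu_identify(void *fdt, unsigned int pvr)
{
	/* Try the new device tree based feature discovery first ... */
	if (!dt_cpu_ftrs_init_stub(fdt))
		/* ... otherwise fall back to the old style CPU table. */
		identify_cpu_stub(pvr);
}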
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 65a471de96de..24de532c1736 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -197,6 +197,11 @@ config KVM_XICS
Specification) interrupt controller architecture used on
IBM POWER (pSeries) servers.
+config KVM_XIVE
+ bool
+ default y
+ depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index b87ccde2137a..d91a2604c496 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -74,7 +74,7 @@ kvm-hv-y += \
book3s_64_mmu_radix.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
- book3s_hv_rm_xics.o
+ book3s_hv_rm_xics.o book3s_hv_rm_xive.o
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
@@ -89,6 +89,8 @@ endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
+kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+
kvm-book3s_64-module-objs := \
$(common-objs-y) \
book3s.o \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8c4d7e9d27d2..72d977e30952 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -35,6 +35,7 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/xive.h>
#include "book3s.h"
#include "trace.h"
@@ -596,11 +597,14 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
- if (!vcpu->arch.icp) {
+ if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
r = -ENXIO;
break;
}
- *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
+ if (xive_enabled())
+ *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
+ else
+ *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
@@ -666,12 +670,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
- if (!vcpu->arch.icp) {
+ if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
r = -ENXIO;
break;
}
- r = kvmppc_xics_set_icp(vcpu,
- set_reg_val(id, *val));
+ if (xive_enabled())
+ r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
+ else
+ r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
@@ -942,6 +948,50 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
+#ifdef CONFIG_KVM_XICS
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ if (xive_enabled())
+ return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
+ line_status);
+ else
+ return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
+ line_status);
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
+ level, line_status);
+}
+static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
+}
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries, int gsi)
+{
+ entries->gsi = gsi;
+ entries->type = KVM_IRQ_ROUTING_IRQCHIP;
+ entries->set = kvmppc_book3s_set_irq;
+ entries->irqchip.irqchip = 0;
+ entries->irqchip.pin = gsi;
+ return 1;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ return pin;
+}
+
+#endif /* CONFIG_KVM_XICS */
+
static int kvmppc_book3s_init(void)
{
int r;
@@ -952,12 +1002,25 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
r = kvmppc_book3s_init_pr();
#endif
- return r;
+#ifdef CONFIG_KVM_XICS
+#ifdef CONFIG_KVM_XIVE
+ if (xive_enabled()) {
+ kvmppc_xive_init_module();
+ kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
+ } else
+#endif
+ kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
+#endif
+ return r;
}
static void kvmppc_book3s_exit(void)
{
+#ifdef CONFIG_KVM_XICS
+ if (xive_enabled())
+ kvmppc_xive_exit_module();
+#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
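All of the entry points touched above pick between the legacy XICS emulation and the new XIVE code with a single xive_enabled() test at the boundary, so neither backend leaks into its callers. A reduced sketch of that dispatch pattern; every name below other than the xive_enabled() concept is invented:

#include <stdbool.h>

static bool xive_mode;				/* stand-in for xive_enabled() */

static long xics_get_state(void) { return 1; }	/* legacy backend */
static long xive_get_state(void) { return 2; }	/* new backend */

/* Single externally visible entry point; the interrupt-controller
 * flavour is selected here so callers stay backend-agnostic. */
long icp_get_state(void)
{
	if (xive_mode)
		return xive_get_state();
	return xics_get_state();
}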
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 549dd6070dee..42b7a4fd57d9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -67,6 +67,7 @@
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include "book3s.h"
@@ -837,6 +838,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_IPOLL:
case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
+ if (xive_enabled()) {
+ ret = H_NOT_AVAILABLE;
+ return RESUME_GUEST;
+ }
ret = kvmppc_xics_hcall(vcpu, req);
break;
}
@@ -2947,8 +2952,12 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = kvmppc_book3s_hv_page_fault(run, vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- } else if (r == RESUME_PASSTHROUGH)
- r = kvmppc_xics_rm_complete(vcpu, 0);
+ } else if (r == RESUME_PASSTHROUGH) {
+ if (WARN_ON(xive_enabled()))
+ r = H_SUCCESS;
+ else
+ r = kvmppc_xics_rm_complete(vcpu, 0);
+ }
} while (is_kvmppc_resume_guest(r));
out:
@@ -3400,10 +3409,20 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
* Set HVICE bit to enable hypervisor virtualization interrupts.
+	 * Set HEIC to prevent OS interrupts from going to the hypervisor
+	 * (this should be unnecessary, but better safe than sorry in case
+	 * we re-enable EE in HV mode with this LPCR value still set)
*/
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
lpcr &= ~LPCR_VPM0;
- lpcr |= LPCR_HVICE;
+ lpcr |= LPCR_HVICE | LPCR_HEIC;
+
+ /*
+ * If xive is enabled, we route 0x500 interrupts directly
+ * to the guest.
+ */
+ if (xive_enabled())
+ lpcr |= LPCR_LPES;
}
/*
@@ -3533,7 +3552,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
struct kvmppc_irq_map *irq_map;
struct kvmppc_passthru_irqmap *pimap;
struct irq_chip *chip;
- int i;
+ int i, rc = 0;
if (!kvm_irq_bypass)
return 1;
@@ -3558,10 +3577,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
/*
* For now, we only support interrupts for which the EOI operation
* is an OPAL call followed by a write to XIRR, since that's
- * what our real-mode EOI code does.
+ * what our real-mode EOI code does, or a XIVE interrupt
*/
chip = irq_data_get_irq_chip(&desc->irq_data);
- if (!chip || !is_pnv_opal_msi(chip)) {
+ if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
host_irq, guest_gsi);
mutex_unlock(&kvm->lock);
@@ -3603,7 +3622,12 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
if (i == pimap->n_mapped)
pimap->n_mapped++;
- kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ if (xive_enabled())
+ rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
+ else
+ kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ if (rc)
+ irq_map->r_hwirq = 0;
mutex_unlock(&kvm->lock);
@@ -3614,7 +3638,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
struct irq_desc *desc;
struct kvmppc_passthru_irqmap *pimap;
- int i;
+ int i, rc = 0;
if (!kvm_irq_bypass)
return 0;
@@ -3639,9 +3663,12 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
return -ENODEV;
}
- kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
+ if (xive_enabled())
+ rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
+ else
+ kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
- /* invalidate the entry */
+	/* invalidate the entry (what to do on error from the above?) */
pimap->mapped[i].r_hwirq = 0;
/*
@@ -3650,7 +3677,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
*/
unlock:
mutex_unlock(&kvm->lock);
- return 0;
+ return rc;
}
static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
@@ -3928,7 +3955,7 @@ static int kvmppc_book3s_init_hv(void)
* indirectly, via OPAL.
*/
#ifdef CONFIG_SMP
- if (!get_paca()->kvm_hstate.xics_phys) {
+ if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 9c71c72e65ce..88a65923c649 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,24 @@
#define KVM_CMA_CHUNK_ORDER 18
+#include "book3s_xics.h"
+#include "book3s_xive.h"
+
+/*
+ * The XIVE module will populate these when it loads
+ */
+unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
+EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
+
/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
* should be power of 2.
@@ -211,6 +229,7 @@ void kvmhv_rm_send_ipi(int cpu)
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
return;
}
+
/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
cpu_first_thread_sibling(cpu) ==
@@ -407,6 +426,9 @@ static long kvmppc_read_one_intr(bool *again)
u8 host_ipi;
int64_t rc;
+ if (xive_enabled())
+ return 1;
+
/* see if a host IPI is pending */
host_ipi = local_paca->kvm_hstate.host_ipi;
if (host_ipi)
@@ -491,3 +513,84 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
+
+#ifdef CONFIG_KVM_XICS
+static inline bool is_rm(void)
+{
+ return !(mfmsr() & MSR_DR);
+}
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+ if (unlikely(!__xive_vm_h_xirr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_xirr(vcpu);
+ } else
+ return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.gpr[5] = get_tb();
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+ if (unlikely(!__xive_vm_h_xirr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_xirr(vcpu);
+ } else
+ return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipoll(vcpu, server);
+ if (unlikely(!__xive_vm_h_ipoll))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_ipoll(vcpu, server);
+ } else
+ return H_TOO_HARD;
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+ if (unlikely(!__xive_vm_h_ipi))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_ipi(vcpu, server, mfrr);
+ } else
+ return xics_rm_h_ipi(vcpu, server, mfrr);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_cppr(vcpu, cppr);
+ if (unlikely(!__xive_vm_h_cppr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_cppr(vcpu, cppr);
+ } else
+ return xics_rm_h_cppr(vcpu, cppr);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_eoi(vcpu, xirr);
+ if (unlikely(!__xive_vm_h_eoi))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_eoi(vcpu, xirr);
+ } else
+ return xics_rm_h_eoi(vcpu, xirr);
+}
+#endif /* CONFIG_KVM_XICS */
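Each kvmppc_rm_h_*() wrapper above dispatches three ways: to the built-in real-mode XIVE handler when translation is off, to the virtual-mode handler the KVM module installs through a __xive_vm_h_* pointer (returning H_NOT_AVAILABLE while the pointer is still NULL), or to the XICS path when XIVE is disabled. A condensed sketch under the same assumptions; the names and the error value are illustrative:

#include <stddef.h>

#define H_NOT_AVAILABLE	(-1L)	/* illustrative value only */

typedef long (*vm_handler_t)(void *vcpu);

static vm_handler_t vm_handler;	/* populated by the module when it loads */

static long rm_handler(void *vcpu)   { return 0; }	/* built-in copy */
static long xics_handler(void *vcpu) { return 0; }	/* legacy path */

static int in_real_mode;	/* ~ !(mfmsr() & MSR_DR) */
static int xive_on;		/* ~ xive_enabled() */

long h_xirr_dispatch(void *vcpu)
{
	if (!xive_on)
		return xics_handler(vcpu);
	if (in_real_mode)
		return rm_handler(vcpu);
	if (vm_handler == NULL)		/* module not loaded yet */
		return H_NOT_AVAILABLE;
	return vm_handler(vcpu);
}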
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index ffde4507ddfd..2a862618f072 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -484,7 +484,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -522,8 +522,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
return check_too_hard(xics, icp);
}
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
+int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -609,7 +609,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
return check_too_hard(xics, this_icp);
}
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -729,7 +729,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
return check_too_hard(xics, icp);
}
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
struct kvmppc_icp *icp = vcpu->arch.icp;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
new file mode 100644
index 000000000000..abf5f01b6eb1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/cputhreads.h>
+#include <asm/pgtable.h>
+#include <asm/ppc-opcode.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
+#include <asm/asm-prototypes.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+
+#include "book3s_xive.h"
+
+/* XXX */
+#include <asm/udbg.h>
+//#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) do { } while(0)
+
+static inline void __iomem *get_tima_phys(void)
+{
+ return local_paca->kvm_hstate.xive_tima_phys;
+}
+
+#undef XIVE_RUNTIME_CHECKS
+#define X_PFX xive_rm_
+#define X_STATIC
+#define X_STAT_PFX stat_rm_
+#define __x_tima get_tima_phys()
+#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
+#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
+#define __x_readb __raw_rm_readb
+#define __x_writeb __raw_rm_writeb
+#define __x_readw __raw_rm_readw
+#define __x_readq __raw_rm_readq
+#define __x_writeq __raw_rm_writeq
+
+#include "book3s_xive_template.c"
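This new file is not conventional C: it defines a name prefix (X_PFX) and a set of __x_* accessors, then textually includes book3s_xive_template.c, which expands into the real-mode flavour of the handlers; book3s_xive.c below repeats the trick with virtual-mode accessors. A self-contained miniature of the same token-pasting technique, with invented macros and values:

/* Two-level glue so X_PFX is expanded before pasting. */
#define GLUE_(a, b) a##b
#define GLUE(a, b)  GLUE_(a, b)

/* "Template" body, instantiated once per flavour below. In the kernel
 * this lives in a separate file pulled in with #include. */
#define DEFINE_READ \
	static int GLUE(X_PFX, read)(int reg) { return X_ACCESS(reg); }

/* Real-mode flavour: pretend cache-inhibited access. */
#define X_PFX rm_
#define X_ACCESS(r) ((r) + 1)
DEFINE_READ			/* expands to: static int rm_read(...) */
#undef X_PFX
#undef X_ACCESS

/* Virtual-mode flavour: pretend cacheable MMIO access. */
#define X_PFX vm_
#define X_ACCESS(r) ((r) + 2)
DEFINE_READ			/* expands to: static int vm_read(...) */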
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7c6477d1840a..bdb3f76ceb6b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -30,6 +30,7 @@
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
+#include <asm/xive-regs.h>
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
@@ -970,6 +971,23 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
cmpwi r3, 512 /* 1 microsecond */
blt hdec_soon
+#ifdef CONFIG_KVM_XICS
+ /* We are entering the guest on that thread, push VCPU to XIVE */
+ ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr0, r10, r0
+ beq no_xive
+ ld r11, VCPU_XIVE_SAVED_STATE(r4)
+ li r9, TM_QW1_OS
+ stdcix r11,r9,r10
+ eieio
+ lwz r11, VCPU_XIVE_CAM_WORD(r4)
+ li r9, TM_QW1_OS + TM_WORD2
+ stwcix r11,r9,r10
+ li r9, 1
+ stw r9, VCPU_XIVE_PUSHED(r4)
+no_xive:
+#endif /* CONFIG_KVM_XICS */
+
deliver_guest_interrupt:
ld r6, VCPU_CTR(r4)
ld r7, VCPU_XER(r4)
@@ -1307,6 +1325,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
blt deliver_guest_interrupt
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+#ifdef CONFIG_KVM_XICS
+ /* We are exiting, pull the VP from the XIVE */
+ lwz r0, VCPU_XIVE_PUSHED(r9)
+ cmpwi cr0, r0, 0
+ beq 1f
+ li r7, TM_SPC_PULL_OS_CTX
+ li r6, TM_QW1_OS
+ mfmsr r0
+ andi. r0, r0, MSR_IR /* in real mode? */
+ beq 2f
+ ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
+ cmpldi cr0, r10, 0
+ beq 1f
+ /* First load to pull the context, we ignore the value */
+ lwzx r11, r7, r10
+ eieio
+ /* Second load to recover the context state (Words 0 and 1) */
+ ldx r11, r6, r10
+ b 3f
+2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr0, r10, 0
+ beq 1f
+ /* First load to pull the context, we ignore the value */
+ lwzcix r11, r7, r10
+ eieio
+ /* Second load to recover the context state (Words 0 and 1) */
+ ldcix r11, r6, r10
+3: std r11, VCPU_XIVE_SAVED_STATE(r9)
+ /* Fixup some of the state for the next load */
+ li r10, 0
+ li r0, 0xff
+ stw r10, VCPU_XIVE_PUSHED(r9)
+ stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
+ stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+1:
+#endif /* CONFIG_KVM_XICS */
/* Save more register state */
mfdar r6
mfdsisr r7
@@ -2011,7 +2065,7 @@ hcall_real_table:
.long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
- .long 0 /* 0x70 - H_IPOLL */
+ .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
.long 0 /* 0x64 - H_EOI */
@@ -2181,7 +2235,11 @@ hcall_real_table:
.long 0 /* 0x2f0 */
.long 0 /* 0x2f4 */
.long 0 /* 0x2f8 */
- .long 0 /* 0x2fc */
+#ifdef CONFIG_KVM_XICS
+ .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
+#else
+ .long 0 /* 0x2fc - H_XIRR_X*/
+#endif
.long DOTSYM(kvmppc_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
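The rmhandlers.S hunks above push the VCPU's OS context into the XIVE thread interrupt management area (TIMA) on guest entry (store the saved doubleword at TM_QW1_OS, barrier, then the CAM word at TM_QW1_OS + TM_WORD2) and pull it on exit (a discarded load from TM_SPC_PULL_OS_CTX followed by a re-read of words 0 and 1). The same sequence rendered as C against a hypothetical virtual-mode mapping; the offset values are placeholders, not the definitions from asm/xive-regs.h:

#include <stdint.h>

/* Placeholder offsets; the real ones come from asm/xive-regs.h. */
#define TM_QW1_OS		0x10
#define TM_WORD2		0x08
#define TM_SPC_PULL_OS_CTX	0x80

/* Push the OS context on guest entry: words 0-1 first, then the CAM
 * word, with a barrier in between (the assembly uses eieio). */
static void tima_push_os_ctx(volatile uint8_t *tima,
			     uint64_t saved_w01, uint32_t cam_word)
{
	*(volatile uint64_t *)(tima + TM_QW1_OS) = saved_w01;
	__sync_synchronize();		/* ~ eieio */
	*(volatile uint32_t *)(tima + TM_QW1_OS + TM_WORD2) = cam_word;
}

/* Pull on guest exit: the first load detaches the context (its value
 * is ignored), the second recovers the now-stable words 0 and 1. */
static uint64_t tima_pull_os_ctx(volatile uint8_t *tima)
{
	(void)*(volatile uint32_t *)(tima + TM_SPC_PULL_OS_CTX);
	__sync_synchronize();
	return *(volatile uint64_t *)(tima + TM_QW1_OS);
}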
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 20528701835b..2d3b2b1cc272 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -16,6 +16,7 @@
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/rtas.h>
+#include <asm/xive.h>
#ifdef CONFIG_KVM_XICS
static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -32,7 +33,10 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
server = be32_to_cpu(args->args[1]);
priority = be32_to_cpu(args->args[2]);
- rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
+ if (xive_enabled())
+ rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
+ else
+ rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
@@ -52,7 +56,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
server = priority = 0;
- rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
+ if (xive_enabled())
+ rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
+ else
+ rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
if (rc) {
rc = -3;
goto out;
@@ -76,7 +83,10 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- rc = kvmppc_xics_int_off(vcpu->kvm, irq);
+ if (xive_enabled())
+ rc = kvmppc_xive_int_off(vcpu->kvm, irq);
+ else
+ rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
@@ -95,7 +105,10 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- rc = kvmppc_xics_int_on(vcpu->kvm, irq);
+ if (xive_enabled())
+ rc = kvmppc_xive_int_on(vcpu->kvm, irq);
+ else
+ rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 459b72cb617a..d329b2add7e2 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1306,8 +1306,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
return 0;
}
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
- bool line_status)
+int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
{
struct kvmppc_xics *xics = kvm->arch.xics;
@@ -1316,14 +1316,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return ics_deliver_irq(xics, irq, level);
}
-int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
- struct kvm *kvm, int irq_source_id,
- int level, bool line_status)
-{
- return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
- level, line_status);
-}
-
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
struct kvmppc_xics *xics = dev->private;
@@ -1457,29 +1449,6 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}
-static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
-}
-
-int kvm_irq_map_gsi(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *entries, int gsi)
-{
- entries->gsi = gsi;
- entries->type = KVM_IRQ_ROUTING_IRQCHIP;
- entries->set = xics_set_irq;
- entries->irqchip.irqchip = 0;
- entries->irqchip.pin = gsi;
- return 1;
-}
-
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
- return pin;
-}
-
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
unsigned long host_irq)
{
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index ec5474cf70c6..453c9e518c19 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -10,6 +10,7 @@
#ifndef _KVM_PPC_BOOK3S_XICS_H
#define _KVM_PPC_BOOK3S_XICS_H
+#ifdef CONFIG_KVM_XICS
/*
* We use a two-level tree to store interrupt source information.
* There are up to 1024 ICS nodes, each of which can represent
@@ -144,5 +145,11 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
return ics;
}
+extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
new file mode 100644
index 000000000000..ffe1da95033a
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -0,0 +1,1894 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "xive-kvm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/time.h>
+#include <asm/opal.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xive.h"
+
+
+/*
+ * Virtual mode variants of the hcalls for use on radix/radix
+ * with AIL. They require the VCPU's VP to be "pushed"
+ *
+ * We still instantiate them here because we use some of the
+ * generated utility functions as well in this file.
+ */
+#define XIVE_RUNTIME_CHECKS
+#define X_PFX xive_vm_
+#define X_STATIC static
+#define X_STAT_PFX stat_vm_
+#define __x_tima xive_tima
+#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
+#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
+#define __x_readb __raw_readb
+#define __x_writeb __raw_writeb
+#define __x_readw __raw_readw
+#define __x_readq __raw_readq
+#define __x_writeq __raw_writeq
+
+#include "book3s_xive_template.c"
+
+/*
+ * We leave a gap of a couple of interrupts in the queue to
+ * account for the IPI and an additional safety guard.
+ */
+#define XIVE_Q_GAP 2
+
+/*
+ * This is a simple trigger for a generic XIVE IRQ. This must
+ * only be called for interrupts that support a trigger page
+ */
+static bool xive_irq_trigger(struct xive_irq_data *xd)
+{
+ /* This should be only for MSIs */
+ if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
+ return false;
+
+ /* Those interrupts should always have a trigger page */
+ if (WARN_ON(!xd->trig_mmio))
+ return false;
+
+ out_be64(xd->trig_mmio, 0);
+
+ return true;
+}
+
+static irqreturn_t xive_esc_irq(int irq, void *data)
+{
+ struct kvm_vcpu *vcpu = data;
+
+ /* We use the existing H_PROD mechanism to wake up the target */
+ vcpu->arch.prodded = 1;
+ smp_mb();
+ if (vcpu->arch.ceded)
+ kvmppc_fast_vcpu_kick(vcpu);
+
+ return IRQ_HANDLED;
+}
+
+static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q = &xc->queues[prio];
+ char *name = NULL;
+ int rc;
+
+ /* Already there ? */
+ if (xc->esc_virq[prio])
+ return 0;
+
+ /* Hook up the escalation interrupt */
+ xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
+ if (!xc->esc_virq[prio]) {
+ pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ return -EIO;
+ }
+
+ /*
+ * Future improvement: start with them disabled
+	 * and handle the DD2 and later scheme of merged escalation
+	 * interrupts.
+ */
+ name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ vcpu->kvm->arch.lpid, xc->server_num, prio);
+ if (!name) {
+ pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ rc = -ENOMEM;
+ goto error;
+ }
+ rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
+ IRQF_NO_THREAD, name, vcpu);
+ if (rc) {
+ pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ goto error;
+ }
+ xc->esc_virq_names[prio] = name;
+ return 0;
+error:
+ irq_dispose_mapping(xc->esc_virq[prio]);
+ xc->esc_virq[prio] = 0;
+ kfree(name);
+ return rc;
+}
+
+static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+ struct xive_q *q = &xc->queues[prio];
+ void *qpage;
+ int rc;
+
+ if (WARN_ON(q->qpage))
+ return 0;
+
+	/* Allocate the queue and retrieve info on the current node for now */
+ qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
+ if (!qpage) {
+ pr_err("Failed to allocate queue %d for VCPU %d\n",
+ prio, xc->server_num);
+		return -ENOMEM;
+ }
+ memset(qpage, 0, 1 << xive->q_order);
+
+ /*
+ * Reconfigure the queue. This will set q->qpage only once the
+ * queue is fully configured. This is a requirement for prio 0
+ * as we will stop doing EOIs for every IPI as soon as we observe
+ * qpage being non-NULL, and instead will only EOI when we receive
+ * corresponding queue 0 entries
+ */
+ rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
+ xive->q_order, true);
+ if (rc)
+ pr_err("Failed to configure queue %d for VCPU %d\n",
+ prio, xc->server_num);
+ return rc;
+}
+
+/* Called with kvm_lock held */
+static int xive_check_provisioning(struct kvm *kvm, u8 prio)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvm_vcpu *vcpu;
+ int i, rc;
+
+ lockdep_assert_held(&kvm->lock);
+
+ /* Already provisioned ? */
+ if (xive->qmap & (1 << prio))
+ return 0;
+
+ pr_devel("Provisioning prio... %d\n", prio);
+
+ /* Provision each VCPU and enable escalations */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.xive_vcpu)
+ continue;
+ rc = xive_provision_queue(vcpu, prio);
+ if (rc == 0)
+ xive_attach_escalation(vcpu, prio);
+ if (rc)
+ return rc;
+ }
+
+ /* Order previous stores and mark it as provisioned */
+ mb();
+ xive->qmap |= (1 << prio);
+ return 0;
+}
+
+static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ struct xive_q *q;
+
+ /* Locate target server */
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_warn("%s: Can't find server %d\n", __func__, server);
+ return;
+ }
+ xc = vcpu->arch.xive_vcpu;
+ if (WARN_ON(!xc))
+ return;
+
+ q = &xc->queues[prio];
+ atomic_inc(&q->pending_count);
+}
+
+static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q;
+ u32 max;
+
+ if (WARN_ON(!xc))
+ return -ENXIO;
+ if (!xc->valid)
+ return -ENXIO;
+
+ q = &xc->queues[prio];
+ if (WARN_ON(!q->qpage))
+ return -ENXIO;
+
+ /* Calculate max number of interrupts in that queue. */
+ max = (q->msk + 1) - XIVE_Q_GAP;
+ return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
+}
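+
+/*
+ * Accounting example for the check above: a queue of msk + 1 = 256
+ * four-byte entries (1KB, matching the __be32 queue page allocated in
+ * xive_provision_queue()) with XIVE_Q_GAP = 2 admits at most 254
+ * routed interrupts, leaving room for the IPI and the safety guard.
+ */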
+
+static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
+{
+ struct kvm_vcpu *vcpu;
+ int i, rc;
+
+ /* Locate target server */
+ vcpu = kvmppc_xive_find_server(kvm, *server);
+ if (!vcpu) {
+ pr_devel("Can't find server %d\n", *server);
+ return -EINVAL;
+ }
+
+ pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
+
+ /* Try pick it */
+ rc = xive_try_pick_queue(vcpu, prio);
+ if (rc == 0)
+ return rc;
+
+ pr_devel(" .. failed, looking up candidate...\n");
+
+ /* Failed, pick another VCPU */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.xive_vcpu)
+ continue;
+ rc = xive_try_pick_queue(vcpu, prio);
+ if (rc == 0) {
+ *server = vcpu->arch.xive_vcpu->server_num;
+ pr_devel(" found on 0x%x/%d\n", *server, prio);
+ return rc;
+ }
+ }
+ pr_devel(" no available target !\n");
+
+ /* No available target ! */
+ return -EBUSY;
+}
+
+static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state)
+{
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u8 old_prio;
+ u64 val;
+
+ /*
+ * Take the lock, set masked, try again if racing
+ * with H_EOI
+ */
+ for (;;) {
+ arch_spin_lock(&sb->lock);
+ old_prio = state->guest_priority;
+ state->guest_priority = MASKED;
+ mb();
+ if (!state->in_eoi)
+ break;
+ state->guest_priority = old_prio;
+ arch_spin_unlock(&sb->lock);
+ }
+
+ /* No change ? Bail */
+ if (old_prio == MASKED)
+ return old_prio;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /*
+ * If the interrupt is marked as needing masking via
+ * firmware, we do it here. Firmware masking however
+ * is "lossy", it won't return the old p and q bits
+ * and won't set the interrupt to a state where it will
+ * record queued ones. If this is an issue we should do
+ * lazy masking instead.
+ *
+ * For now, we work around this in unmask by forcing
+ * an interrupt whenever we unmask a non-LSI via FW
+ * (if ever).
+ */
+ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
+ xive_native_configure_irq(hw_num,
+ xive->vp_base + state->act_server,
+ MASKED, state->number);
+ /* set old_p so we can track if an H_EOI was done */
+ state->old_p = true;
+ state->old_q = false;
+ } else {
+ /* Set PQ to 10, return old P and old Q and remember them */
+ val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
+ state->old_p = !!(val & 2);
+ state->old_q = !!(val & 1);
+
+ /*
+		 * Synchronize hardware to ensure the queues are updated
+ * when masking
+ */
+ xive_native_sync_source(hw_num);
+ }
+
+ return old_prio;
+}
+
+static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state)
+{
+ /*
+ * Take the lock try again if racing with H_EOI
+ */
+ for (;;) {
+ arch_spin_lock(&sb->lock);
+ if (!state->in_eoi)
+ break;
+ arch_spin_unlock(&sb->lock);
+ }
+}
+
+static void xive_finish_unmask(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state,
+ u8 prio)
+{
+ struct xive_irq_data *xd;
+ u32 hw_num;
+
+ /* If we aren't changing a thing, move on */
+ if (state->guest_priority != MASKED)
+ goto bail;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /*
+	 * See the comment in xive_lock_and_mask() concerning masking
+ * via firmware.
+ */
+ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
+ xive_native_configure_irq(hw_num,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+ /* If an EOI is needed, do it here */
+ if (!state->old_p)
+ xive_vm_source_eoi(hw_num, xd);
+ /* If this is not an LSI, force a trigger */
+ if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
+ xive_irq_trigger(xd);
+ goto bail;
+ }
+
+ /* Old Q set, set PQ to 11 */
+ if (state->old_q)
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
+
+ /*
+ * If not old P, then perform an "effective" EOI,
+ * on the source. This will handle the cases where
+ * FW EOI is needed.
+ */
+ if (!state->old_p)
+ xive_vm_source_eoi(hw_num, xd);
+
+ /* Synchronize ordering and mark unmasked */
+ mb();
+bail:
+ state->guest_priority = prio;
+}
+
+/*
+ * Target an interrupt at a given server/prio; this will fall back
+ * to another server if necessary and perform the HW targeting
+ * updates as needed.
+ *
+ * NOTE: Must be called with the state lock held
+ */
+static int xive_target_interrupt(struct kvm *kvm,
+ struct kvmppc_xive_irq_state *state,
+ u32 server, u8 prio)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ u32 hw_num;
+ int rc;
+
+ /*
+ * This will return a tentative server and actual
+ * priority. The count for that new target will have
+ * already been incremented.
+ */
+ rc = xive_select_target(kvm, &server, prio);
+
+ /*
+ * We failed to find a target ? Not much we can do
+ * at least until we support the GIQ.
+ */
+ if (rc)
+ return rc;
+
+ /*
+ * Increment the old queue pending count if there
+ * was one so that the old queue count gets adjusted later
+ * when observed to be empty.
+ */
+ if (state->act_priority != MASKED)
+ xive_inc_q_pending(kvm,
+ state->act_server,
+ state->act_priority);
+ /*
+ * Update state and HW
+ */
+ state->act_priority = prio;
+ state->act_server = server;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, NULL);
+
+ return xive_native_configure_irq(hw_num,
+ xive->vp_base + server,
+ prio, state->number);
+}
+
+/*
+ * Targeting rules: in order to avoid losing track of
+ * pending interrupts across mask and unmask, which would
+ * allow queue overflows, we implement the following rules:
+ *
+ * - Unless it was never enabled (or we run out of capacity),
+ * an interrupt is always targeted at a valid server/queue
+ * pair even when "masked" by the guest. This pair tends to
+ * be the last one used, but it can be changed under some
+ * circumstances. That allows us to separate targeting
+ * from masking: we only handle accounting during (re)targeting.
+ * This also allows us to let an interrupt drain into its target
+ * queue after masking, avoiding complex schemes to remove
+ * interrupts out of remote processor queues.
+ *
+ * - When masking, we set PQ to 10 and save the previous value
+ * of P and Q.
+ *
+ * - When unmasking, if saved Q was set, we set PQ to 11;
+ * otherwise we leave PQ at the HW state, which will be either
+ * 10 if nothing happened or 11 if the interrupt fired while
+ * masked. Effectively we are OR'ing the previous Q into the
+ * HW Q.
+ *
+ * Then, if saved P is clear, we do an effective EOI (Q->P->Trigger)
+ * which will unmask the interrupt and shoot a new one if Q was
+ * set.
+ *
+ * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
+ * effectively meaning an H_EOI from the guest is still expected
+ * for that interrupt).
+ *
+ * - If H_EOI occurs while masked, we clear the saved P.
+ *
+ * - When changing target, we account on the new target and
+ * increment a separate "pending" counter on the old one.
+ * This pending counter will be used to decrement the old
+ * target's count when its queue has been observed to be empty.
+ */
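+
+/*
+ * A worked example of those rules, writing PQ as two bits: a source
+ * starts unmasked and idle with PQ = 00. When the guest masks it we
+ * set PQ = 10 and save old P = 0, old Q = 0. If the interrupt fires
+ * while masked, the HW moves it to PQ = 11. On unmask, saved Q was
+ * clear so PQ is left at 11, and saved P was clear so we do the
+ * effective EOI, which re-enables the source and, Q having been set,
+ * shoots the latched interrupt at its target queue.
+ */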
+
+int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u8 new_act_prio;
+ int rc = 0;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
+ irq, server, priority);
+
+ /* First, check provisioning of queues */
+ if (priority != MASKED)
+ rc = xive_check_provisioning(xive->kvm,
+ xive_prio_from_guest(priority));
+ if (rc) {
+ pr_devel(" provisioning failure %d !\n", rc);
+ return rc;
+ }
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+	 * We first handle masking/unmasking since the locking
+	 * might need to be retried due to EOIs; we'll handle
+	 * targeting changes later. These functions will return
+	 * with the SB lock held.
+	 *
+	 * xive_lock_and_mask() will also set state->guest_priority
+	 * but won't otherwise change other fields of the state.
+	 *
+	 * xive_lock_for_unmask() will not actually unmask; that will
+	 * be done later by xive_finish_unmask() once the targeting
+	 * has been done, so we don't try to unmask an interrupt
+	 * that hasn't yet been targeted.
+ */
+ if (priority == MASKED)
+ xive_lock_and_mask(xive, sb, state);
+ else
+ xive_lock_for_unmask(sb, state);
+
+	/*
+	 * Then we handle targeting.
+ *
+ * First calculate a new "actual priority"
+ */
+ new_act_prio = state->act_priority;
+ if (priority != MASKED)
+ new_act_prio = xive_prio_from_guest(priority);
+
+ pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
+ new_act_prio, state->act_server, state->act_priority);
+
+ /*
+	 * Then check if we actually need to change anything.
+	 *
+	 * The condition for re-targeting the interrupt is that
+	 * we have a valid new priority (new_act_prio is not 0xff)
+	 * and either the server or the priority changed.
+	 *
+	 * Note: if act_priority was 0xff and the new priority is
+	 * also 0xff, we don't do anything and leave the interrupt
+	 * untargeted. An attempt to do an int_on on an untargeted
+	 * interrupt will fail. If that is a problem we could
+	 * initialize interrupts with a valid default priority.
+ */
+
+ if (new_act_prio != MASKED &&
+ (state->act_server != server ||
+ state->act_priority != new_act_prio))
+ rc = xive_target_interrupt(kvm, state, server, new_act_prio);
+
+ /*
+ * Perform the final unmasking of the interrupt source
+ * if necessary
+ */
+ if (priority != MASKED)
+ xive_finish_unmask(xive, sb, state, priority);
+
+ /*
+ * Finally Update saved_priority to match. Only int_on/off
+ * set this field to a different value.
+ */
+ state->saved_priority = priority;
+
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+ arch_spin_lock(&sb->lock);
+ *server = state->guest_server;
+ *priority = state->guest_priority;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ pr_devel("int_on(irq=0x%x)\n", irq);
+
+ /*
+	 * Check if the interrupt was not targeted
+ */
+ if (state->act_priority == MASKED) {
+		pr_devel("int_on on untargeted interrupt\n");
+ return -EINVAL;
+ }
+
+ /* If saved_priority is 0xff, do nothing */
+ if (state->saved_priority == MASKED)
+ return 0;
+
+ /*
+ * Lock and unmask it.
+ */
+ xive_lock_for_unmask(sb, state);
+ xive_finish_unmask(xive, sb, state, state->saved_priority);
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ pr_devel("int_off(irq=0x%x)\n", irq);
+
+ /*
+ * Lock and mask
+ */
+ state->saved_priority = xive_lock_and_mask(xive, sb, state);
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return false;
+ state = &sb->irq_state[idx];
+ if (!state->valid)
+ return false;
+
+ /*
+	 * Trigger the IPI. This assumes we never restore a pass-through
+	 * interrupt, which should be safe enough.
+ */
+ xive_irq_trigger(&state->ipi_data);
+
+ return true;
+}
+
+u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ return 0;
+
+ /* Return the per-cpu state for state saving/migration */
+ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
+ (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+}
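+
+/*
+ * Layout example: a VCPU with cppr = 0x05 and mfrr = 0xff yields
+ * (0x05ULL << KVM_REG_PPC_ICP_CPPR_SHIFT) |
+ * (0xffULL << KVM_REG_PPC_ICP_MFRR_SHIFT); the pending_pri and xisr
+ * fields of the XICS-style state are left at zero, since XIVE keeps
+ * no equivalent of the legacy one-entry queue to report here.
+ */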
+
+int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ u8 cppr, mfrr;
+ u32 xisr;
+
+ if (!xc || !xive)
+ return -ENOENT;
+
+ /* Grab individual state fields. We don't use pending_pri */
+ cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
+ xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
+ KVM_REG_PPC_ICP_XISR_MASK;
+ mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
+
+ pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
+ xc->server_num, cppr, mfrr, xisr);
+
+ /*
+ * We can't update the state of a "pushed" VCPU, but that
+ * shouldn't happen.
+ */
+ if (WARN_ON(vcpu->arch.xive_pushed))
+ return -EIO;
+
+ /* Update VCPU HW saved state */
+ vcpu->arch.xive_saved_state.cppr = cppr;
+ xc->hw_cppr = xc->cppr = cppr;
+
+ /*
+ * Update MFRR state. If it's not 0xff, we mark the VCPU as
+ * having a pending MFRR change, which will re-evaluate the
+ * target. The VCPU will thus potentially get a spurious
+ * interrupt but that's not a big deal.
+ */
+ xc->mfrr = mfrr;
+ if (mfrr < cppr)
+ xive_irq_trigger(&xc->vp_ipi_data);
+
+ /*
+ * Now saved XIRR is "interesting". It means there's something in
+ * the legacy "1 element" queue... for an IPI we simply ignore it,
+ * as the MFRR restore will handle that. For anything else we need
+ * to force a resend of the source.
+ * However the source may not have been setup yet. If that's the
+ * case, we keep that info and increment a counter in the xive to
+ * tell subsequent xive_set_source() to go look.
+ */
+ if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
+ xc->delayed_irq = xisr;
+ xive->delayed_irqs++;
+ pr_devel(" xisr restore delayed\n");
+ }
+
+ return 0;
+}
+
+int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
+ unsigned int host_irq = irq_desc_get_irq(host_desc);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
+ u16 idx;
+ u8 prio;
+ int rc;
+
+ if (!xive)
+ return -ENODEV;
+
+	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",
+		 guest_irq, hw_irq);
+
+ sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * Mark the passed-through interrupt as going to a VCPU,
+ * this will prevent further EOIs and similar operations
+ * from the XIVE code. It will also mask the interrupt
+ * to either PQ=10 or 11 state, the latter if the interrupt
+ * is pending. This will allow us to unmask or retrigger it
+ * after routing it to the guest with a simple EOI.
+ *
+	 * The "state" argument is a "token": all it needs is to be
+	 * non-NULL to switch to passed-through, or NULL for the
+ * other way around. We may not yet have an actual VCPU
+ * target here and we don't really care.
+ */
+ rc = irq_set_vcpu_affinity(host_irq, state);
+ if (rc) {
+ pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
+ return rc;
+ }
+
+ /*
+ * Mask and read state of IPI. We need to know if its P bit
+ * is set as that means it's potentially already using a
+ * queue entry in the target
+ */
+ prio = xive_lock_and_mask(xive, sb, state);
+ pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
+ state->old_p, state->old_q);
+
+ /* Turn the IPI hard off */
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+
+ /* Grab info about irq */
+ state->pt_number = hw_irq;
+ state->pt_data = irq_data_get_irq_handler_data(host_data);
+
+ /*
+	 * Configure the IRQ to match the existing configuration of
+	 * the IPI if it was already targeted. Otherwise this will
+	 * mask the interrupt in a lossy way (act_priority is 0xff),
+	 * which is fine for an interrupt that was never started.
+ */
+ xive_native_configure_irq(hw_irq,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+
+ /*
+ * We do an EOI to enable the interrupt (and retrigger if needed)
+ * if the guest has the interrupt unmasked and the P bit was *not*
+ * set in the IPI. If it was set, we know a slot may still be in
+	 * use in the target queue, so we have to wait for a
+	 * guest-originated EOI.
+ */
+ if (prio != MASKED && !state->old_p)
+ xive_vm_source_eoi(hw_irq, state->pt_data);
+
+ /* Clear old_p/old_q as they are no longer relevant */
+ state->old_p = state->old_q = false;
+
+ /* Restore guest prio (unlocks EOI) */
+ mb();
+ state->guest_priority = prio;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
+
+int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ unsigned int host_irq = irq_desc_get_irq(host_desc);
+ u16 idx;
+ u8 prio;
+ int rc;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
+
+ sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * Mask and read state of IRQ. We need to know if its P bit
+ * is set as that means it's potentially already using a
+ * queue entry in the target
+ */
+ prio = xive_lock_and_mask(xive, sb, state);
+ pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
+ state->old_p, state->old_q);
+
+ /*
+	 * If old_p is set, the interrupt is pending; we switch it to
+	 * PQ=11. This will force a resend in the host so the interrupt
+	 * isn't lost to whatever host driver may pick it up.
+ */
+ if (state->old_p)
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
+
+ /* Release the passed-through interrupt to the host */
+ rc = irq_set_vcpu_affinity(host_irq, NULL);
+ if (rc) {
+ pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
+ return rc;
+ }
+
+ /* Forget about the IRQ */
+ state->pt_number = 0;
+ state->pt_data = NULL;
+
+ /* Reconfigure the IPI */
+ xive_native_configure_irq(state->ipi_number,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+
+ /*
+ * If old_p is set (we have a queue entry potentially
+ * occupied) or the interrupt is masked, we set the IPI
+ * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
+ */
+ if (prio == MASKED || state->old_p)
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
+ else
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
+
+ /* Restore guest prio (unlocks EOI) */
+ mb();
+ state->guest_priority = prio;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
+
+static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ int i, j;
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
+
+ if (!state->valid)
+ continue;
+ if (state->act_priority == MASKED)
+ continue;
+ if (state->act_server != xc->server_num)
+ continue;
+
+ /* Clean it up */
+ arch_spin_lock(&sb->lock);
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+ if (state->pt_number) {
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
+ }
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+}
+
+void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+ int i;
+
+ pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
+
+ /* Ensure no interrupt is still routed to that VP */
+ xc->valid = false;
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ /* Mask the VP IPI */
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
+
+ /* Disable the VP */
+ xive_native_disable_vp(xc->vp_id);
+
+ /* Free the queues & associated interrupts */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+
+ /* Free the escalation irq */
+ if (xc->esc_virq[i]) {
+ free_irq(xc->esc_virq[i], vcpu);
+ irq_dispose_mapping(xc->esc_virq[i]);
+ kfree(xc->esc_virq_names[i]);
+ }
+ /* Free the queue */
+ xive_native_disable_queue(xc->vp_id, q, i);
+ if (q->qpage) {
+ free_pages((unsigned long)q->qpage,
+ xive->q_page_order);
+ q->qpage = NULL;
+ }
+ }
+
+ /* Free the IPI */
+ if (xc->vp_ipi) {
+ xive_cleanup_irq_data(&xc->vp_ipi_data);
+ xive_native_free_irq(xc->vp_ipi);
+ }
+ /* Free the VP */
+ kfree(xc);
+}
+
+int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc;
+ int i, r = -EBUSY;
+
+ pr_devel("connect_vcpu(cpu=%d)\n", cpu);
+
+ if (dev->ops != &kvm_xive_ops) {
+ pr_devel("Wrong ops !\n");
+ return -EPERM;
+ }
+ if (xive->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type)
+ return -EBUSY;
+ if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
+ pr_devel("Duplicate !\n");
+ return -EEXIST;
+ }
+ if (cpu >= KVM_MAX_VCPUS) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc)
+ return -ENOMEM;
+
+ /* We need to synchronize with queue provisioning */
+ mutex_lock(&vcpu->kvm->lock);
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = cpu;
+ xc->vp_id = xive->vp_base + cpu;
+ xc->mfrr = 0xff;
+ xc->valid = true;
+
+ r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
+ if (r)
+ goto bail;
+
+ /* Configure VCPU fields for use by assembly push/pull */
+ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
+ vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
+
+ /* Allocate IPI */
+ xc->vp_ipi = xive_native_alloc_irq();
+ if (!xc->vp_ipi) {
+ r = -EIO;
+ goto bail;
+ }
+ pr_devel(" IPI=0x%x\n", xc->vp_ipi);
+
+ r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
+ if (r)
+ goto bail;
+
+ /*
+ * Initialize queues. Initially we set them all for no queueing
+ * and we enable escalation for queue 0 only which we'll use for
+ * our mfrr change notifications. If the VCPU is hot-plugged, we
+ * do handle provisioning however.
+ */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+
+ /* Is queue already enabled ? Provision it */
+ if (xive->qmap & (1 << i)) {
+ r = xive_provision_queue(vcpu, i);
+ if (r == 0)
+ xive_attach_escalation(vcpu, i);
+ if (r)
+ goto bail;
+ } else {
+ r = xive_native_configure_queue(xc->vp_id,
+ q, i, NULL, 0, true);
+ if (r) {
+ pr_err("Failed to configure queue %d for VCPU %d\n",
+ i, cpu);
+ goto bail;
+ }
+ }
+ }
+
+ /* If not done above, attach priority 0 escalation */
+ r = xive_attach_escalation(vcpu, 0);
+ if (r)
+ goto bail;
+
+ /* Enable the VP */
+ r = xive_native_enable_vp(xc->vp_id);
+ if (r)
+ goto bail;
+
+ /* Route the IPI */
+ r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
+ if (!r)
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
+
+bail:
+ mutex_unlock(&vcpu->kvm->lock);
+ if (r) {
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ return r;
+ }
+
+ vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
+ return 0;
+}
+
+/*
+ * Scanning of queues before/after migration save
+ */
+static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return;
+
+ state = &sb->irq_state[idx];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_err("invalid irq 0x%x in cpu queue!\n", irq);
+ return;
+ }
+
+ /*
+ * If the interrupt is in a queue it should have P set.
+	 * We warn so that it gets reported. A backtrace isn't useful
+ * so no need to use a WARN_ON.
+ */
+ if (!state->saved_p)
+ pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
+
+ /* Set flag */
+ state->in_queue = true;
+}
+
+static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ u32 irq)
+{
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
+
+ if (!state->valid)
+ return;
+
+ /* Mask and save state, this will also sync HW queues */
+ state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
+
+ /* Transfer P and Q */
+ state->saved_p = state->old_p;
+ state->saved_q = state->old_q;
+
+ /* Unlock */
+ arch_spin_unlock(&sb->lock);
+}
+
+static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ u32 irq)
+{
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
+
+ if (!state->valid)
+ return;
+
+ /*
+	 * Lock / exclude EOI (not technically necessary if the
+	 * guest isn't running concurrently). If this becomes a
+	 * performance issue we can probably remove the lock.
+ */
+ xive_lock_for_unmask(sb, state);
+
+ /* Restore mask/prio if it wasn't masked */
+ if (state->saved_scan_prio != MASKED)
+ xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
+
+ /* Unlock */
+ arch_spin_unlock(&sb->lock);
+}
+
+static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
+{
+ u32 idx = q->idx;
+ u32 toggle = q->toggle;
+ u32 irq;
+
+ do {
+ irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
+ if (irq > XICS_IPI)
+ xive_pre_save_set_queued(xive, irq);
+ } while (irq);
+}
+
+static void xive_pre_save_scan(struct kvmppc_xive *xive)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i, j;
+
+ /*
+ * See the comment in xive_get_source() about how this
+ * works. Collect a stable state for all interrupts.
+ */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ xive_pre_save_mask_irq(xive, sb, j);
+ }
+
+ /* Then scan the queues and update the "in_queue" flag */
+ kvm_for_each_vcpu(i, vcpu, xive->kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ if (!xc)
+ continue;
+ for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
+ if (xc->queues[j].qpage)
+ xive_pre_save_queue(xive, &xc->queues[j]);
+ }
+ }
+
+ /* Finally restore interrupt states */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ xive_pre_save_unmask_irq(xive, sb, j);
+ }
+}
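+
+/*
+ * To summarize, a full pre-save scan is: (1) mask every valid
+ * source, saving its P/Q bits and priority, (2) walk every
+ * provisioned queue and flag the sources still found there with
+ * in_queue, then (3) unmask everything back to its saved priority.
+ */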
+
+static void xive_post_save_scan(struct kvmppc_xive *xive)
+{
+ u32 i, j;
+
+ /* Clear all the in_queue flags */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ sb->irq_state[j].in_queue = false;
+ }
+
+ /* Next get_source() will do a new scan */
+ xive->saved_src_count = 0;
+}
+
+/*
+ * This returns the source configuration and state to user space.
+ */
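+/*
+ * For illustration, an unmasked LSI at guest priority 5 targeting
+ * server 2 that is still pending would be encoded as:
+ *
+ *   val = 2 | (5ull << KVM_XICS_PRIORITY_SHIFT) |
+ *         KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_PENDING;
+ */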
+static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u64 val, prio;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[idx];
+
+ if (!state->valid)
+ return -ENOENT;
+
+ pr_devel("get_source(%ld)...\n", irq);
+
+ /*
+ * To properly save the state into something that looks like a
+ * XICS migration stream we cannot treat interrupts individually.
+ *
+ * We need, instead, to mask them all (and save their previous PQ
+ * state) to get a stable state in the HW, then sync them to ensure
+ * that any interrupt that had already fired hits its queue, and
+ * finally scan all the queues to collect which interrupts are
+ * still present there, so we can set the "pending" flag on them
+ * and have them resent on restore.
+ *
+ * So we do it all when the "first" interrupt gets saved; all the
+ * state is collected at that point, and subsequent calls to
+ * xive_get_source() merely convert that state to the expected
+ * userspace bit mask.
+ */
+ if (xive->saved_src_count == 0)
+ xive_pre_save_scan(xive);
+ xive->saved_src_count++;
+
+ /* Convert saved state into something compatible with xics */
+ val = state->guest_server;
+ prio = state->saved_scan_prio;
+
+ if (prio == MASKED) {
+ val |= KVM_XICS_MASKED;
+ prio = state->saved_priority;
+ }
+ val |= prio << KVM_XICS_PRIORITY_SHIFT;
+ if (state->lsi) {
+ val |= KVM_XICS_LEVEL_SENSITIVE;
+ if (state->saved_p)
+ val |= KVM_XICS_PENDING;
+ } else {
+ if (state->saved_p)
+ val |= KVM_XICS_PRESENTED;
+
+ if (state->saved_q)
+ val |= KVM_XICS_QUEUED;
+
+ /*
+ * We mark it pending (which will attempt a re-delivery)
+ * if we are in a queue *or* we were masked and had
+ * Q set, which is equivalent to the XICS "masked pending"
+ * state.
+ */
+ if (state->in_queue || (prio == MASKED && state->saved_q))
+ val |= KVM_XICS_PENDING;
+ }
+
+ /*
+ * If that was the last interrupt saved, reset the
+ * in_queue flags
+ */
+ if (xive->saved_src_count == xive->src_count)
+ xive_post_save_scan(xive);
+
+ /* Copy the result to userspace */
+ if (put_user(val, ubufp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
+ int irq)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvmppc_xive_src_block *sb;
+ int i, bid;
+
+ bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+
+ mutex_lock(&kvm->lock);
+
+ /* block already exists - somebody else got here first */
+ if (xive->src_blocks[bid])
+ goto out;
+
+ /* Create the ICS */
+ sb = kzalloc(sizeof(*sb), GFP_KERNEL);
+ if (!sb)
+ goto out;
+
+ sb->id = bid;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
+ sb->irq_state[i].guest_priority = MASKED;
+ sb->irq_state[i].saved_priority = MASKED;
+ sb->irq_state[i].act_priority = MASKED;
+ }
+ smp_wmb();
+ xive->src_blocks[bid] = sb;
+
+ if (bid > xive->max_sbid)
+ xive->max_sbid = bid;
+
+out:
+ mutex_unlock(&kvm->lock);
+ return xive->src_blocks[bid];
+}
+
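+/*
+ * A "delayed" irq is one whose ICP state was restored (via set_icp)
+ * before its source was created; the irq number is stashed in the
+ * vcpu until xive_set_source() runs and can mark it pending again.
+ */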
+static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ if (xc->delayed_irq == irq) {
+ xc->delayed_irq = 0;
+ xive->delayed_irqs--;
+ return true;
+ }
+ }
+ return false;
+}
+
+static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val;
+ u8 act_prio, guest_prio;
+ u32 server;
+ int rc = 0;
+
+ if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
+ return -ENOENT;
+
+ pr_devel("set_source(irq=0x%lx)\n", irq);
+
+ /* Find the source */
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb) {
+ pr_devel("No source, creating source block...\n");
+ sb = xive_create_src_block(xive, irq);
+ if (!sb) {
+ pr_devel("Failed to create block...\n");
+ return -ENOMEM;
+ }
+ }
+ state = &sb->irq_state[idx];
+
+ /* Read user passed data */
+ if (get_user(val, ubufp)) {
+ pr_devel("fault getting user info!\n");
+ return -EFAULT;
+ }
+
+ server = val & KVM_XICS_DESTINATION_MASK;
+ guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
+
+ pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
+ val, server, guest_prio);
+ /*
+ * If the source doesn't already have an IPI, allocate
+ * one and get the corresponding data
+ */
+ if (!state->ipi_number) {
+ state->ipi_number = xive_native_alloc_irq();
+ if (state->ipi_number == 0) {
+ pr_devel("Failed to allocate IPI!\n");
+ return -ENOMEM;
+ }
+ xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
+ pr_devel(" src_ipi=0x%x\n", state->ipi_number);
+ }
+
+ /*
+ * We use lock_and_mask() to set us in the right masked
+ * state. We will override that state from the saved state
+ * further down, but this will handle the cases of interrupts
+ * that need FW masking. We set the initial guest_priority to
+ * 0 before calling it to ensure it actually performs the masking.
+ */
+ state->guest_priority = 0;
+ xive_lock_and_mask(xive, sb, state);
+
+ /*
+ * Now we select a target if we have one. If we don't, we leave
+ * the interrupt untargeted. This means that an interrupt can
+ * become "untargeted" across migration if it was masked by
+ * set_xive(), but there is little we can do about it.
+ */
+
+ /* First convert prio and mark interrupt as untargeted */
+ act_prio = xive_prio_from_guest(guest_prio);
+ state->act_priority = MASKED;
+ state->guest_server = server;
+
+ /*
+ * We need to drop the lock due to the mutex below. Hopefully
+ * nothing is touching that interrupt yet, since it hasn't been
+ * advertised to a running guest.
+ */
+ arch_spin_unlock(&sb->lock);
+
+ /* If we have a priority target the interrupt */
+ if (act_prio != MASKED) {
+ /* First, check provisioning of queues */
+ mutex_lock(&xive->kvm->lock);
+ rc = xive_check_provisioning(xive->kvm, act_prio);
+ mutex_unlock(&xive->kvm->lock);
+
+ /* Target interrupt */
+ if (rc == 0)
+ rc = xive_target_interrupt(xive->kvm, state,
+ server, act_prio);
+ /*
+ * If provisioning or targeting failed, leave the
+ * interrupt alone and masked. It will remain disabled
+ * until the guest re-targets it.
+ */
+ }
+
+ /*
+ * Find out if this was a delayed irq stashed in an ICP,
+ * in which case, treat it as pending
+ */
+ if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
+ val |= KVM_XICS_PENDING;
+ pr_devel(" Found delayed! Forcing PENDING!\n");
+ }
+
+ /* Cleanup the SW state */
+ state->old_p = false;
+ state->old_q = false;
+ state->lsi = false;
+ state->asserted = false;
+
+ /* Restore LSI state */
+ if (val & KVM_XICS_LEVEL_SENSITIVE) {
+ state->lsi = true;
+ if (val & KVM_XICS_PENDING)
+ state->asserted = true;
+ pr_devel(" LSI ! Asserted=%d\n", state->asserted);
+ }
+
+ /*
+ * Restore P and Q. If the interrupt was pending, we
+ * force both P and Q, which will trigger a resend.
+ *
+ * That means that a guest that had both an interrupt
+ * pending (queued) and Q set will restore with only
+ * one instance of that interrupt instead of 2, but that
+ * is perfectly fine as coalescing interrupts that haven't
+ * been presented yet is always allowed.
+ */
+ if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+ state->old_p = true;
+ if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
+ state->old_q = true;
+
+ pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
+
+ /*
+ * If the interrupt was masked, just save the priority; if it
+ * was unmasked, update the guest priority, performing the
+ * appropriate state transition and a re-trigger if necessary.
+ */
+ if (val & KVM_XICS_MASKED) {
+ pr_devel(" masked, saving prio\n");
+ state->guest_priority = MASKED;
+ state->saved_priority = guest_prio;
+ } else {
+ pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
+ xive_finish_unmask(xive, sb, state, guest_prio);
+ state->saved_priority = guest_prio;
+ }
+
+ /* Increment the number of valid sources and mark this one valid */
+ if (!state->valid)
+ xive->src_count++;
+ state->valid = true;
+
+ return 0;
+}
+
+int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+
+ /* Perform locklessly .... (we need to do some RCUisms here...) */
+ state = &sb->irq_state[idx];
+ if (!state->valid)
+ return -EINVAL;
+
+ /* We don't allow a trigger on a passed-through interrupt */
+ if (state->pt_number)
+ return -EINVAL;
+
+ if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
+ state->asserted = 1;
+ else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
+ state->asserted = 0;
+ return 0;
+ }
+
+ /* Trigger the IPI */
+ xive_irq_trigger(&state->ipi_data);
+
+ return 0;
+}
+
+static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We honor the existing XICS ioctl */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xive_set_source(xive, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We honor the existing XICS ioctl */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xive_get_source(xive, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ /* We honor the same limits as XICS, at least for now */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
+ attr->attr < KVMPPC_XICS_NR_IRQS)
+ return 0;
+ break;
+ }
+ return -ENXIO;
+}
+
+static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
+{
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(hw_num, 0, MASKED, 0);
+ xive_cleanup_irq_data(xd);
+}
+
+static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
+{
+ int i;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+
+ if (!state->valid)
+ continue;
+
+ kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
+ xive_native_free_irq(state->ipi_number);
+
+ /* Pass-through, cleanup too */
+ if (state->pt_number)
+ kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
+
+ state->valid = false;
+ }
+}
+
+static void kvmppc_xive_free(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvm *kvm = xive->kvm;
+ int i;
+
+ debugfs_remove(xive->dentry);
+
+ if (kvm)
+ kvm->arch.xive = NULL;
+
+ /* Mask and free interrupts */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ if (xive->src_blocks[i])
+ kvmppc_xive_free_sources(xive->src_blocks[i]);
+ kfree(xive->src_blocks[i]);
+ xive->src_blocks[i] = NULL;
+ }
+
+ if (xive->vp_base != XIVE_INVALID_VP)
+ xive_native_free_vp_block(xive->vp_base);
+
+ kfree(xive);
+ kfree(dev);
+}
+
+static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xive *xive;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ pr_devel("Creating xive for partition\n");
+
+ xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ if (!xive)
+ return -ENOMEM;
+
+ dev->private = xive;
+ xive->dev = dev;
+ xive->kvm = kvm;
+
+ /* Already there? */
+ if (kvm->arch.xive)
+ ret = -EEXIST;
+ else
+ kvm->arch.xive = xive;
+
+ /* We use the default queue size set by the host */
+ xive->q_order = xive_native_default_eq_shift();
+ if (xive->q_order < PAGE_SHIFT)
+ xive->q_page_order = 0;
+ else
+ xive->q_page_order = xive->q_order - PAGE_SHIFT;
+
+ /* Allocate a bunch of VPs */
+ xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
+ pr_devel("VP_Base=%x\n", xive->vp_base);
+
+ if (xive->vp_base == XIVE_INVALID_VP)
+ ret = -ENOMEM;
+
+ if (ret) {
+ kfree(xive);
+ return ret;
+ }
+
+ return 0;
+}
+
+
+static int xive_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xive *xive = m->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ u64 t_rm_h_xirr = 0;
+ u64 t_rm_h_ipoll = 0;
+ u64 t_rm_h_cppr = 0;
+ u64 t_rm_h_eoi = 0;
+ u64 t_rm_h_ipi = 0;
+ u64 t_vm_h_xirr = 0;
+ u64 t_vm_h_ipoll = 0;
+ u64 t_vm_h_cppr = 0;
+ u64 t_vm_h_eoi = 0;
+ u64 t_vm_h_ipi = 0;
+ unsigned int i;
+
+ if (!kvm)
+ return 0;
+
+ seq_printf(m, "=========\nVCPU state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
+ " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
+ xc->server_num, xc->cppr, xc->hw_cppr,
+ xc->mfrr, xc->pending,
+ xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
+
+ t_rm_h_xirr += xc->stat_rm_h_xirr;
+ t_rm_h_ipoll += xc->stat_rm_h_ipoll;
+ t_rm_h_cppr += xc->stat_rm_h_cppr;
+ t_rm_h_eoi += xc->stat_rm_h_eoi;
+ t_rm_h_ipi += xc->stat_rm_h_ipi;
+ t_vm_h_xirr += xc->stat_vm_h_xirr;
+ t_vm_h_ipoll += xc->stat_vm_h_ipoll;
+ t_vm_h_cppr += xc->stat_vm_h_cppr;
+ t_vm_h_eoi += xc->stat_vm_h_eoi;
+ t_vm_h_ipi += xc->stat_vm_h_ipi;
+ }
+
+ seq_printf(m, "Hcalls totals\n");
+ seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
+ seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
+ seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
+ seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
+ seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
+
+ return 0;
+}
+
+static int xive_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xive_debug_show, inode->i_private);
+}
+
+static const struct file_operations xive_debug_fops = {
+ .open = xive_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xive_debugfs_init(struct kvmppc_xive *xive)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
+ xive, &xive_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static void kvmppc_xive_init(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+
+ /* Register some debug interfaces */
+ xive_debugfs_init(xive);
+}
+
+struct kvm_device_ops kvm_xive_ops = {
+ .name = "kvm-xive",
+ .create = kvmppc_xive_create,
+ .init = kvmppc_xive_init,
+ .destroy = kvmppc_xive_free,
+ .set_attr = xive_set_attr,
+ .get_attr = xive_get_attr,
+ .has_attr = xive_has_attr,
+};
+
+void kvmppc_xive_init_module(void)
+{
+ __xive_vm_h_xirr = xive_vm_h_xirr;
+ __xive_vm_h_ipoll = xive_vm_h_ipoll;
+ __xive_vm_h_ipi = xive_vm_h_ipi;
+ __xive_vm_h_cppr = xive_vm_h_cppr;
+ __xive_vm_h_eoi = xive_vm_h_eoi;
+}
+
+void kvmppc_xive_exit_module(void)
+{
+ __xive_vm_h_xirr = NULL;
+ __xive_vm_h_ipoll = NULL;
+ __xive_vm_h_ipi = NULL;
+ __xive_vm_h_cppr = NULL;
+ __xive_vm_h_eoi = NULL;
+}
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
new file mode 100644
index 000000000000..5938f7644dc1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KVM_PPC_BOOK3S_XIVE_H
+#define _KVM_PPC_BOOK3S_XIVE_H
+
+#ifdef CONFIG_KVM_XICS
+#include "book3s_xics.h"
+
+/*
+ * State for one guest irq source.
+ *
+ * For each guest source we allocate a HW interrupt in the XIVE
+ * which we use for all SW triggers. It will be unused for
+ * pass-through, but it's easier to keep around as the same
+ * guest interrupt can alternatively be emulated or passed
+ * through if a physical device is hot-unplugged and replaced
+ * with an emulated one.
+ *
+ * This state structure is very similar to the XICS one with
+ * additional XIVE specific tracking.
+ */
+struct kvmppc_xive_irq_state {
+ bool valid; /* Interrupt entry is valid */
+
+ u32 number; /* Guest IRQ number */
+ u32 ipi_number; /* XIVE IPI HW number */
+ struct xive_irq_data ipi_data; /* XIVE IPI associated data */
+ u32 pt_number; /* XIVE Pass-through number if any */
+ struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */
+
+ /* Targetting as set by guest */
+ u32 guest_server; /* Current guest selected target */
+ u8 guest_priority; /* Guest set priority */
+ u8 saved_priority; /* Saved priority when masking */
+
+ /* Actual targetting */
+ u32 act_server; /* Actual server */
+ u8 act_priority; /* Actual priority */
+
+ /* Various state bits */
+ bool in_eoi; /* Synchronize with H_EOI */
+ bool old_p; /* P bit state when masking */
+ bool old_q; /* Q bit state when masking */
+ bool lsi; /* level-sensitive interrupt */
+ bool asserted; /* Only for emulated LSI: current state */
+
+ /* Saved for migration state */
+ bool in_queue;
+ bool saved_p;
+ bool saved_q;
+ u8 saved_scan_prio;
+};
+
+/* Select the "right" interrupt (IPI vs. passthrough) */
+static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
+ u32 *out_hw_irq,
+ struct xive_irq_data **out_xd)
+{
+ if (state->pt_number) {
+ if (out_hw_irq)
+ *out_hw_irq = state->pt_number;
+ if (out_xd)
+ *out_xd = state->pt_data;
+ } else {
+ if (out_hw_irq)
+ *out_hw_irq = state->ipi_number;
+ if (out_xd)
+ *out_xd = &state->ipi_data;
+ }
+}
+
+/*
+ * This corresponds to an "ICS" in XICS terminology; we use it
+ * as a means to break up source information into multiple structures.
+ */
+struct kvmppc_xive_src_block {
+ arch_spinlock_t lock;
+ u16 id;
+ struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
+};
+
+
+struct kvmppc_xive {
+ struct kvm *kvm;
+ struct kvm_device *dev;
+ struct dentry *dentry;
+
+ /* VP block associated with the VM */
+ u32 vp_base;
+
+ /* Blocks of sources */
+ struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
+ u32 max_sbid;
+
+ /*
+ * For state save, we lazily scan the queues on the first interrupt
+ * being migrated. We don't have a clean way to reset those flags,
+ * so we keep track of the number of valid sources and how many of
+ * them were migrated so we can reset when all of them have been
+ * processed.
+ */
+ u32 src_count;
+ u32 saved_src_count;
+
+ /*
+ * Some irqs are delayed on restore until the source is created;
+ * keep track here of how many of them there are.
+ */
+ u32 delayed_irqs;
+
+ /* Which queues (priorities) are in use by the guest */
+ u8 qmap;
+
+ /* Queue orders */
+ u32 q_order;
+ u32 q_page_order;
+
+};
+
+#define KVMPPC_XIVE_Q_COUNT 8
+
+struct kvmppc_xive_vcpu {
+ struct kvmppc_xive *xive;
+ struct kvm_vcpu *vcpu;
+ bool valid;
+
+ /* Server number. This is the HW CPU ID from a guest perspective */
+ u32 server_num;
+
+ /*
+ * HW VP corresponding to this VCPU. This is the base of the VP
+ * block plus the server number.
+ */
+ u32 vp_id;
+ u32 vp_chip_id;
+ u32 vp_cam;
+
+ /* IPI used for sending ... IPIs */
+ u32 vp_ipi;
+ struct xive_irq_data vp_ipi_data;
+
+ /* Local emulation state */
+ uint8_t cppr; /* guest CPPR */
+ uint8_t hw_cppr;/* Hardware CPPR */
+ uint8_t mfrr;
+ uint8_t pending;
+
+ /* Each VP has 8 queues though we only provision some */
+ struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
+ u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
+ char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];
+
+ /* Stash a delayed irq on restore from migration (see set_icp) */
+ u32 delayed_irq;
+
+ /* Stats */
+ u64 stat_rm_h_xirr;
+ u64 stat_rm_h_ipoll;
+ u64 stat_rm_h_cppr;
+ u64 stat_rm_h_eoi;
+ u64 stat_rm_h_ipi;
+ u64 stat_vm_h_xirr;
+ u64 stat_vm_h_ipoll;
+ u64 stat_vm_h_cppr;
+ u64 stat_vm_h_eoi;
+ u64 stat_vm_h_ipi;
+};
+
+static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
+ return vcpu;
+ }
+ return NULL;
+}
+
+static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
+ u32 irq, u16 *source)
+{
+ u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+ u16 src = irq & KVMPPC_XICS_SRC_MASK;
+
+ if (source)
+ *source = src;
+ if (bid > KVMPPC_XICS_MAX_ICS_ID)
+ return NULL;
+ return xive->src_blocks[bid];
+}
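+
+/*
+ * e.g. assuming KVMPPC_XICS_ICS_SHIFT is 12, guest irq 0x1234 splits
+ * into block id 0x1 and source index 0x234 within that block.
+ */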
+
+/*
+ * Mapping between guest priorities and host priorities
+ * is as follows.
+ *
+ * Guest requests for priorities 0...6 are honored. A guest request
+ * for anything higher results in a priority of 7 being applied.
+ *
+ * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb
+ * in order to match AIX expectations.
+ *
+ * A similar mapping is done for CPPR values.
+ */
+static inline u8 xive_prio_from_guest(u8 prio)
+{
+ if (prio == 0xff || prio < 8)
+ return prio;
+ return 7;
+}
+
+static inline u8 xive_prio_to_guest(u8 prio)
+{
+ if (prio == 0xff || prio < 7)
+ return prio;
+ return 0xb;
+}
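+
+/*
+ * e.g. xive_prio_from_guest(): 0x05 -> 0x05, 0x0a -> 0x07, 0xff -> 0xff
+ *      xive_prio_to_guest():   0x05 -> 0x05, 0x07 -> 0x0b, 0xff -> 0xff
+ */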
+
+static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
+{
+ u32 cur;
+
+ if (!qpage)
+ return 0;
+ cur = be32_to_cpup(qpage + *idx);
+ if ((cur >> 31) == *toggle)
+ return 0;
+ *idx = (*idx + 1) & msk;
+ if (*idx == 0)
+ (*toggle) ^= 1;
+ return cur & 0x7fffffff;
+}
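+
+/*
+ * Example: with a 4-entry queue (msk = 3), an entry whose top bit
+ * equals *toggle reads as "queue empty". Each time *idx wraps back
+ * to 0, *toggle flips, so entries left over from the previous pass
+ * around the ring stop being seen as valid.
+ */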
+
+extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
+extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+#endif /* CONFIG_KVM_XICS */
+#endif /* _KVM_PPC_BOOK3S_XIVE_H */
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
new file mode 100644
index 000000000000..023a31133c37
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+/* File to be included by other .c files */
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
+{
+ u8 cppr;
+ u16 ack;
+
+ /* XXX DD1 bug workaround: Check PIPR vs. CPPR first! */
+
+ /* Perform the acknowledge OS to register cycle. */
+ ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /* XXX Check grouping level */
+
+ /* Anything ? */
+ if (!((ack >> 8) & TM_QW1_NSR_EO))
+ return;
+
+ /* Grab CPPR of the most favored pending interrupt */
+ cppr = ack & 0xff;
+ if (cppr < 8)
+ xc->pending |= 1 << cppr;
+
+#ifdef XIVE_RUNTIME_CHECKS
+ /* Check consistency */
+ if (cppr >= xc->hw_cppr)
+ pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->hw_cppr);
+#endif
+
+ /*
+ * Update our image of the HW CPPR. We don't yet modify
+ * xc->cppr, this will be done as we scan for interrupts
+ * in the queues.
+ */
+ xc->hw_cppr = cppr;
+}
+
+static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ val = __x_readq(__x_eoi_page(xd) + offset);
+#ifdef __LITTLE_ENDIAN__
+ val >>= 64 - 8;
+#endif
+ return (u8)val;
+}
+
+
+static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
+{
+ /* If the XIVE supports the new "store EOI" facility, use it */
+ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ __x_writeq(0, __x_eoi_page(xd));
+ else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+ opal_int_eoi(hw_irq);
+ } else {
+ uint64_t eoi_val;
+
+ /*
+ * Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q,
+ * except for LSIs where we use the "EOI cycle" special
+ * load.
+ *
+ * This allows us to then do a re-trigger if Q was set,
+ * rather than synthesizing an interrupt in software.
+ *
+ * For LSIs, using the HW EOI cycle works around a problem
+ * on P9 DD1 PHBs where the other ESB accesses don't work
+ * properly.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_LSI)
+ __x_readq(__x_eoi_page(xd));
+ else {
+ eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __x_writeq(0, __x_trig_page(xd));
+ }
+ }
+}
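+
+/*
+ * A reminder on the ESB PQ bits manipulated above: P means the
+ * interrupt has been sent to a queue and is waiting for an EOI,
+ * Q means it triggered again while pending. The ESB load returns
+ * the previous PQ pair, so (eoi_val & 1) above is the old Q bit,
+ * telling us whether a re-trigger is needed.
+ */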
+
+enum {
+ scan_fetch,
+ scan_poll,
+ scan_eoi,
+};
+
+static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
+ u8 pending, int scan_type)
+{
+ u32 hirq = 0;
+ u8 prio = 0xff;
+
+ /* Find highest pending priority */
+ while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
+ struct xive_q *q;
+ u32 idx, toggle;
+ __be32 *qpage;
+
+ /*
+ * If pending is 0 this will return 0xff, which is
+ * what we want.
+ */
+ prio = ffs(pending) - 1;
+
+ /*
+ * If the most favored prio we found pending is less
+ * favored than (or equal to) a pending IPI, we return
+ * the IPI instead.
+ *
+ * Note: If pending was 0 and mfrr is 0xff, we will
+ * not spuriously take an IPI because mfrr cannot
+ * then be smaller than cppr.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* Don't scan past the guest cppr */
+ if (prio >= xc->cppr || prio > 7)
+ break;
+
+ /* Grab queue and pointers */
+ q = &xc->queues[prio];
+ idx = q->idx;
+ toggle = q->toggle;
+
+ /*
+ * Snapshot the queue page. The test further down for EOI
+ * must use the same "copy" that was used by __xive_read_eq
+ * since qpage can be set concurrently and we don't want
+ * to miss an EOI.
+ */
+ qpage = READ_ONCE(q->qpage);
+
+skip_ipi:
+ /*
+ * Try to fetch from the queue. Will return 0 for a
+ * non-queueing priority (ie, qpage = 0).
+ */
+ hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
+
+ /*
+ * If this was a signal for an MFRR change done by
+ * H_IPI we skip it. Additionally, if we were fetching,
+ * we EOI it now, thus re-enabling reception of a new
+ * such signal.
+ *
+ * We also need to do that if prio is 0 and we had no
+ * page for the queue. In this case, we have a non-queued
+ * IPI that needs to be EOId.
+ *
+ * This is safe because if we have another pending MFRR
+ * change that wasn't observed above, the Q bit will have
+ * been set and another occurrence of the IPI will trigger.
+ */
+ if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
+ if (scan_type == scan_fetch)
+ GLUE(X_PFX,source_eoi)(xc->vp_ipi,
+ &xc->vp_ipi_data);
+ /* Loop back on same queue with updated idx/toggle */
+#ifdef XIVE_RUNTIME_CHECKS
+ WARN_ON(hirq && hirq != XICS_IPI);
+#endif
+ if (hirq)
+ goto skip_ipi;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+
+ /* Something found, stop searching */
+ if (hirq)
+ break;
+
+ /* Clear the pending bit on the now empty queue */
+ pending &= ~(1 << prio);
+
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
+#ifdef XIVE_RUNTIME_CHECKS
+ WARN_ON(p > atomic_read(&q->count));
+#endif
+ atomic_sub(p, &q->count);
+ }
+ }
+ }
+
+ /* If we are just taking a "peek", do nothing else */
+ if (scan_type == scan_poll)
+ return hirq;
+
+ /* Update the pending bits */
+ xc->pending = pending;
+
+ /*
+ * If this is an EOI that's it, no CPPR adjustment done here,
+ * all we needed was cleanup the stale pending bits and check
+ * if there's anything left.
+ */
+ if (scan_type == scan_eoi)
+ return hirq;
+
+ /*
+ * If we found an interrupt, adjust what the guest CPPR should
+ * be as if we had just fetched that interrupt from HW.
+ */
+ if (hirq)
+ xc->cppr = prio;
+ /*
+ * If it was an IPI the HW CPPR might have been lowered too much
+ * as the HW interrupt we use for IPIs is routed to priority 0.
+ *
+ * We re-sync it here.
+ */
+ if (xc->cppr != xc->hw_cppr) {
+ xc->hw_cppr = xc->cppr;
+ __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+ }
+
+ return hirq;
+}
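+
+/*
+ * Example scan: with pending = 0x12 (priorities 1 and 4), cppr = 5
+ * and mfrr = 3, the loop first tries queue 1; if it is empty, the
+ * pending bit is cleared and priority 4 comes up next, but since
+ * mfrr (3) is more favored and below cppr, the IPI (XICS_IPI) is
+ * returned instead.
+ */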
+
+X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+ u32 hirq;
+
+ pr_devel("H_XIRR\n");
+
+ xc->GLUE(X_STAT_PFX,h_xirr)++;
+
+ /* First collect pending bits from HW */
+ GLUE(X_PFX,ack_pending)(xc);
+
+ /*
+ * Cleanup the old-style bits if needed (they may have been
+ * set by a pull or an escalation interrupt).
+ */
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
+ &vcpu->arch.pending_exceptions);
+
+ pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
+ xc->pending, xc->hw_cppr, xc->cppr);
+
+ /* Grab previous CPPR and reverse map it */
+ old_cppr = xive_prio_to_guest(xc->cppr);
+
+ /* Scan for actual interrupts */
+ hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);
+
+ pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
+ hirq, xc->hw_cppr, xc->cppr);
+
+#ifdef XIVE_RUNTIME_CHECKS
+ /* That should never hit */
+ if (hirq & 0xff000000)
+ pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
+#endif
+
+ /*
+ * XXX We could check if the interrupt is masked here and
+ * filter it. If we chose to do so, we would need to do:
+ *
+ * if (masked) {
+ * lock();
+ * if (masked) {
+ * old_Q = true;
+ * hirq = 0;
+ * }
+ * unlock();
+ * }
+ */
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
+
+ return H_SUCCESS;
+}
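+
+/*
+ * The value placed in GPR4 above follows the XICS XIRR layout: the
+ * old CPPR in the top byte and the interrupt number in the low 24
+ * bits, e.g. hirq 0x1000 with old_cppr 0xff yields 0xff001000.
+ */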
+
+X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 pending = xc->pending;
+ u32 hirq;
+ u8 pipr;
+
+ pr_devel("H_IPOLL(server=%ld)\n", server);
+
+ xc->GLUE(X_STAT_PFX,h_ipoll)++;
+
+ /* Grab the target VCPU if not the current one */
+ if (xc->server_num != server) {
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Scan all priorities */
+ pending = 0xff;
+ } else {
+ /* Grab pending interrupt if any */
+ pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+ if (pipr < 8)
+ pending |= 1 << pipr;
+ }
+
+ hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);
+
+ return H_SUCCESS;
+}
+
+static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
+{
+ u8 pending, prio;
+
+ pending = xc->pending;
+ if (xc->mfrr != 0xff) {
+ if (xc->mfrr < 8)
+ pending |= 1 << xc->mfrr;
+ else
+ pending |= 0x80;
+ }
+ if (!pending)
+ return;
+ prio = ffs(pending) - 1;
+
+ __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
+}
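+
+/*
+ * e.g. with pending = 0x28 (priorities 3 and 5 pending), ffs() - 1
+ * yields 3, the most favored priority, which is what gets written
+ * to TM_SPC_SET_OS_PENDING above.
+ */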
+
+X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+
+ pr_devel("H_CPPR(cppr=%ld)\n", cppr);
+
+ xc->GLUE(X_STAT_PFX,h_cppr)++;
+
+ /* Map CPPR */
+ cppr = xive_prio_from_guest(cppr);
+
+ /* Remember old and update SW state */
+ old_cppr = xc->cppr;
+ xc->cppr = cppr;
+
+ /*
+ * We are masking less, so we need to look for pending things
+ * to deliver and set the VP pending bits accordingly to trigger
+ * a new interrupt; otherwise we might miss MFRR changes for
+ * which we have optimized out sending an IPI signal.
+ */
+ if (cppr > old_cppr)
+ GLUE(X_PFX,push_pending_to_hw)(xc);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = cppr;
+ __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+
+ return H_SUCCESS;
+}
+
+X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_irq_data *xd;
+ u8 new_cppr = xirr >> 24;
+ u32 irq = xirr & 0x00ffffff, hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("H_EOI(xirr=%08lx)\n", xirr);
+
+ xc->GLUE(X_STAT_PFX,h_eoi)++;
+
+ xc->cppr = xive_prio_from_guest(new_cppr);
+
+ /*
+ * IPIs are synthesized from MFRR and thus don't need
+ * any special EOI handling. The underlying interrupt
+ * used to signal MFRR changes is EOId when fetched from
+ * the queue.
+ */
+ if (irq == XICS_IPI || irq == 0)
+ goto bail;
+
+ /* Find interrupt source */
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel(" source not found !\n");
+ rc = H_PARAMETER;
+ goto bail;
+ }
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ state->in_eoi = true;
+ mb();
+
+again:
+ if (state->guest_priority == MASKED) {
+ arch_spin_lock(&sb->lock);
+ if (state->guest_priority != MASKED) {
+ arch_spin_unlock(&sb->lock);
+ goto again;
+ }
+ pr_devel(" EOI on saved P...\n");
+
+ /* Clear old_p, that will cause unmask to perform an EOI */
+ state->old_p = false;
+
+ arch_spin_unlock(&sb->lock);
+ } else {
+ pr_devel(" EOI on source...\n");
+
+ /* Perform EOI on the source */
+ GLUE(X_PFX,source_eoi)(hw_num, xd);
+
+ /* If it's an emulated LSI, check level and resend */
+ if (state->lsi && state->asserted)
+ __x_writeq(0, __x_trig_page(xd));
+ }
+
+ mb();
+ state->in_eoi = false;
+bail:
+
+ /* Re-evaluate pending IRQs and update HW */
+ GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
+ GLUE(X_PFX,push_pending_to_hw)(xc);
+ pr_devel(" after scan pending=%02x\n", xc->pending);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = xc->cppr;
+ __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+
+ return rc;
+}
+
+X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
+
+ xc->GLUE(X_STAT_PFX,h_ipi)++;
+
+ /* Find target */
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Locklessly write over MFRR */
+ xc->mfrr = mfrr;
+
+ /* Shoot the IPI if more favored than the target CPPR */
+ if (mfrr < xc->cppr)
+ __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
+
+ return H_SUCCESS;
+}
diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h
index 5a9a10b90762..3f1be85a83bc 100644
--- a/arch/powerpc/kvm/irq.h
+++ b/arch/powerpc/kvm/irq.h
@@ -12,6 +12,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
#endif
#ifdef CONFIG_KVM_XICS
ret = ret || (kvm->arch.xics != NULL);
+ ret = ret || (kvm->arch.xive != NULL);
#endif
smp_rmb();
return ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1ee22a910074..f7cf2cd564ef 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -38,6 +38,8 @@
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
+#include <asm/xive.h>
+
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -697,7 +699,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
break;
case KVMPPC_IRQ_XICS:
- kvmppc_xics_free_icp(vcpu);
+ if (xive_enabled())
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ else
+ kvmppc_xics_free_icp(vcpu);
break;
}
@@ -1522,8 +1527,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EPERM;
dev = kvm_device_from_filp(f.file);
- if (dev)
- r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ if (dev) {
+ if (xive_enabled())
+ r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
+ else
+ r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ }
fdput(f);
break;
@@ -1547,7 +1556,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
return true;
#endif
#ifdef CONFIG_KVM_XICS
- if (kvm->arch.xics)
+ if (kvm->arch.xics || kvm->arch.xive)
return true;
#endif
return false;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index d2f19821d71d..d12ea7b9fd47 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -412,11 +412,14 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
* been set for the PE, we will set EEH_PE_CFG_BLOCKED for
* that PE to block its config space.
*
+ * Broadcom BCM5718 2-ports NICs (14e4:1656)
* Broadcom Austin 4-ports NICs (14e4:1657)
* Broadcom Shiner 4-ports 1G NICs (14e4:168a)
* Broadcom Shiner 2-ports 10G NICs (14e4:168e)
*/
if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x1656) ||
+ (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
pdn->device_id == 0x1657) ||
(pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
pdn->device_id == 0x168a) ||
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 7925a9d72cca..59684b4af4d1 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -967,3 +967,4 @@ EXPORT_SYMBOL_GPL(opal_leds_set_ind);
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
/* Export this for KVM */
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
+EXPORT_SYMBOL_GPL(opal_int_eoi);
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 986cd111d4df..c651e668996b 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -377,6 +377,10 @@ static void cpm1_set_pin16(int port, int pin, int flags)
setbits16(&iop->odr_sor, pin);
else
clrbits16(&iop->odr_sor, pin);
+ if (flags & CPM_PIN_FALLEDGE)
+ setbits16(&iop->intr, pin);
+ else
+ clrbits16(&iop->intr, pin);
}
}
@@ -528,6 +532,9 @@ struct cpm1_gpio16_chip {
/* shadowed data register to clear/set bits safely */
u16 cpdata;
+
+ /* IRQ associated with Pins when relevant */
+ int irq[16];
};
static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
@@ -578,6 +585,14 @@ static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}
+static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+
+ return cpm1_gc->irq[gpio] ? : -ENXIO;
+}
+
static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
@@ -618,6 +633,7 @@ int cpm1_gpiochip_add16(struct device_node *np)
struct cpm1_gpio16_chip *cpm1_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
+ u16 mask;
cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
if (!cpm1_gc)
@@ -625,6 +641,14 @@ int cpm1_gpiochip_add16(struct device_node *np)
spin_lock_init(&cpm1_gc->lock);
+ if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) {
+ int i, j;
+
+ for (i = 0, j = 0; i < 16; i++)
+ if (mask & (1 << (15 - i)))
+ cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
+ }
+
mm_gc = &cpm1_gc->mm_gc;
gc = &mm_gc->gc;
@@ -634,6 +658,7 @@ int cpm1_gpiochip_add16(struct device_node *np)
gc->direction_output = cpm1_gpio16_dir_out;
gc->get = cpm1_gpio16_get;
gc->set = cpm1_gpio16_set;
+ gc->to_irq = cpm1_gpio16_to_irq;
return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6a98efb14264..913825086b8d 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -46,13 +46,15 @@
#endif
bool __xive_enabled;
+EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;
/* We use only one priority for now */
static u8 xive_irq_priority;
-/* TIMA */
+/* TIMA exported to KVM */
void __iomem *xive_tima;
+EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;
/* Backend ops */
@@ -345,8 +347,11 @@ static void xive_irq_eoi(struct irq_data *d)
DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->pending_prio);
- /* EOI the source if it hasn't been disabled */
- if (!irqd_irq_disabled(d))
+ /*
+ * EOI the source if it hasn't been disabled and hasn't
+ * been passed-through to a KVM guest
+ */
+ if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
xive_do_source_eoi(irqd_to_hwirq(d), xd);
/*
@@ -689,9 +694,14 @@ static int xive_irq_set_affinity(struct irq_data *d,
old_target = xd->target;
- rc = xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(target),
- xive_irq_priority, d->irq);
+ /*
+ * Only configure the irq if it's not currently passed-through to
+ * a KVM guest
+ */
+ if (!irqd_is_forwarded_to_vcpu(d))
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
if (rc < 0) {
pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
return rc;
@@ -771,6 +781,123 @@ static int xive_irq_retrigger(struct irq_data *d)
return 1;
}
+static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int rc;
+ u8 pq;
+
+ /*
+ * We only support this on interrupts that do not require
+ * firmware calls for masking and unmasking
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
+ return -EIO;
+
+ /*
+ * This is called by KVM with state non-NULL for enabling
+ * pass-through or NULL for disabling it
+ */
+ if (state) {
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Set it to PQ=10 state to prevent further sends */
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+
+ /* No target? Nothing to do */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ /*
+ * An untargeted interrupt should also have
+ * been masked at the source
+ */
+ WARN_ON(pq & 2);
+
+ return 0;
+ }
+
+ /*
+ * If P was set, adjust state to PQ=11 to indicate
+ * that a resend is needed for the interrupt to reach
+ * the guest. Also remember the value of P.
+ *
+ * This also tells us that it's in flight to a host queue
+ * or has already been fetched but hasn't been EOIed yet
+ * by the host. Thus it's potentially using up a host
+ * queue slot. This is important to know because as long
+ * as this is the case, we must not hard-unmask it when
+ * "returning" that interrupt to the host.
+ *
+ * This saved_p is cleared by the host EOI, when we know
+ * for sure the queue slot is no longer in use.
+ */
+ if (pq & 2) {
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ xd->saved_p = true;
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the guest. That should guarantee us
+ * that we *will* eventually get an EOI for it on
+ * the host. Otherwise there would be a small window
+ * for P to be seen here but the interrupt going
+ * to the guest queue.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+ } else
+ xd->saved_p = false;
+ } else {
+ irqd_clr_forwarded_to_vcpu(d);
+
+ /* No host target? Hard mask and return */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ xive_do_source_set_mask(xd, true);
+ return 0;
+ }
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the host.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+
+ /*
+ * By convention we are called with the interrupt in
+ * a PQ=10 or PQ=11 state, ie, it won't fire and will
+ * have latched in Q whether there's a pending HW
+ * interrupt or not.
+ *
+ * First reconfigure the target.
+ */
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ xive_irq_priority, d->irq);
+ if (rc)
+ return rc;
+
+ /*
+ * Then if saved_p is not set, effectively re-enable the
+ * interrupt with an EOI. If it is set, we know there is
+ * still a message in a host queue somewhere that will be
+ * EOId eventually.
+ *
+ * Note: We don't check irqd_irq_disabled(). Effectively,
+ * we *will* let the irq get through even if masked if the
+ * HW is still firing it in order to deal with the whole
+ * saved_p business properly. If the interrupt triggers
+ * while masked, the generic code will re-mask it anyway.
+ */
+ if (!xd->saved_p)
+ xive_do_source_eoi(hw_irq, xd);
+
+ }
+ return 0;
+}
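+
+/*
+ * Example flow when enabling pass-through on an interrupt that had
+ * already fired: the PQ=10 poke above returns P set, so we move to
+ * PQ=11, record saved_p and sync the source; the interrupt is only
+ * hard-unmasked again once the host EOI clears saved_p.
+ */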
+
static struct irq_chip xive_irq_chip = {
.name = "XIVE-IRQ",
.irq_startup = xive_irq_startup,
@@ -781,12 +908,14 @@ static struct irq_chip xive_irq_chip = {
.irq_set_affinity = xive_irq_set_affinity,
.irq_set_type = xive_irq_set_type,
.irq_retrigger = xive_irq_retrigger,
+ .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};
bool is_xive_irq(struct irq_chip *chip)
{
return chip == &xive_irq_chip;
}
+EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
@@ -801,6 +930,7 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd)
xd->trig_mmio = NULL;
}
}
+EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1a726229a427..ab9ecce61ee5 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -31,6 +31,7 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
#include "xive-internal.h"
@@ -95,6 +96,7 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
}
return 0;
}
+EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
@@ -108,6 +110,8 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
}
return rc == 0 ? 0 : -ENXIO;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+
/* This can be called multiple time to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -172,6 +176,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_queue);
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
@@ -192,6 +197,7 @@ void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
__xive_native_disable_queue(vp_id, q, prio);
}
+EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
@@ -262,6 +268,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
return 0;
}
+#endif /* CONFIG_SMP */
u32 xive_native_alloc_irq(void)
{
@@ -277,6 +284,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
void xive_native_free_irq(u32 irq)
{
@@ -287,7 +295,9 @@ void xive_native_free_irq(u32 irq)
msleep(1);
}
}
+EXPORT_SYMBOL_GPL(xive_native_free_irq);
+#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
@@ -383,7 +393,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
return;
/* Enable the pool VP */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
@@ -428,7 +438,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Disable it */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
for (;;) {
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
@@ -437,10 +447,11 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
}
}
-static void xive_native_sync_source(u32 hw_irq)
+void xive_native_sync_source(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
+EXPORT_SYMBOL_GPL(xive_native_sync_source);
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
@@ -501,10 +512,24 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
+static void xive_native_setup_pools(void)
+{
+ /* Allocate a pool big enough */
+ pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+
+ xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
+ if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
+ pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+
+ pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+ xive_pool_vps, nr_cpu_ids);
+}
+
u32 xive_native_default_eq_shift(void)
{
return xive_queue_shift;
}
+EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
bool xive_native_init(void)
{
@@ -514,7 +539,7 @@ bool xive_native_init(void)
struct property *prop;
u8 max_prio = 7;
const __be32 *p;
- u32 val;
+ u32 val, cpu;
s64 rc;
if (xive_cmdline_disabled)
@@ -550,7 +575,11 @@ bool xive_native_init(void)
break;
}
- /* Grab size of provisioning pages */
+ /* Configure Thread Management areas for KVM */
+ for_each_possible_cpu(cpu)
+ kvmppc_set_xive_tima(cpu, r.start, tima);
+
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
@@ -560,6 +589,9 @@ bool xive_native_init(void)
return false;
}
+ /* Setup some dummy HV pool VPs */
+ xive_native_setup_pools();
+
/* Initialize XIVE core with our backend */
if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
@@ -638,3 +670,47 @@ void xive_native_free_vp_block(u32 vp_base)
pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
+
+int xive_native_enable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_enable_vp);
+
+int xive_native_disable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_disable_vp);
+
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
+{
+ __be64 vp_cam_be;
+ __be32 vp_chip_id_be;
+ s64 rc;
+
+ rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
+ if (rc)
+ return -EIO;
+ *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
+ *out_chip_id = be32_to_cpu(vp_chip_id_be);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index addb09cee0f5..ca62066895e0 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -10,49 +10,3 @@ generic-y += poll.h
generic-y += resource.h
generic-y += sockios.h
generic-y += termbits.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += chpid.h
-header-y += chsc.h
-header-y += clp.h
-header-y += cmb.h
-header-y += dasd.h
-header-y += debug.h
-header-y += errno.h
-header-y += guarded_storage.h
-header-y += hypfs.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += kvm_perf.h
-header-y += kvm_virtio.h
-header-y += monwriter.h
-header-y += msgbuf.h
-header-y += pkey.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += qeth.h
-header-y += schid.h
-header-y += sclp_ctl.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sie.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += tape390.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
-header-y += virtio-ccw.h
-header-y += vtoc.h
-header-y += zcrypt.h
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index e3a8d0f96652..54b3b2039af1 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y +=
-
generic-y += barrier.h
generic-y += clkdev.h
generic-y += current.h
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index 040178cdb3eb..b15bf6bc0e94 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -1,34 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 336f33a419d9..280bbff12102 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -94,7 +94,8 @@ defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux
# Set some sensible Kbuild defaults
-KBUILD_IMAGE := $(defaultimage-y)
+boot := arch/sh/boot
+KBUILD_IMAGE := $(boot)/$(defaultimage-y)
#
# Choosing incompatible machines durings configuration will result in
@@ -186,8 +187,6 @@ cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/
-boot := arch/sh/boot
-
cflags-y += $(foreach d, $(cpuincdir-y), -Iarch/sh/include/$(d)) \
$(foreach d, $(machdir-y), -Iarch/sh/include/$(d))
@@ -211,7 +210,7 @@ BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
romImage
PHONY += $(BOOT_TARGETS)
-all: $(KBUILD_IMAGE)
+all: $(notdir $(KBUILD_IMAGE))
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index 60613ae78513..b15bf6bc0e94 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,25 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += cpu-features.h
-header-y += hw_breakpoint.h
-header-y += ioctls.h
-header-y += posix_types.h
-header-y += posix_types_32.h
-header-y += posix_types_64.h
-header-y += ptrace.h
-header-y += ptrace_32.h
-header-y += ptrace_64.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += sockios.h
-header-y += stat.h
-header-y += swab.h
-header-y += types.h
-header-y += unistd.h
-header-y += unistd_32.h
-header-y += unistd_64.h
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index b5843ee09fb5..b15bf6bc0e94 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,50 +1,2 @@
# UAPI Header export list
-# User exported sparc header files
-
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += apc.h
-header-y += asi.h
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += display7seg.h
-header-y += envctrl.h
-header-y += errno.h
-header-y += fbio.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += jsflash.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += openpromio.h
-header-y += param.h
-header-y += perfctr.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += psr.h
-header-y += psrcompat.h
-header-y += pstate.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += traps.h
-header-y += uctx.h
-header-y += unistd.h
-header-y += utrap.h
-header-y += watchdog.h
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 44101196d02b..41a407328667 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -939,3 +939,9 @@ ENTRY(__retl_o1)
retl
mov %o1, %o0
ENDPROC(__retl_o1)
+
+ENTRY(__retl_o1_asi)
+ wr %o5, 0x0, %asi
+ retl
+ mov %o1, %o0
+ENDPROC(__retl_o1_asi)
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index 44a3ed93c214..e278bf52963b 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -70,16 +70,9 @@ static ssize_t led_proc_write(struct file *file, const char __user *buffer,
if (count > LED_MAX_LENGTH)
count = LED_MAX_LENGTH;
- buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count)) {
- kfree(buf);
- return -EFAULT;
- }
-
- buf[count] = '\0';
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
/* work around \n when echo'ing into proc */
if (buf[count - 1] == '\n')
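
The memdup_user_nul() conversion above folds an allocation, a user-space copy
with its error unwinding, and manual NUL termination into one call that
returns an ERR_PTR on failure. A minimal userspace sketch of the same contract
(malloc and memcpy stand in for the kernel allocator and copy_from_user();
the helper name is illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate len bytes of src and NUL-terminate the copy, mirroring
 * the shape of the kernel's memdup_user_nul(). */
static char *memdup_nul(const char *src, size_t len)
{
	char *buf = malloc(len + 1);

	if (!buf)
		return NULL;	/* the kernel helper returns ERR_PTR(-ENOMEM) */
	memcpy(buf, src, len);	/* the kernel helper copies from user space */
	buf[len] = '\0';
	return buf;
}

int main(void)
{
	char *s = memdup_nul("01\n", 3);

	if (!s)
		return 1;
	/* same '\n' workaround as led_proc_write() */
	if (s[strlen(s) - 1] == '\n')
		s[strlen(s) - 1] = '\0';
	printf("parsed LED command: %s\n", s);
	free(s);
	return 0;
}
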
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 6f06058c5ae7..6722308d1a98 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -148,7 +148,7 @@ static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
- while (*commands && *commands == ' ')
+ while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 6b7331d198e9..422b17880955 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -133,7 +133,7 @@ static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
- while (*commands && *commands == ' ')
+ while (*commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
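
Both boot_flags_init() hunks (setup_32.c and setup_64.c) drop a redundant
test: when *commands == ' ' holds, *commands is necessarily non-zero, so the
extra *commands && guard never changed the outcome. A small self-check of the
equivalence:

#include <assert.h>

/* Skip leading spaces two ways; the extra '*p &&' test is redundant
 * because ' ' != '\0', so both loops stop at the same character. */
static const char *skip_old(const char *p)
{
	while (*p && *p == ' ')
		p++;
	return p;
}

static const char *skip_new(const char *p)
{
	while (*p == ' ')
		p++;
	return p;
}

int main(void)
{
	const char *cmdline = "   root=/dev/sda1";

	assert(skip_old(cmdline) == skip_new(cmdline));
	return 0;
}
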
diff --git a/arch/sparc/lib/GENbzero.S b/arch/sparc/lib/GENbzero.S
index 8e7a843ddd88..2fbf6297d57c 100644
--- a/arch/sparc/lib/GENbzero.S
+++ b/arch/sparc/lib/GENbzero.S
@@ -8,7 +8,7 @@
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_o1; \
+ .word 98b, __retl_o1_asi;\
.text; \
.align 4;
diff --git a/arch/sparc/lib/NGbzero.S b/arch/sparc/lib/NGbzero.S
index beab29bf419b..33053bdf3766 100644
--- a/arch/sparc/lib/NGbzero.S
+++ b/arch/sparc/lib/NGbzero.S
@@ -8,7 +8,7 @@
98: x,y; \
.section __ex_table,"a";\
.align 4; \
- .word 98b, __retl_o1; \
+ .word 98b, __retl_o1_asi;\
.text; \
.align 4;
diff --git a/arch/tile/include/arch/Kbuild b/arch/tile/include/arch/Kbuild
deleted file mode 100644
index 3751c9fabcf2..000000000000
--- a/arch/tile/include/arch/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-# Tile arch headers
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 24c44e93804d..16f0b08c8ce9 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y += ../arch/
-
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
diff --git a/arch/tile/include/uapi/arch/Kbuild b/arch/tile/include/uapi/arch/Kbuild
deleted file mode 100644
index 97dfbecec6b6..000000000000
--- a/arch/tile/include/uapi/arch/Kbuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# UAPI Header export list
-header-y += abi.h
-header-y += chip.h
-header-y += chip_tilegx.h
-header-y += chip_tilepro.h
-header-y += icache.h
-header-y += interrupts.h
-header-y += interrupts_32.h
-header-y += interrupts_64.h
-header-y += opcode.h
-header-y += opcode_tilegx.h
-header-y += opcode_tilepro.h
-header-y += sim.h
-header-y += sim_def.h
-header-y += spr_def.h
-header-y += spr_def_32.h
-header-y += spr_def_64.h
diff --git a/arch/tile/include/uapi/asm/Kbuild b/arch/tile/include/uapi/asm/Kbuild
index c20db8e428bf..0c74c3c5ebfa 100644
--- a/arch/tile/include/uapi/asm/Kbuild
+++ b/arch/tile/include/uapi/asm/Kbuild
@@ -1,21 +1,4 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += hardwall.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += swab.h
-header-y += ucontext.h
-header-y += unistd.h
-
generic-y += ucontext.h
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index ed9c5b5ff028..85f6dd204ab6 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -57,3 +57,8 @@ config HZ
config SUBARCH
string
option env="SUBARCH"
+
+config NR_CPUS
+ int
+ range 1 1
+ default 1
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 48bae81f8dca..6f6e7896e53f 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -14,7 +14,7 @@
static char *initrd __initdata = NULL;
static int load_initrd(char *filename, void *buf, int size);
-static int __init read_initrd(void)
+int __init read_initrd(void)
{
void *area;
long long size;
@@ -46,8 +46,6 @@ static int __init read_initrd(void)
return 0;
}
-__uml_postsetup(read_initrd);
-
static int __init uml_initrd_setup(char *line, int *add)
{
initrd = line;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index a76295f7ede9..6b995e870d55 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -20,10 +20,8 @@
static void _print_addr(void *data, unsigned long address, int reliable)
{
- pr_info(" [<%08lx>]", address);
- pr_cont(" %s", reliable ? "" : "? ");
- print_symbol("%s", address);
- pr_cont("\n");
+ pr_info(" [<%08lx>] %s%pF\n", address, reliable ? "" : "? ",
+ (void *)address);
}
static const struct stacktrace_ops stackops = {
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 4b85acd4020c..64a1fd06f3fd 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -338,11 +338,17 @@ int __init linux_main(int argc, char **argv)
return start_uml();
}
+int __init __weak read_initrd(void)
+{
+ return 0;
+}
+
void __init setup_arch(char **cmdline_p)
{
stack_protections((unsigned long) &init_thread_info);
setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
mem_total_pages(physmem_size, iomem_size, highmem);
+ read_initrd();
paging_init();
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 23025d645160..03b3c4cc7735 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -21,6 +21,7 @@
#include <registers.h>
#include <skas.h>
#include <sysdep/stub.h>
+#include <linux/threads.h>
int is_skas_winch(int pid, int fd, void *data)
{
@@ -233,9 +234,6 @@ static int userspace_tramp(void *stack)
return 0;
}
-/* Each element set once, and only accessed by a single processor anyway */
-#undef NR_CPUS
-#define NR_CPUS 1
int userspace_pid[NR_CPUS];
int start_userspace(unsigned long stub_stack)
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
index b6f5c4c1eaf9..98a5ca43ae87 100644
--- a/arch/unicore32/Makefile
+++ b/arch/unicore32/Makefile
@@ -43,9 +43,9 @@ boot := arch/unicore32/boot
# Default defconfig and target when executing plain make
KBUILD_DEFCONFIG := $(ARCH)_defconfig
-KBUILD_IMAGE := zImage
+KBUILD_IMAGE := $(boot)/zImage
-all: $(KBUILD_IMAGE)
+all: zImage
zImage Image uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 0514d7ad6855..13a97aa2285f 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,10 +1,4 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += byteorder.h
-header-y += kvm_para.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += unistd.h
-
generic-y += kvm_para.h
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4430dd489620..5851411e60fb 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -179,7 +179,8 @@ ifdef CONFIG_JUMP_LABEL
endif
ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
- KBUILD_CFLAGS += -maccumulate-outgoing-args
+ # This compiler flag is not supported by Clang:
+ KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
# Stackpointer is addressed different for 32 bit and 64 bit x86
diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h
index 2e59dac07f9e..d732e608e3af 100644
--- a/arch/x86/boot/compressed/error.h
+++ b/arch/x86/boot/compressed/error.h
@@ -1,7 +1,9 @@
#ifndef BOOT_COMPRESSED_ERROR_H
#define BOOT_COMPRESSED_ERROR_H
+#include <linux/compiler.h>
+
void warn(char *m);
-void error(char *m);
+void error(char *m) __noreturn;
#endif /* BOOT_COMPRESSED_ERROR_H */
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 56589d0a804b..1d78f1739087 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -70,7 +70,7 @@ static unsigned long level4p;
* Due to relocation, pointers must be assigned at run time not build time.
*/
static struct x86_mapping_info mapping_info = {
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
};
/* Locates and clears a region for a new top level page table. */
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 9d05c7e67f60..a45e2114a846 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -761,7 +761,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7acb51c49fec..7a9df3beb89b 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -32,6 +32,7 @@
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
+#define _ASM_MUL __ASM_SIZE(mul)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 737da62bfeb0..474eb8c66fee 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -4,8 +4,9 @@
struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
void *context; /* context for alloc_pgt_page */
- unsigned long pmd_flag; /* page flag for PMD entry */
+ unsigned long page_flag; /* page flag for PMD or PUD entry */
unsigned long offset; /* ident mapping offset */
+ bool direct_gbpages; /* PUD level 1GB page support */
};
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f5bddf92faba..9c761fea0c98 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1020,6 +1020,8 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d5a22bac9988..0ff8fe71b255 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
if (bytes < 8) {
if (!IS_ALIGNED(dest, 4) || (bytes != 4))
- arch_wb_cache_pmem(addr, 1);
+ arch_wb_cache_pmem(addr, bytes);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 3dec769cadf7..83b6e9a0dce4 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -4,62 +4,3 @@ include include/uapi/asm-generic/Kbuild.asm
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
-header-y += a.out.h
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += boot.h
-header-y += bootparam.h
-header-y += byteorder.h
-header-y += debugreg.h
-header-y += e820.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += hw_breakpoint.h
-header-y += hyperv.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += ist.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += kvm_perf.h
-header-y += ldt.h
-header-y += mce.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += msr-index.h
-header-y += msr.h
-header-y += mtrr.h
-header-y += param.h
-header-y += perf_regs.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += posix_types_32.h
-header-y += posix_types_64.h
-header-y += posix_types_x32.h
-header-y += prctl.h
-header-y += processor-flags.h
-header-y += ptrace-abi.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += sigcontext32.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += svm.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
-header-y += vm86.h
-header-y += vmx.h
-header-y += vsyscall.h
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ee8f11800295..bb5abe8f5fd4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -799,8 +799,9 @@ static void init_amd(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
- /* AMD CPUs don't reset SS attributes on SYSRET */
- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 7889ae492af0..45db4d2ebd01 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -10,7 +10,7 @@
* Author: Peter Oruba <peter.oruba@amd.com>
*
* Based on work by:
- * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * Tigran Aivazian <aivazian.tigran@gmail.com>
*
* early loader:
* Copyright (C) 2013 Advanced Micro Devices, Inc.
@@ -352,8 +352,6 @@ void reload_ucode_amd(void)
u32 rev, dummy;
mc = (struct microcode_amd *)amd_ucode_patch;
- if (!mc)
- return;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b4a4cd39b358..e53d3c909840 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -1,7 +1,7 @@
/*
* CPU Microcode Update Driver for Linux
*
- * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
* 2006 Shaohua Li <shaohua.li@intel.com>
* 2013-2016 Borislav Petkov <bp@alien8.de>
*
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8325d8a09ab0..afdfd237b59f 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -1,7 +1,7 @@
/*
* Intel CPU Microcode Update Driver for Linux
*
- * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
* 2006 Shaohua Li <shaohua.li@intel.com>
*
* Intel CPU microcode early update for Linux
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index be22f5a2192e..4e3b8a587c88 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -418,6 +418,7 @@ struct legacy_pic default_legacy_pic = {
};
struct legacy_pic *legacy_pic = &default_legacy_pic;
+EXPORT_SYMBOL(legacy_pic);
static int __init i8259A_init_ops(void)
{
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index ce640428d6fe..6f5ca4ebe6e5 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -114,7 +114,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
.context = image,
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
};
unsigned long mstart, mend;
pgd_t *level4p;
@@ -123,6 +123,10 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
level4p = (pgd_t *)__va(start_pgtable);
clear_page(level4p);
+
+ if (direct_gbpages)
+ info.direct_gbpages = true;
+
for (i = 0; i < nr_pfn_mapped; i++) {
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 603a1669a2ec..0b4d3c686b1e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1225,6 +1225,21 @@ void __init setup_arch(char **cmdline_p)
kasan_init();
+#ifdef CONFIG_X86_32
+ /* sync back kernel address range */
+ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /*
+ * sync back low identity map too. It is used for example
+ * in the 32-bit EFI stub.
+ */
+ clone_pgd_range(initial_page_table,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+#endif
+
tboot_probe();
map_vsyscall();
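
clone_pgd_range(), used in the setup_arch() hunk above, is essentially a
typed memcpy of top-level page-table entries from swapper_pg_dir back into
initial_page_table, so early code still running on the initial table sees the
current kernel mappings. A hedged userspace model of that shape (pgd_t
reduced to an integer; the sizes are illustrative 32-bit figures, not
authoritative):

#include <string.h>

typedef unsigned long pgd_t;	/* stand-in for the kernel type */

#define PTRS_PER_PGD		1024	/* illustrative 32-bit figure */
#define KERNEL_PGD_BOUNDARY	768	/* illustrative: 3G/1G split */
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

/* Same shape as the kernel's clone_pgd_range(): copy 'count'
 * consecutive PGD entries from src to dst. */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

int main(void)
{
	static pgd_t swapper_pg_dir[PTRS_PER_PGD];
	static pgd_t initial_page_table[PTRS_PER_PGD];

	/* sync back the kernel address range, as in setup_arch() */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
	return 0;
}
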
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bb1e8cc0bc84..10edd1e69a68 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -291,11 +291,11 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_X86_32
/*
- * Sync back kernel address range. We want to make sure that
- * all kernel mappings, including percpu mappings, are available
- * in the smpboot asm. We can't reliably pick up percpu
- * mappings using vmalloc_fault(), because exception dispatch
- * needs percpu data.
+ * Sync back kernel address range again. We already did this in
+ * setup_arch(), but percpu data also needs to be available in
+ * the smpboot asm. We can't reliably pick up percpu mappings
+ * using vmalloc_fault(), because exception dispatch needs
+ * percpu data.
*/
clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index d4c8011a2293..4b1724059909 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,6 +514,9 @@ int tboot_force_iommu(void)
if (!tboot_enabled())
return 0;
+ if (!intel_iommu_tboot_noforce)
+ return 1;
+
if (no_iommu || swiotlb || dmar_disabled)
pr_warning("Forcing Intel-IOMMU to enabled\n");
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 558676538fca..5d3376f67794 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1498,6 +1498,21 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
+/**
+ * kvm_arch_write_log_dirty - emulate dirty page logging
+ * @vcpu: Guest mode vcpu
+ *
+ * Emulate arch specific page modification logging for the
+ * nested hypervisor
+ */
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+{
+ if (kvm_x86_ops->write_log_dirty)
+ return kvm_x86_ops->write_log_dirty(vcpu);
+
+ return 0;
+}
+
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn)
{
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d8ccb32f7308..27975807cc64 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -202,4 +202,5 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 314d2071b337..56241746abbd 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -226,6 +226,10 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
if (level == walker->level && write_fault &&
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+#if PTTYPE == PTTYPE_EPT
+ if (kvm_arch_write_log_dirty(vcpu))
+ return -EINVAL;
+#endif
pte |= PT_GUEST_DIRTY_MASK;
}
if (pte == orig_pte)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c5fd459c4043..c6f4ad44aa95 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -248,6 +248,7 @@ struct __packed vmcs12 {
u64 xss_exit_bitmap;
u64 guest_physical_address;
u64 vmcs_link_pointer;
+ u64 pml_address;
u64 guest_ia32_debugctl;
u64 guest_ia32_pat;
u64 guest_ia32_efer;
@@ -369,6 +370,7 @@ struct __packed vmcs12 {
u16 guest_ldtr_selector;
u16 guest_tr_selector;
u16 guest_intr_status;
+ u16 guest_pml_index;
u16 host_es_selector;
u16 host_cs_selector;
u16 host_ss_selector;
@@ -407,6 +409,7 @@ struct nested_vmx {
/* Has the level1 guest done vmxon? */
bool vmxon;
gpa_t vmxon_ptr;
+ bool pml_full;
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr;
@@ -742,6 +745,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
FIELD(GUEST_INTR_STATUS, guest_intr_status),
+ FIELD(GUEST_PML_INDEX, guest_pml_index),
FIELD(HOST_ES_SELECTOR, host_es_selector),
FIELD(HOST_CS_SELECTOR, host_cs_selector),
FIELD(HOST_SS_SELECTOR, host_ss_selector),
@@ -767,6 +771,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
+ FIELD64(PML_ADDRESS, pml_address),
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
@@ -1314,6 +1319,11 @@ static inline bool report_flexpriority(void)
return flexpriority_enabled;
}
+static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
+{
+ return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
+}
+
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
return vmcs12->cpu_based_vm_exec_control & bit;
@@ -1348,6 +1358,11 @@ static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
vmx_xsaves_supported();
}
+static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
+}
+
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -2751,8 +2766,11 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_1GB_PAGE_BIT;
- if (enable_ept_ad_bits)
+ if (enable_ept_ad_bits) {
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_PML;
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
+ }
} else
vmx->nested.nested_vmx_ept_caps = 0;
@@ -8114,7 +8132,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_PREEMPTION_TIMER:
return false;
case EXIT_REASON_PML_FULL:
- /* We don't expose PML support to L1. */
+ /* We emulate PML support to L1. */
return false;
default:
return true;
@@ -9364,13 +9382,20 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason;
+ unsigned long exit_qualification = vcpu->arch.exit_qualification;
- if (fault->error_code & PFERR_RSVD_MASK)
+ if (vmx->nested.pml_full) {
+ exit_reason = EXIT_REASON_PML_FULL;
+ vmx->nested.pml_full = false;
+ exit_qualification &= INTR_INFO_UNBLOCK_NMI;
+ } else if (fault->error_code & PFERR_RSVD_MASK)
exit_reason = EXIT_REASON_EPT_MISCONFIG;
else
exit_reason = EXIT_REASON_EPT_VIOLATION;
- nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
+
+ nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
vmcs12->guest_physical_address = fault->address;
}
@@ -9713,6 +9738,22 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
return 0;
}
+static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u64 address = vmcs12->pml_address;
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
+ if (!nested_cpu_has_ept(vmcs12) ||
+ !IS_ALIGNED(address, 4096) ||
+ address >> maxphyaddr)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{
@@ -9886,7 +9927,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
bool from_vmentry, u32 *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exec_control;
+ u32 exec_control, vmcs12_exec_ctrl;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10017,8 +10058,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
- exec_control |= vmcs12->secondary_vm_exec_control;
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
+ vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
+ ~SECONDARY_EXEC_ENABLE_PML;
+ exec_control |= vmcs12_exec_ctrl;
+ }
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
vmcs_write64(EOI_EXIT_BITMAP0,
@@ -10248,6 +10292,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_vmx_check_pml_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high) ||
@@ -10266,6 +10313,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx->nested.nested_vmx_entry_ctls_high))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
@@ -11143,6 +11193,46 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gpa_t gpa;
+ struct page *page = NULL;
+ u64 *pml_address;
+
+ if (is_guest_mode(vcpu)) {
+ WARN_ON_ONCE(vmx->nested.pml_full);
+
+ /*
+ * Check if PML is enabled for the nested guest.
+ * Whether eptp bit 6 is set is already checked
+ * as part of A/D emulation.
+ */
+ vmcs12 = get_vmcs12(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
+
+ if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+ vmx->nested.pml_full = true;
+ return 1;
+ }
+
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+
+ page = nested_get_page(vcpu, vmcs12->pml_address);
+ if (!page)
+ return 0;
+
+ pml_address = kmap(page);
+ pml_address[vmcs12->guest_pml_index--] = gpa;
+ kunmap(page);
+ nested_release_page_clean(page);
+ }
+
+ return 0;
+}
+
static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *memslot,
gfn_t offset, unsigned long mask)
@@ -11502,6 +11592,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+ .write_log_dirty = vmx_write_pml_buffer,
.pre_block = vmx_pre_block,
.post_block = vmx_post_block,
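
The new vmx_write_pml_buffer() gives nested hypervisors emulated Page
Modification Logging: each dirtied guest-physical address is stored
page-aligned into the 512-entry PML page that L1 configured, with
guest_pml_index counting down until a PML-full exit must be delivered. A
compact userspace model of that buffer discipline (the entry count comes from
the VMX PML format; everything else here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define PML_ENTITY_NUM 512		/* entries in one 4K PML page */

struct pml_state {
	uint64_t buffer[PML_ENTITY_NUM];	/* stands in for L1's PML page */
	int index;				/* counts down, like guest_pml_index */
};

/* Returns 0 if the GPA was logged, 1 if the buffer is full and a
 * PML-full exit would have to be delivered to L1. */
static int pml_log(struct pml_state *s, uint64_t gpa)
{
	if (s->index < 0)
		return 1;			/* buffer full: inject exit */
	s->buffer[s->index--] = gpa & ~0xFFFull;	/* page-aligned GPA */
	return 0;
}

int main(void)
{
	struct pml_state s = { .index = PML_ENTITY_NUM - 1 };

	while (!pml_log(&s, 0x1234000))
		;
	printf("PML-full after %d entries\n", PML_ENTITY_NUM);
	return 0;
}
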
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 7e48807b2fa1..45a53dfe1859 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
movq %r12, 3*8(%rsp)
movq %r14, 4*8(%rsp)
movq %r13, 5*8(%rsp)
- movq %rbp, 6*8(%rsp)
+ movq %r15, 6*8(%rsp)
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
/* main loop. clear in 64 byte blocks */
/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
- /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
.p2align 4
.Lloop:
source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
source
movq 32(%rdi), %r10
source
- movq 40(%rdi), %rbp
+ movq 40(%rdi), %r15
source
movq 48(%rdi), %r14
source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
adcq %r11, %rax
adcq %rdx, %rax
adcq %r10, %rax
- adcq %rbp, %rax
+ adcq %r15, %rax
adcq %r14, %rax
adcq %r13, %rax
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
dest
movq %r10, 32(%rsi)
dest
- movq %rbp, 40(%rsi)
+ movq %r15, 40(%rsi)
dest
movq %r14, 48(%rsi)
dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
movq 3*8(%rsp), %r12
movq 4*8(%rsp), %r14
movq 5*8(%rsp), %r13
- movq 6*8(%rsp), %rbp
+ movq 6*8(%rsp), %r15
addq $7*8, %rsp
ret
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 5761a4f19455..ab2d1d73e9e7 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -5,6 +5,7 @@
* kernel starts. This file is included in the compressed kernel and
* normally linked in the regular kernel.
*/
+#include <asm/asm.h>
#include <asm/kaslr.h>
#include <asm/msr.h>
#include <asm/archrandom.h>
@@ -79,7 +80,7 @@ unsigned long kaslr_get_random_long(const char *purpose)
}
/* Circular multiply for better bit diffusion */
- asm("mul %3"
+ asm(_ASM_MUL "%3"
: "=a" (random), "=d" (raw)
: "a" (random), "rm" (mix_const));
random += raw;
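
_ASM_MUL expands to the operand-size-correct one-operand mul, whose
double-width product lands in edx:eax or rdx:rax; kaslr_get_random_long()
then folds the high half back into the low half for bit diffusion. A C sketch
of that "circular multiply" on a 64-bit host, assuming the compiler supports
unsigned __int128 (the constant below is illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Fold the 128-bit product of random * mix_const back onto itself,
 * as the mul / "random += raw" pair does in kaslr_get_random_long(). */
static uint64_t circular_multiply(uint64_t random, uint64_t mix_const)
{
	unsigned __int128 product = (unsigned __int128)random * mix_const;
	uint64_t low = (uint64_t)product;		/* rax after mulq */
	uint64_t high = (uint64_t)(product >> 64);	/* rdx after mulq */

	return low + high;
}

int main(void)
{
	/* odd mixing constant, chosen here only for illustration */
	printf("%#llx\n", (unsigned long long)
	       circular_multiply(0x12345678, 0x3f39e593cbc15493ULL));
	return 0;
}
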
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 04210a29dd60..adab1595f4bd 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -13,7 +13,7 @@ static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
if (pmd_present(*pmd))
continue;
- set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
+ set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
}
}
@@ -30,6 +30,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
if (next > end)
next = end;
+ if (info->direct_gbpages) {
+ pud_t pudval;
+
+ if (pud_present(*pud))
+ continue;
+
+ addr &= PUD_MASK;
+ pudval = __pud((addr - info->offset) | info->page_flag);
+ set_pud(pud, pudval);
+ continue;
+ }
+
if (pud_present(*pud)) {
pmd = pmd_offset(pud, 0);
ident_pmd_init(info, pmd, addr, next);
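
In the direct_gbpages branch added above, ident_pud_init() maps a region with
a single 1GB PUD entry instead of descending into a PMD page: the address is
rounded down to a PUD boundary, rebased by info->offset, and OR'd with the
page flags. A reduced model of that entry calculation (toy flag value, not
the kernel macros):

#include <stdio.h>

#define PUD_SHIFT	30			/* 1GB pages */
#define PUD_MASK	(~0ULL << PUD_SHIFT)
#define PAGE_FLAG	0x1e3ULL		/* illustrative large-page flags */

/* One 1GB identity-map entry, as in the direct_gbpages branch. */
static unsigned long long ident_pud_entry(unsigned long long addr,
					  unsigned long long offset)
{
	addr &= PUD_MASK;			/* round down to a 1GB boundary */
	return (addr - offset) | PAGE_FLAG;	/* physical target | flags */
}

int main(void)
{
	printf("pud entry for 0x7fedc000: %#llx\n",
	       ident_pud_entry(0x7fedc000ULL, 0));
	return 0;
}
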
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 41270b96403d..95651dc58e09 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup);
*/
void sync_global_pgds(unsigned long start, unsigned long end)
{
- unsigned long address;
+ unsigned long addr;
- for (address = start; address <= end; address += PGDIR_SIZE) {
- pgd_t *pgd_ref = pgd_offset_k(address);
+ for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ pgd_t *pgd_ref = pgd_offset_k(addr);
const p4d_t *p4d_ref;
struct page *page;
@@ -106,7 +106,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
* handle synchronization on p4d level.
*/
BUILD_BUG_ON(pgd_none(*pgd_ref));
- p4d_ref = p4d_offset(pgd_ref, address);
+ p4d_ref = p4d_offset(pgd_ref, addr);
if (p4d_none(*p4d_ref))
continue;
@@ -117,8 +117,8 @@ void sync_global_pgds(unsigned long start, unsigned long end)
p4d_t *p4d;
spinlock_t *pgt_lock;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- p4d = p4d_offset(pgd, address);
+ pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+ p4d = p4d_offset(pgd, addr);
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
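
The loop rewrite in sync_global_pgds() matters when start is not
PGDIR-aligned: stepping with addr += PGDIR_SIZE can hop over the PGD that
covers end, whereas ALIGN(addr + 1, PGDIR_SIZE) lands on each successive PGD
boundary. A demonstration with shrunken constants (toy sizes, same
arithmetic):

#include <stdio.h>

#define PGDIR_SIZE	0x100UL			/* toy value for illustration */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define PGD_INDEX(x)	((x) / PGDIR_SIZE)

int main(void)
{
	unsigned long start = PGDIR_SIZE - 1;	/* last byte of pgd 0 */
	unsigned long end = PGDIR_SIZE;		/* first byte of pgd 1 */
	unsigned long addr;

	printf("old loop visits pgds:");
	for (addr = start; addr <= end; addr += PGDIR_SIZE)
		printf(" %lu", PGD_INDEX(addr));	/* only pgd 0: pgd 1 is missed */

	printf("\nnew loop visits pgds:");
	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE))
		printf(" %lu", PGD_INDEX(addr));	/* pgd 0, then pgd 1 */
	printf("\n");
	return 0;
}
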
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 6b7ce6279133..aca6295350f3 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@ void __init initmem_init(void)
printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
+ __vmalloc_start_set = true;
setup_bootmem_allocator();
}
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 38868adf07ea..f6ae6830b341 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -9,7 +9,7 @@
#include <linux/mmiotrace.h>
static unsigned long mmio_address;
-module_param(mmio_address, ulong, 0);
+module_param_hw(mmio_address, ulong, iomem, 0);
MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
"(or 8 MB if read_far is non-zero).");
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 6a61194ffd58..a6e21fee22ea 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -104,7 +104,7 @@ static int set_up_temporary_mappings(void)
{
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
.offset = __PAGE_OFFSET,
};
unsigned long mstart, mend;
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a5c9910d234f..09a085bde0d4 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
+ addr = addr >> 3;
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;
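
The poke_user() fix reflects that on x86_64 the u_debugreg[] slots in struct
user are 8 bytes wide, so a byte offset into the array must be divided by 8
(>> 3); the old >> 2 produced out-of-range indices for the upper registers.
The arithmetic, spelled out against a simplified layout (the struct here is
illustrative, not the real struct user):

#include <stddef.h>
#include <stdio.h>

/* Simplified 64-bit struct user layout: eight 8-byte debug registers. */
struct user_layout {
	unsigned long other[10];	/* illustrative fields before the array */
	unsigned long u_debugreg[8];
};

int main(void)
{
	size_t base = offsetof(struct user_layout, u_debugreg[0]);
	size_t addr = offsetof(struct user_layout, u_debugreg[7]) - base;

	printf("addr >> 2 = %zu (out of range)\n", addr >> 2);	/* 14 */
	printf("addr >> 3 = %zu (correct index)\n", addr >> 3);	/* 7 */
	return 0;
}
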
diff --git a/arch/x86/um/shared/sysdep/kernel-offsets.h b/arch/x86/um/shared/sysdep/kernel-offsets.h
index 46a9df99f3c5..7e1d35b6ad5c 100644
--- a/arch/x86/um/shared/sysdep/kernel-offsets.h
+++ b/arch/x86/um/shared/sysdep/kernel-offsets.h
@@ -2,16 +2,9 @@
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/crypto.h>
+#include <linux/kbuild.h>
#include <asm/mman.h>
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
- DEFINE(sym, offsetof(struct str, mem));
-
void foo(void)
{
#include <common-offsets.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index a732bc2b9dfc..7cd442690f9d 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -277,31 +277,19 @@ static bool __init xen_check_mwait(void)
static bool __init xen_check_xsave(void)
{
- unsigned int err, eax, edx;
+ unsigned int cx, xsave_mask;
- /*
- * Xen 4.0 and older accidentally leaked the host XSAVE flag into guest
- * view, despite not being able to support guests using the
- * functionality. Probe for the actual availability of XSAVE by seeing
- * whether xgetbv executes successfully or raises #UD.
- */
- asm volatile("1: .byte 0x0f,0x01,0xd0\n\t" /* xgetbv */
- "xor %[err], %[err]\n"
- "2:\n\t"
- ".pushsection .fixup,\"ax\"\n\t"
- "3: movl $1,%[err]\n\t"
- "jmp 2b\n\t"
- ".popsection\n\t"
- _ASM_EXTABLE(1b, 3b)
- : [err] "=r" (err), "=a" (eax), "=d" (edx)
- : "c" (0));
-
- return err == 0;
+ cx = cpuid_ecx(1);
+
+ xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) |
+ (1 << (X86_FEATURE_OSXSAVE % 32));
+
+ /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+ return (cx & xsave_mask) == xsave_mask;
}
static void __init xen_init_capabilities(void)
{
- setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
setup_force_cpu_cap(X86_FEATURE_XENPV);
setup_clear_cpu_cap(X86_FEATURE_DCA);
setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
@@ -317,10 +305,7 @@ static void __init xen_init_capabilities(void)
else
setup_clear_cpu_cap(X86_FEATURE_MWAIT);
- if (xen_check_xsave()) {
- setup_force_cpu_cap(X86_FEATURE_XSAVE);
- setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
- } else {
+ if (!xen_check_xsave()) {
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_OSXSAVE);
}
@@ -988,6 +973,13 @@ void xen_setup_shared_info(void)
#endif
xen_setup_mfn_list_list();
+
+ /*
+ * Now that shared info is set up we can start using routines that
+ * point to pvclock area.
+ */
+ if (system_state == SYSTEM_BOOTING)
+ xen_init_time_ops();
}
/* This is called once we have the cpu_possible_mask */
@@ -1286,8 +1278,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
- xen_init_time_ops();
-
/*
* Set up some pagetable state before starting to set any ptes.
*/
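
The rewritten xen_check_xsave() stops probing xgetbv for a #UD and instead
trusts CPUID leaf 1: if Xen exposes both the XSAVE and OSXSAVE bits in ECX,
the feature is genuinely usable by the guest. A userspace equivalent of the
bit test on an x86 build, using the GCC/Clang <cpuid.h> helper:

#include <cpuid.h>
#include <stdio.h>

#define XSAVE_BIT	(1u << 26)	/* CPUID.1:ECX.XSAVE */
#define OSXSAVE_BIT	(1u << 27)	/* CPUID.1:ECX.OSXSAVE */

int main(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int mask = XSAVE_BIT | OSXSAVE_BIT;

	if (!__get_cpuid(1, &ax, &bx, &cx, &dx))
		return 1;

	/* same test as xen_check_xsave(): both bits must be set */
	printf("xsave usable: %s\n", (cx & mask) == mask ? "yes" : "no");
	return 0;
}
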
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 9d9ae6650aa1..7397d8b8459d 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2025,7 +2025,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
/*
* Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
*/
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
@@ -2046,7 +2047,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
sizeof(pud)));
if (!pud_present(pud))
return 0;
- pa = pud_pfn(pud) << PAGE_SHIFT;
+ pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud))
return pa + (vaddr & ~PUD_MASK);
@@ -2054,7 +2055,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
sizeof(pmd)));
if (!pmd_present(pmd))
return 0;
- pa = pmd_pfn(pmd) << PAGE_SHIFT;
+ pa = pmd_val(pmd) & PTE_PFN_MASK;
if (pmd_large(pmd))
return pa + (vaddr & ~PMD_MASK);
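
The xen_early_virt_to_phys() fix addresses the comment added above: under Xen
PV a large mapping's guest-physical base need not be large-page aligned, and
reconstructing the address from the large-page PFN silently drops the bits
below that boundary; masking the raw entry with PTE_PFN_MASK keeps them. A
toy computation showing the bits the old code lost (2M case, constants
abbreviated):

#include <stdio.h>

#define PMD_SHIFT	21				/* 2M large pages */
#define PTE_PFN_MASK	0x000ffffffffff000ULL		/* address bits 12..51 */

int main(void)
{
	/* A 2M mapping whose guest-physical base is NOT 2M-aligned:
	 * the low address bits 0x145000 are meaningful under Xen PV. */
	unsigned long long pmd_val = 0x12345000ULL | 0x1e3;	/* base | flags */

	/* pmd_pfn() << PAGE_SHIFT keeps only the 2M-aligned part */
	unsigned long long old_pa = pmd_val & (~0ULL << PMD_SHIFT);
	/* the fix keeps every address bit the entry carries */
	unsigned long long new_pa = pmd_val & PTE_PFN_MASK;

	printf("old: %#llx  (offset below 2M lost)\n", old_pa);
	printf("new: %#llx\n", new_pa);
	return 0;
}
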
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 090c7eb4dca9..a1895a8e85c1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -406,7 +406,7 @@ static void __init xen_time_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
-void __init xen_init_time_ops(void)
+void __ref xen_init_time_ops(void)
{
pv_time_ops = xen_time_ops;
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 56aad54e7fb7..b15bf6bc0e94 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,25 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += byteorder.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += swab.h
-header-y += termbits.h
-header-y += types.h
-header-y += unistd.h