author     Johannes Berg <johannes.berg@intel.com>  2023-10-05 22:57:34 +0200
committer  Kalle Valo <kvalo@kernel.org>            2023-10-06 17:08:47 +0300
commit     7d6904bf26b96ef087514cb7a8c50b62a4911c99 (patch)
tree       2feaff8495d88c2102dc2e017b7afc5c58db02d6
parent     9418edf8ff01e7a4904aac1aca4864ecdea37593 (diff)
parent     22061bfc57fe08c77141dc876b4af75603c4d61d (diff)

Merge wireless into wireless-next

Resolve several conflicts, mostly between changes/fixes in wireless and
the locking rework in wireless-next. One of the conflicts actually shows
a bug in wireless that we'll want to fix separately.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>

-rw-r--r--Documentation/ABI/testing/debugfs-driver-qat61
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ccp18
-rw-r--r--Documentation/ABI/testing/sysfs-driver-chromeos-acpi17
-rw-r--r--Documentation/ABI/testing/sysfs-platform-power-on-reason12
-rw-r--r--Documentation/RCU/lockdep-splat.rst2
-rw-r--r--Documentation/RCU/rculist_nulls.rst38
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt75
-rw-r--r--Documentation/arch/arm64/silicon-errata.rst19
-rw-r--r--Documentation/arch/arm64/sme.rst2
-rw-r--r--Documentation/arch/index.rst2
-rw-r--r--Documentation/arch/s390/3270.ChangeLog (renamed from Documentation/s390/3270.ChangeLog)0
-rw-r--r--Documentation/arch/s390/3270.rst (renamed from Documentation/s390/3270.rst)4
-rw-r--r--Documentation/arch/s390/cds.rst (renamed from Documentation/s390/cds.rst)2
-rw-r--r--Documentation/arch/s390/common_io.rst (renamed from Documentation/s390/common_io.rst)2
-rw-r--r--Documentation/arch/s390/config3270.sh (renamed from Documentation/s390/config3270.sh)0
-rw-r--r--Documentation/arch/s390/driver-model.rst (renamed from Documentation/s390/driver-model.rst)0
-rw-r--r--Documentation/arch/s390/features.rst (renamed from Documentation/s390/features.rst)0
-rw-r--r--Documentation/arch/s390/index.rst (renamed from Documentation/s390/index.rst)0
-rw-r--r--Documentation/arch/s390/monreader.rst (renamed from Documentation/s390/monreader.rst)0
-rw-r--r--Documentation/arch/s390/pci.rst (renamed from Documentation/s390/pci.rst)2
-rw-r--r--Documentation/arch/s390/qeth.rst (renamed from Documentation/s390/qeth.rst)0
-rw-r--r--Documentation/arch/s390/s390dbf.rst (renamed from Documentation/s390/s390dbf.rst)0
-rw-r--r--Documentation/arch/s390/text_files.rst (renamed from Documentation/s390/text_files.rst)0
-rw-r--r--Documentation/arch/s390/vfio-ap-locking.rst (renamed from Documentation/s390/vfio-ap-locking.rst)0
-rw-r--r--Documentation/arch/s390/vfio-ap.rst (renamed from Documentation/s390/vfio-ap.rst)0
-rw-r--r--Documentation/arch/s390/vfio-ccw.rst (renamed from Documentation/s390/vfio-ccw.rst)2
-rw-r--r--Documentation/arch/s390/zfcpdump.rst (renamed from Documentation/s390/zfcpdump.rst)0
-rw-r--r--Documentation/arch/x86/boot.rst2
-rw-r--r--Documentation/core-api/cpu_hotplug.rst10
-rw-r--r--Documentation/core-api/netlink.rst9
-rw-r--r--Documentation/dev-tools/kunit/run_wrapper.rst12
-rw-r--r--Documentation/dev-tools/kunit/running_tips.rst166
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.yaml5
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml5
-rw-r--r--Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt132
-rw-r--r--Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpio/adi,ds4520-gpio.yaml51
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt52
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,kona-gpio.yaml100
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpio/st,stmpe-gpio.yaml4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml1
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml6
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.yaml10
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-atmel.txt4
-rw-r--r--Documentation/devicetree/bindings/net/dsa/marvell.txt2
-rw-r--r--Documentation/devicetree/bindings/net/ti,icss-iep.yaml45
-rw-r--r--Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml9
-rw-r--r--Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt35
-rw-r--r--Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml55
-rw-r--r--Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml92
-rw-r--r--Documentation/devicetree/bindings/opp/opp-v2-base.yaml2
-rw-r--r--Documentation/devicetree/bindings/opp/ti,omap-opp-supply.yaml101
-rw-r--r--Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt63
-rw-r--r--Documentation/devicetree/bindings/regulator/active-semi,act8846.yaml74
-rw-r--r--Documentation/devicetree/bindings/regulator/adi,max77857.yaml86
-rw-r--r--Documentation/devicetree/bindings/regulator/awinic,aw37503.yaml78
-rw-r--r--Documentation/devicetree/bindings/regulator/dlg,da9121.yaml22
-rw-r--r--Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml132
-rw-r--r--Documentation/devicetree/bindings/regulator/mps,mp5416.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml3
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.yaml4
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml26
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,sdm845-refgen-regulator.yaml57
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rt5739.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rtmv20-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml197
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml1
-rw-r--r--Documentation/devicetree/bindings/regulator/slg51000.txt88
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/wlf,arizona.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml313
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,bcm63xx-spi.yaml71
-rw-r--r--Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml12
-rw-r--r--Documentation/devicetree/bindings/spi/loongson,ls2k-spi.yaml46
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt61
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.yaml100
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt37
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.yaml81
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt37
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.yaml90
-rw-r--r--Documentation/devicetree/bindings/spi/spi-bcm63xx.txt33
-rw-r--r--Documentation/devicetree/bindings/spi/spi-cadence.yaml6
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/spi-pl022.yaml1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml6
-rw-r--r--Documentation/driver-api/s390-drivers.rst4
-rw-r--r--Documentation/filesystems/fscrypt.rst164
-rw-r--r--Documentation/filesystems/idmappings.rst14
-rw-r--r--Documentation/filesystems/locking.rst23
-rw-r--r--Documentation/filesystems/tmpfs.rst38
-rw-r--r--Documentation/filesystems/vfs.rst12
-rw-r--r--Documentation/firmware-guide/acpi/chromeos-acpi-device.rst5
-rw-r--r--Documentation/hwmon/hs3001.rst37
-rw-r--r--Documentation/hwmon/index.rst2
-rw-r--r--Documentation/hwmon/nct6775.rst11
-rw-r--r--Documentation/hwmon/pmbus.rst2
-rw-r--r--Documentation/hwmon/smm665.rst187
-rw-r--r--Documentation/netlink/genetlink-c.yaml2
-rw-r--r--Documentation/netlink/genetlink-legacy.yaml2
-rw-r--r--Documentation/netlink/netlink-raw.yaml410
-rw-r--r--Documentation/netlink/specs/rt_addr.yaml179
-rw-r--r--Documentation/netlink/specs/rt_link.yaml1432
-rw-r--r--Documentation/netlink/specs/rt_route.yaml327
-rw-r--r--Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst20
-rw-r--r--Documentation/networking/devlink/devlink-port.rst55
-rw-r--r--Documentation/process/changes.rst4
-rw-r--r--Documentation/rust/quick-start.rst42
-rw-r--r--Documentation/scheduler/sched-design-CFS.rst2
-rw-r--r--Documentation/userspace-api/netlink/genetlink-legacy.rst26
-rw-r--r--Documentation/userspace-api/netlink/index.rst1
-rw-r--r--Documentation/userspace-api/netlink/netlink-raw.rst58
-rw-r--r--Documentation/userspace-api/netlink/specs.rst13
-rw-r--r--MAINTAINERS120
-rw-r--r--Makefile18
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/arm/include/asm/syscall.h3
-rw-r--r--arch/arm/kernel/entry-common.S1
-rw-r--r--arch/arm/kernel/hw_breakpoint.c8
-rw-r--r--arch/arm/kernel/ptrace.c5
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm64/Kconfig6
-rw-r--r--arch/arm64/crypto/Makefile5
-rw-r--r--arch/arm64/crypto/aes-glue-ce.c2
-rw-r--r--arch/arm64/crypto/aes-glue-neon.c1
-rw-r--r--arch/arm64/include/asm/acpi.h3
-rw-r--r--arch/arm64/include/asm/efi.h18
-rw-r--r--arch/arm64/include/asm/hwcap.h1
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h27
-rw-r--r--arch/arm64/include/asm/mmu.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h8
-rw-r--r--arch/arm64/include/asm/sdei.h6
-rw-r--r--arch/arm64/include/asm/sysreg.h6
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h1
-rw-r--r--arch/arm64/kernel/cpufeature.c9
-rw-r--r--arch/arm64/kernel/cpuidle.c2
-rw-r--r--arch/arm64/kernel/cpuinfo.c1
-rw-r--r--arch/arm64/kernel/efi.c16
-rw-r--r--arch/arm64/kernel/entry-common.c32
-rw-r--r--arch/arm64/kernel/entry.S27
-rw-r--r--arch/arm64/kernel/fpsimd.c22
-rw-r--r--arch/arm64/kernel/head.S4
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c4
-rw-r--r--arch/arm64/kernel/idreg-override.c6
-rw-r--r--arch/arm64/kernel/pci.c2
-rw-r--r--arch/arm64/kernel/ptrace.c3
-rw-r--r--arch/arm64/kernel/sdei.c3
-rw-r--r--arch/arm64/kernel/smp.c8
-rw-r--r--arch/arm64/kernel/syscall.c33
-rw-r--r--arch/arm64/kernel/vdso/vdso.lds.S2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/list_debug.c8
-rw-r--r--arch/arm64/mm/init.c27
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/ia64/include/asm/acpi.h6
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/loongarch/Kconfig2
-rw-r--r--arch/loongarch/Makefile2
-rw-r--r--arch/loongarch/include/asm/Kbuild1
-rw-r--r--arch/loongarch/include/asm/fpu.h22
-rw-r--r--arch/loongarch/include/asm/local.h4
-rw-r--r--arch/loongarch/include/asm/ptrace.h2
-rw-r--r--arch/loongarch/include/asm/smp.h2
-rw-r--r--arch/loongarch/kernel/fpu.S2
-rw-r--r--arch/loongarch/kernel/hw_breakpoint.c3
-rw-r--r--arch/loongarch/kernel/mcount.S2
-rw-r--r--arch/loongarch/kernel/mcount_dyn.S1
-rw-r--r--arch/loongarch/kernel/process.c7
-rw-r--r--arch/loongarch/kernel/ptrace.c4
-rw-r--r--arch/loongarch/kernel/smp.c2
-rw-r--r--arch/loongarch/kernel/traps.c14
-rw-r--r--arch/loongarch/lib/clear_user.S2
-rw-r--r--arch/loongarch/lib/copy_user.S2
-rw-r--r--arch/loongarch/lib/memcpy.S2
-rw-r--r--arch/loongarch/lib/memmove.S2
-rw-r--r--arch/loongarch/lib/memset.S2
-rw-r--r--arch/loongarch/lib/unaligned.S1
-rw-r--r--arch/loongarch/mm/page.S2
-rw-r--r--arch/loongarch/mm/tlbex.S1
-rw-r--r--arch/m68k/configs/amiga_defconfig2
-rw-r--r--arch/m68k/configs/apollo_defconfig2
-rw-r--r--arch/m68k/configs/atari_defconfig2
-rw-r--r--arch/m68k/configs/bvme6000_defconfig2
-rw-r--r--arch/m68k/configs/hp300_defconfig2
-rw-r--r--arch/m68k/configs/mac_defconfig2
-rw-r--r--arch/m68k/configs/multi_defconfig2
-rw-r--r--arch/m68k/configs/mvme147_defconfig2
-rw-r--r--arch/m68k/configs/mvme16x_defconfig2
-rw-r--r--arch/m68k/configs/q40_defconfig2
-rw-r--r--arch/m68k/configs/sun3_defconfig2
-rw-r--r--arch/m68k/configs/sun3x_defconfig2
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/div64.h3
-rw-r--r--arch/m68k/include/asm/string.h1
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/m68k/lib/divsi3.S2
-rw-r--r--arch/m68k/lib/modsi3.S2
-rw-r--r--arch/m68k/lib/mulsi3.S2
-rw-r--r--arch/m68k/lib/udivsi3.S2
-rw-r--r--arch/m68k/lib/umodsi3.S2
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/mips/include/asm/local.h4
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl1
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/powerpc/crypto/Kconfig26
-rw-r--r--arch/powerpc/crypto/Makefile4
-rw-r--r--arch/powerpc/crypto/chacha-p10-glue.c221
-rw-r--r--arch/powerpc/crypto/chacha-p10le-8x.S842
-rw-r--r--arch/powerpc/crypto/poly1305-p10-glue.c186
-rw-r--r--arch/powerpc/crypto/poly1305-p10le_64.S1075
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/powerpc/mm/book3s64/subpage_prot.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/riscv/Kconfig8
-rw-r--r--arch/riscv/include/asm/efi.h10
-rw-r--r--arch/riscv/include/asm/vector.h3
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h1
-rw-r--r--arch/riscv/kernel/ptrace.c69
-rw-r--r--arch/riscv/mm/pageattr.c1
-rw-r--r--arch/s390/Kbuild2
-rw-r--r--arch/s390/Kconfig31
-rw-r--r--arch/s390/Makefile1
-rw-r--r--arch/s390/boot/startup.c22
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/defconfig1
-rw-r--r--arch/s390/crypto/paes_s390.c2
-rw-r--r--arch/s390/hypfs/Makefile11
-rw-r--r--arch/s390/hypfs/hypfs.h10
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c31
-rw-r--r--arch/s390/hypfs/hypfs_diag.c453
-rw-r--r--arch/s390/hypfs/hypfs_diag.h35
-rw-r--r--arch/s390/hypfs/hypfs_diag_fs.c393
-rw-r--r--arch/s390/hypfs/hypfs_vm.c175
-rw-r--r--arch/s390/hypfs/hypfs_vm.h50
-rw-r--r--arch/s390/hypfs/hypfs_vm_fs.c139
-rw-r--r--arch/s390/hypfs/inode.c39
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/debug.h4
-rw-r--r--arch/s390/include/asm/diag.h3
-rw-r--r--arch/s390/include/asm/ftrace.h17
-rw-r--r--arch/s390/include/asm/kfence.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/maccess.h3
-rw-r--r--arch/s390/include/asm/page.h12
-rw-r--r--arch/s390/include/asm/pfault.h26
-rw-r--r--arch/s390/include/asm/pgtable.h2
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/setup.h9
-rw-r--r--arch/s390/include/asm/uv.h6
-rw-r--r--arch/s390/include/uapi/asm/pkey.h2
-rw-r--r--arch/s390/kernel/Makefile7
-rw-r--r--arch/s390/kernel/asm-offsets.c9
-rw-r--r--arch/s390/kernel/cert_store.c811
-rw-r--r--arch/s390/kernel/diag.c25
-rw-r--r--arch/s390/kernel/ebcdic.c2
-rw-r--r--arch/s390/kernel/entry.S2
-rw-r--r--arch/s390/kernel/ipl.c32
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/machine_kexec_file.c4
-rw-r--r--arch/s390/kernel/mcount.S65
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/smp.c16
-rw-r--r--arch/s390/kernel/sthyi.c4
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/s390/kernel/uv.c3
-rw-r--r--arch/s390/kvm/kvm-s390.h12
-rw-r--r--arch/s390/kvm/pv.c14
-rw-r--r--arch/s390/lib/mem.S2
-rw-r--r--arch/s390/lib/tishift.S2
-rw-r--r--arch/s390/mm/Makefile1
-rw-r--r--arch/s390/mm/cmm.c2
-rw-r--r--arch/s390/mm/dump_pagetables.c2
-rw-r--r--arch/s390/mm/extmem.c9
-rw-r--r--arch/s390/mm/fault.c228
-rw-r--r--arch/s390/mm/gmap.c5
-rw-r--r--arch/s390/mm/maccess.c7
-rw-r--r--arch/s390/mm/pfault.c248
-rw-r--r--arch/s390/mm/vmem.c4
-rw-r--r--arch/s390/pci/pci_clp.c7
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/um/drivers/mconsole_kern.c4
-rw-r--r--arch/um/drivers/vector_user.c4
-rw-r--r--arch/um/include/shared/user.h1
-rw-r--r--arch/um/os-Linux/umid.c6
-rw-r--r--arch/x86/Kconfig38
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/boot/compressed/efi_mixed.S107
-rw-r--r--arch/x86/boot/compressed/error.c2
-rw-r--r--arch/x86/boot/compressed/error.h2
-rw-r--r--arch/x86/boot/compressed/head_32.S32
-rw-r--r--arch/x86/boot/compressed/head_64.S280
-rw-r--r--arch/x86/boot/compressed/misc.c44
-rw-r--r--arch/x86/boot/compressed/misc.h2
-rw-r--r--arch/x86/boot/compressed/pgtable.h10
-rw-r--r--arch/x86/boot/compressed/pgtable_64.c87
-rw-r--r--arch/x86/boot/compressed/sev.c114
-rw-r--r--arch/x86/configs/i386_defconfig1
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c22
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--arch/x86/events/amd/ibs.c186
-rw-r--r--arch/x86/events/core.c11
-rw-r--r--arch/x86/events/intel/core.c54
-rw-r--r--arch/x86/events/intel/cstate.c12
-rw-r--r--arch/x86/events/intel/ds.c9
-rw-r--r--arch/x86/events/intel/uncore.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c2
-rw-r--r--arch/x86/events/msr.c10
-rw-r--r--arch/x86/events/perf_event.h2
-rw-r--r--arch/x86/events/rapl.c2
-rw-r--r--arch/x86/include/asm/acpi.h24
-rw-r--r--arch/x86/include/asm/boot.h8
-rw-r--r--arch/x86/include/asm/div64.h6
-rw-r--r--arch/x86/include/asm/efi.h7
-rw-r--r--arch/x86/include/asm/intel-family.h18
-rw-r--r--arch/x86/include/asm/local.h4
-rw-r--r--arch/x86/include/asm/mem_encrypt.h6
-rw-r--r--arch/x86/include/asm/microcode.h155
-rw-r--r--arch/x86/include/asm/microcode_amd.h60
-rw-r--r--arch/x86/include/asm/microcode_intel.h88
-rw-r--r--arch/x86/include/asm/paravirt.h7
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/qspinlock.h7
-rw-r--r--arch/x86/include/asm/qspinlock_paravirt.h2
-rw-r--r--arch/x86/include/asm/sev.h6
-rw-r--r--arch/x86/include/asm/topology.h4
-rw-r--r--arch/x86/include/asm/uv/bios.h4
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/kernel/alternative.c1
-rw-r--r--arch/x86/kernel/amd_nb.c8
-rw-r--r--arch/x86/kernel/apic/ipi.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c5
-rw-r--r--arch/x86/kernel/cpu/common.c6
-rw-r--r--arch/x86/kernel/cpu/intel.c176
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c2
-rw-r--r--arch/x86/kernel/cpu/mce/core.c35
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c19
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h6
-rw-r--r--arch/x86/kernel/cpu/microcode/Makefile4
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c133
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c17
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c304
-rw-r--r--arch/x86/kernel/cpu/microcode/internal.h131
-rw-r--r--arch/x86/kernel/fpu/context.h3
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/fpu/xstate.c7
-rw-r--r--arch/x86/kernel/head_64.S32
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/kvm.c4
-rw-r--r--arch/x86/kernel/paravirt.c11
-rw-r--r--arch/x86/kernel/sev.c6
-rw-r--r--arch/x86/kernel/smpboot.c19
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/mm/init.c3
-rw-r--r--arch/x86/mm/mem_encrypt_amd.c13
-rw-r--r--arch/x86/platform/efi/memmap.c2
-rw-r--r--arch/x86/platform/uv/uv_nmi.c12
-rw-r--r--arch/x86/purgatory/purgatory.c1
-rw-r--r--arch/x86/xen/enlighten_pv.c10
-rw-r--r--arch/x86/xen/mmu_pv.c18
-rw-r--r--arch/x86/xen/setup.c4
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl1
-rw-r--r--block/bdev.c69
-rw-r--r--block/disk-events.c23
-rw-r--r--block/genhd.c45
-rw-r--r--block/ioctl.c9
-rw-r--r--block/partitions/core.c5
-rw-r--r--certs/system_keyring.c91
-rw-r--r--crypto/af_alg.c4
-rw-r--r--crypto/algapi.c16
-rw-r--r--crypto/asymmetric_keys/public_key.c8
-rw-r--r--crypto/asymmetric_keys/restrict.c44
-rw-r--r--crypto/asymmetric_keys/verify_pefile.c2
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c5
-rw-r--r--crypto/crypto_engine.c244
-rw-r--r--crypto/jitterentropy.c10
-rw-r--r--crypto/lrw.c6
-rw-r--r--crypto/sig.c5
-rw-r--r--crypto/xts.c6
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c27
-rw-r--r--drivers/acpi/acpi_cmos_rtc.c25
-rw-r--r--drivers/acpi/acpi_extlog.c2
-rw-r--r--drivers/acpi/acpi_processor.c124
-rw-r--r--drivers/acpi/acpi_tad.c27
-rw-r--r--drivers/acpi/acpi_video.c26
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h1
-rw-r--r--drivers/acpi/acpica/aclocal.h38
-rw-r--r--drivers/acpi/acpica/acpredef.h3
-rw-r--r--drivers/acpi/acpica/dbcmds.c58
-rw-r--r--drivers/acpi/acpica/dbinput.c8
-rw-r--r--drivers/acpi/acpica/dswstate.c4
-rw-r--r--drivers/acpi/acpica/exserial.c3
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c5
-rw-r--r--drivers/acpi/arm64/Makefile1
-rw-r--r--drivers/acpi/arm64/amba.c (renamed from drivers/acpi/acpi_amba.c)2
-rw-r--r--drivers/acpi/arm64/init.c2
-rw-r--r--drivers/acpi/arm64/init.h1
-rw-r--r--drivers/acpi/arm64/iort.c5
-rw-r--r--drivers/acpi/battery.c24
-rw-r--r--drivers/acpi/bus.c33
-rw-r--r--drivers/acpi/hed.c15
-rw-r--r--drivers/acpi/internal.h16
-rw-r--r--drivers/acpi/nfit/core.c42
-rw-r--r--drivers/acpi/prmt.c8
-rw-r--r--drivers/acpi/processor_core.c29
-rw-r--r--drivers/acpi/processor_pdc.c97
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/acpi/thermal.c470
-rw-r--r--drivers/acpi/video_detect.c27
-rw-r--r--drivers/acpi/x86/s2idle.c99
-rw-r--r--drivers/acpi/x86/utils.c35
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/auxdisplay/hd44780_common.c10
-rw-r--r--drivers/base/regmap/Kconfig12
-rw-r--r--drivers/base/regmap/regcache-maple.c16
-rw-r--r--drivers/base/regmap/regcache-rbtree.c12
-rw-r--r--drivers/base/regmap/regcache.c38
-rw-r--r--drivers/base/regmap/regmap-kunit.c40
-rw-r--r--drivers/base/regmap/regmap-mmio.c24
-rw-r--r--drivers/base/regmap/regmap.c122
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/nbd.c8
-rw-r--r--drivers/char/hw_random/Kconfig37
-rw-r--r--drivers/char/hw_random/amd-rng.c1
-rw-r--r--drivers/char/hw_random/arm_smccc_trng.c2
-rw-r--r--drivers/char/hw_random/atmel-rng.c2
-rw-r--r--drivers/char/hw_random/ba431-rng.c10
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c3
-rw-r--r--drivers/char/hw_random/cctrng.c93
-rw-r--r--drivers/char/hw_random/cn10k-rng.c18
-rw-r--r--drivers/char/hw_random/core.c3
-rw-r--r--drivers/char/hw_random/exynos-trng.c8
-rw-r--r--drivers/char/hw_random/imx-rngc.c24
-rw-r--r--drivers/char/hw_random/ingenic-rng.c2
-rw-r--r--drivers/char/hw_random/ingenic-trng.c59
-rw-r--r--drivers/char/hw_random/iproc-rng200.c28
-rw-r--r--drivers/char/hw_random/nomadik-rng.c19
-rw-r--r--drivers/char/hw_random/npcm-rng.c3
-rw-r--r--drivers/char/hw_random/omap-rng.c2
-rw-r--r--drivers/char/hw_random/omap3-rom-rng.c1
-rw-r--r--drivers/char/hw_random/pasemi-rng.c3
-rw-r--r--drivers/char/hw_random/pic32-rng.c76
-rw-r--r--drivers/char/hw_random/stm32-rng.c3
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c25
-rw-r--r--drivers/char/hw_random/xgene-rng.c6
-rw-r--r--drivers/char/hw_random/xiphera-trng.c1
-rw-r--r--drivers/char/tpm/eventlog/tpm1.c3
-rw-r--r--drivers/char/tpm/tpm_tis.c93
-rw-r--r--drivers/char/tpm/tpm_tis_core.c60
-rw-r--r--drivers/char/tpm/tpm_tis_core.h1
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c91
-rw-r--r--drivers/char/tpm/tpm_tis_synquacer.c18
-rw-r--r--drivers/clk/clk-devres.c13
-rw-r--r--drivers/clk/keystone/syscon-clk.c6
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c6
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c50
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c4
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c12
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c13
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c7
-rw-r--r--drivers/cpufreq/cpufreq-dt.c5
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_stats.c3
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c6
-rw-r--r--drivers/cpufreq/imx-cpufreq-dt.c6
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c6
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c9
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c2
-rw-r--r--drivers/cpufreq/omap-cpufreq.c6
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c6
-rw-r--r--drivers/cpufreq/powernow-k8.c3
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c2
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq_pmi.c1
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c8
-rw-r--r--drivers/cpufreq/qcom-cpufreq-nvmem.c7
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c6
-rw-r--r--drivers/cpufreq/raspberrypi-cpufreq.c6
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c7
-rw-r--r--drivers/cpufreq/sti-cpufreq.c2
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c6
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c6
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c35
-rw-r--r--drivers/cpufreq/ti-cpufreq.c2
-rw-r--r--drivers/cpufreq/vexpress-spc-cpufreq.c6
-rw-r--r--drivers/cpuidle/governors/gov.h14
-rw-r--r--drivers/cpuidle/governors/menu.c65
-rw-r--r--drivers/cpuidle/governors/teo.c235
-rw-r--r--drivers/crypto/Kconfig7
-rw-r--r--drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c41
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c172
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c98
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h16
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c27
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c155
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c114
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h19
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-cipher.c13
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl-core.c61
-rw-r--r--drivers/crypto/amlogic/amlogic-gxl.h5
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c40
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-crypto.c134
-rw-r--r--drivers/crypto/aspeed/aspeed-hace-hash.c285
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c9
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h30
-rw-r--r--drivers/crypto/atmel-aes.c21
-rw-r--r--drivers/crypto/atmel-ecc.c2
-rw-r--r--drivers/crypto/atmel-sha.c28
-rw-r--r--drivers/crypto/atmel-tdes.c20
-rw-r--r--drivers/crypto/bcm/cipher.c6
-rw-r--r--drivers/crypto/caam/caamalg.c386
-rw-r--r--drivers/crypto/caam/caamhash.c47
-rw-r--r--drivers/crypto/caam/caampkc.c25
-rw-r--r--drivers/crypto/caam/caampkc.h3
-rw-r--r--drivers/crypto/caam/ctrl.c107
-rw-r--r--drivers/crypto/caam/intern.h25
-rw-r--r--drivers/crypto/caam/jr.c206
-rw-r--r--drivers/crypto/caam/qi.c1
-rw-r--r--drivers/crypto/caam/regs.h9
-rw-r--r--drivers/crypto/ccp/Makefile3
-rw-r--r--drivers/crypto/ccp/dbc.c250
-rw-r--r--drivers/crypto/ccp/dbc.h56
-rw-r--r--drivers/crypto/ccp/psp-dev.c19
-rw-r--r--drivers/crypto/ccp/psp-dev.h1
-rw-r--r--drivers/crypto/ccp/sp-dev.h7
-rw-r--r--drivers/crypto/ccp/sp-pci.c96
-rw-r--r--drivers/crypto/ccree/cc_driver.c1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c3
-rw-r--r--drivers/crypto/chelsio/chcr_core.h1
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h1
-rw-r--r--drivers/crypto/exynos-rng.c4
-rw-r--r--drivers/crypto/gemini/sl3516-ce-cipher.c22
-rw-r--r--drivers/crypto/gemini/sl3516-ce-core.c50
-rw-r--r--drivers/crypto/gemini/sl3516-ce.h8
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c10
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_main.c5
-rw-r--r--drivers/crypto/hisilicon/qm.c28
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c4
-rw-r--r--drivers/crypto/img-hash.c4
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-aes-core.c441
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-ecc.c73
-rw-r--r--drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c243
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c23
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h4
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c3
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c28
-rw-r--r--drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h7
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c28
-rw-r--r--drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h7
-rw-r--r--drivers/crypto/intel/qat/qat_common/Makefile5
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_accel_devices.h14
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_admin.c61
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_clock.c131
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_clock.h14
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_common_drv.h14
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_dbgfs.c12
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_fw_counters.c264
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_fw_counters.h11
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_config.c7
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c70
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat.c336
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat.h79
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c194
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h12
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_init.c28
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_isr.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h23
-rw-r--r--drivers/crypto/intel/qat/qat_common/icp_qat_hw.h5
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_compression.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_uclo.c7
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c13
-rw-r--r--drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h5
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/omap-aes-gcm.c37
-rw-r--r--drivers/crypto/omap-aes.c266
-rw-r--r--drivers/crypto/omap-aes.h15
-rw-r--r--drivers/crypto/omap-des.c214
-rw-r--r--drivers/crypto/omap-sham.c281
-rw-r--r--drivers/crypto/qcom-rng.c10
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.c59
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto.h21
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c80
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_skcipher.c60
-rw-r--r--drivers/crypto/s5p-sss.c1
-rw-r--r--drivers/crypto/sa2ul.c3
-rw-r--r--drivers/crypto/sahara.c1
-rw-r--r--drivers/crypto/starfive/Kconfig2
-rw-r--r--drivers/crypto/starfive/Makefile2
-rw-r--r--drivers/crypto/starfive/jh7110-aes.c1024
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.c53
-rw-r--r--drivers/crypto/starfive/jh7110-cryp.h74
-rw-r--r--drivers/crypto/starfive/jh7110-hash.c290
-rw-r--r--drivers/crypto/stm32/Kconfig2
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c370
-rw-r--r--drivers/crypto/stm32/stm32-hash.c978
-rw-r--r--drivers/crypto/talitos.c4
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c35
-rw-r--r--drivers/crypto/virtio/virtio_crypto_skcipher_algs.c25
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c39
-rw-r--r--drivers/crypto/xilinx/zynqmp-sha.c1
-rw-r--r--drivers/devfreq/devfreq.c10
-rw-r--r--drivers/devfreq/imx-bus.c2
-rw-r--r--drivers/devfreq/imx8m-ddrc.c2
-rw-r--r--drivers/devfreq/mtk-cci-devfreq.c1
-rw-r--r--drivers/devfreq/tegra30-devfreq.c2
-rw-r--r--drivers/dma-buf/sw_sync.c18
-rw-r--r--drivers/edac/amd64_edac.c15
-rw-r--r--drivers/edac/i10nm_base.c2
-rw-r--r--drivers/eisa/eisa-bus.c2
-rw-r--r--drivers/firmware/arm_sdei.c19
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c2
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c10
-rw-r--r--drivers/firmware/efi/libstub/x86-5lvl.c95
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c283
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.h17
-rw-r--r--drivers/firmware/efi/libstub/zboot.c2
-rw-r--r--drivers/firmware/efi/riscv-runtime.c15
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c358
-rw-r--r--drivers/gpio/Kconfig31
-rw-r--r--drivers/gpio/Makefile1
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c127
-rw-r--r--drivers/gpio/gpio-104-idio-16.c286
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c2
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c3
-rw-r--r--drivers/gpio/gpio-ath79.c3
-rw-r--r--drivers/gpio/gpio-bcm-kona.c24
-rw-r--r--drivers/gpio/gpio-brcmstb.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c4
-rw-r--r--drivers/gpio/gpio-creg-snps.c2
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-ds4520.c80
-rw-r--r--drivers/gpio/gpio-eic-sprd.c3
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-ftgpio010.c4
-rw-r--r--drivers/gpio/gpio-ge.c66
-rw-r--r--drivers/gpio/gpio-grgpio.c2
-rw-r--r--drivers/gpio/gpio-hlwd.c2
-rw-r--r--drivers/gpio/gpio-idio-16.c254
-rw-r--r--drivers/gpio/gpio-idio-16.h79
-rw-r--r--drivers/gpio/gpio-imx-scu.c3
-rw-r--r--drivers/gpio/gpio-ixp4xx.c2
-rw-r--r--drivers/gpio/gpio-logicvc.c3
-rw-r--r--drivers/gpio/gpio-lp3943.c2
-rw-r--r--drivers/gpio/gpio-lpc32xx.c4
-rw-r--r--drivers/gpio/gpio-max3191x.c4
-rw-r--r--drivers/gpio/gpio-max732x.c6
-rw-r--r--drivers/gpio/gpio-max77620.c2
-rw-r--r--drivers/gpio/gpio-mb86s7x.c2
-rw-r--r--drivers/gpio/gpio-mlxbf3.c26
-rw-r--r--drivers/gpio/gpio-mmio.c9
-rw-r--r--drivers/gpio/gpio-mpc5200.c2
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c4
-rw-r--r--drivers/gpio/gpio-msc313.c1
-rw-r--r--drivers/gpio/gpio-mxc.c118
-rw-r--r--drivers/gpio/gpio-mxs.c3
-rw-r--r--drivers/gpio/gpio-omap.c8
-rw-r--r--drivers/gpio/gpio-palmas.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c20
-rw-r--r--drivers/gpio/gpio-pca9570.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c29
-rw-r--r--drivers/gpio/gpio-pci-idio-16.c294
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c677
-rw-r--r--drivers/gpio/gpio-pisosr.c28
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c3
-rw-r--r--drivers/gpio/gpio-pxa.c1
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c4
-rw-r--r--drivers/gpio/gpio-rc5t583.c2
-rw-r--r--drivers/gpio/gpio-rcar.c1
-rw-r--r--drivers/gpio/gpio-rockchip.c2
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c1
-rw-r--r--drivers/gpio/gpio-sch.c2
-rw-r--r--drivers/gpio/gpio-sifive.c48
-rw-r--r--drivers/gpio/gpio-sim.c300
-rw-r--r--drivers/gpio/gpio-sprd.c2
-rw-r--r--drivers/gpio/gpio-stp-xway.c3
-rw-r--r--drivers/gpio/gpio-syscon.c3
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-tegra186.c2
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-tps65218.c2
-rw-r--r--drivers/gpio/gpio-tps6586x.c2
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-tqmx86.c2
-rw-r--r--drivers/gpio/gpio-ts4800.c3
-rw-r--r--drivers/gpio/gpio-ts4900.c2
-rw-r--r--drivers/gpio/gpio-uniphier.c1
-rw-r--r--drivers/gpio/gpio-vf610.c4
-rw-r--r--drivers/gpio/gpio-vx855.c2
-rw-r--r--drivers/gpio/gpio-wcd934x.c3
-rw-r--r--drivers/gpio/gpio-ws16c48.c552
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-xra1403.c6
-rw-r--r--drivers/gpio/gpio-zevio.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib-cdev.c107
-rw-r--r--drivers/gpio/gpiolib-of.c26
-rw-r--r--drivers/gpio/gpiolib.c203
-rw-r--r--drivers/gpio/gpiolib.h20
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c27
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c68
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c33
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c3
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c1
-rw-r--r--drivers/hwmon/Kconfig24
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/ad7418.c4
-rw-r--r--drivers/hwmon/ads7828.c5
-rw-r--r--drivers/hwmon/adt7475.c3
-rw-r--r--drivers/hwmon/as370-hwmon.c3
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c3
-rw-r--r--drivers/hwmon/asus-ec-sensors.c2
-rw-r--r--drivers/hwmon/bt1-pvt.c9
-rw-r--r--drivers/hwmon/g762.c1
-rw-r--r--drivers/hwmon/gxp-fan-ctrl.c2
-rw-r--r--drivers/hwmon/hp-wmi-sensors.c22
-rw-r--r--drivers/hwmon/hs3001.c242
-rw-r--r--drivers/hwmon/ina2xx.c3
-rw-r--r--drivers/hwmon/it87.c74
-rw-r--r--drivers/hwmon/k10temp.c8
-rw-r--r--drivers/hwmon/lan966x-hwmon.c24
-rw-r--r--drivers/hwmon/lm63.c4
-rw-r--r--drivers/hwmon/lm75.c3
-rw-r--r--drivers/hwmon/lm85.c4
-rw-r--r--drivers/hwmon/lm90.c4
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c1
-rw-r--r--drivers/hwmon/ltq-cputemp.c3
-rw-r--r--drivers/hwmon/max31730.c1
-rw-r--r--drivers/hwmon/max6621.c2
-rw-r--r--drivers/hwmon/max6639.c6
-rw-r--r--drivers/hwmon/max6697.c3
-rw-r--r--drivers/hwmon/mcp3021.c1
-rw-r--r--drivers/hwmon/mlxreg-fan.c12
-rw-r--r--drivers/hwmon/nct6775-core.c361
-rw-r--r--drivers/hwmon/nct6775-platform.c2
-rw-r--r--drivers/hwmon/nct6775.h30
-rw-r--r--drivers/hwmon/nsa320-hwmon.c4
-rw-r--r--drivers/hwmon/oxp-sensors.c29
-rw-r--r--drivers/hwmon/peci/dimmtemp.c26
-rw-r--r--drivers/hwmon/pmbus/Kconfig7
-rw-r--r--drivers/hwmon/pmbus/acbel-fsg032.c38
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c4
-rw-r--r--drivers/hwmon/pmbus/ir38064.c4
-rw-r--r--drivers/hwmon/pmbus/max20730.c68
-rw-r--r--drivers/hwmon/pmbus/mp2975.c429
-rw-r--r--drivers/hwmon/pmbus/mp5023.c2
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c2
-rw-r--r--drivers/hwmon/pmbus/pli1209bc.c26
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c4
-rw-r--r--drivers/hwmon/pmbus/q54sj108a2.c2
-rw-r--r--drivers/hwmon/pmbus/tps53679.c4
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c4
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c4
-rw-r--r--drivers/hwmon/sbtsi_temp.c1
-rw-r--r--drivers/hwmon/scpi-hwmon.c2
-rw-r--r--drivers/hwmon/sht3x.c387
-rw-r--r--drivers/hwmon/sis5595.c35
-rw-r--r--drivers/hwmon/smm665.c706
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/tmp464.c2
-rw-r--r--drivers/hwmon/tmp513.c7
-rw-r--r--drivers/hwmon/tps23861.c2
-rw-r--r--drivers/hwmon/ultra45_env.c3
-rw-r--r--drivers/hwmon/vexpress-hwmon.c1
-rw-r--r--drivers/hwmon/via686a.c18
-rw-r--r--drivers/hwmon/vt8231.c11
-rw-r--r--drivers/hwmon/w83773g.c2
-rw-r--r--drivers/idle/intel_idle.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c3
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c1
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c1
-rw-r--r--drivers/irqchip/irq-gic-pm.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/irqchip/irq-i8259.c2
-rw-r--r--drivers/irqchip/irq-imx-intmux.c3
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c3
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c4
-rw-r--r--drivers/irqchip/irq-keystone.c2
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c2
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c1
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c2
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c3
-rw-r--r--drivers/irqchip/irq-madera.c4
-rw-r--r--drivers/irqchip/irq-meson-gpio.c5
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c3
-rw-r--r--drivers/irqchip/irq-orion.c3
-rw-r--r--drivers/irqchip/irq-pruss-intc.c6
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c1
-rw-r--r--drivers/irqchip/irq-st.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c3
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c1
-rw-r--r--drivers/irqchip/irq-tb10x.c1
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c4
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c4
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c1
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c1
-rw-r--r--drivers/irqchip/irqchip.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c1
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c2
-rw-r--r--drivers/mfd/Kconfig23
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/cs42l43-i2c.c98
-rw-r--r--drivers/mfd/cs42l43-sdw.c239
-rw-r--r--drivers/mfd/cs42l43.c1188
-rw-r--r--drivers/mfd/cs42l43.h28
-rw-r--r--drivers/mfd/tps65086.c17
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/misc/lkdtm/bugs.c51
-rw-r--r--drivers/mmc/core/block.c11
-rw-r--r--drivers/mmc/core/bus.c3
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c6
-rw-r--r--drivers/mmc/core/pwrseq_sd8787.c6
-rw-r--r--drivers/mmc/core/pwrseq_simple.c6
-rw-r--r--drivers/mmc/core/sd.c14
-rw-r--r--drivers/mmc/core/sd_ops.c1
-rw-r--r--drivers/mmc/core/sd_ops.h2
-rw-r--r--drivers/mmc/host/alcor.c6
-rw-r--r--drivers/mmc/host/atmel-mci.c140
-rw-r--r--drivers/mmc/host/au1xmmc.c5
-rw-r--r--drivers/mmc/host/bcm2835.c6
-rw-r--r--drivers/mmc/host/cavium-octeon.c7
-rw-r--r--drivers/mmc/host/cavium-thunderx.c1
-rw-r--r--drivers/mmc/host/cb710-mmc.c5
-rw-r--r--drivers/mmc/host/davinci_mmc.c9
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c6
-rw-r--r--drivers/mmc/host/dw_mmc-hi3798cv200.c6
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c6
-rw-r--r--drivers/mmc/host/jz4740_mmc.c6
-rw-r--r--drivers/mmc/host/litex_mmc.c5
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c11
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c6
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c6
-rw-r--r--drivers/mmc/host/mmci.c39
-rw-r--r--drivers/mmc/host/moxart-mmc.c8
-rw-r--r--drivers/mmc/host/mtk-sd.c10
-rw-r--r--drivers/mmc/host/mvsdio.c6
-rw-r--r--drivers/mmc/host/mxcmmc.c10
-rw-r--r--drivers/mmc/host/mxs-mmc.c7
-rw-r--r--drivers/mmc/host/omap.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c11
-rw-r--r--drivers/mmc/host/owl-mmc.c9
-rw-r--r--drivers/mmc/host/pxamci.c12
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c26
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c5
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c5
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c9
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c8
-rw-r--r--drivers/mmc/host/sdhci-acpi.c6
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c13
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c18
-rw-r--r--drivers/mmc/host/sdhci-cadence.c19
-rw-r--r--drivers/mmc/host/sdhci-dove.c8
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c15
-rw-r--r--drivers/mmc/host/sdhci-esdhc-mcf.c6
-rw-r--r--drivers/mmc/host/sdhci-iproc.c16
-rw-r--r--drivers/mmc/host/sdhci-milbeaut.c8
-rw-r--r--drivers/mmc/host/sdhci-msm.c7
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c14
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c15
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c10
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c93
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c2
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c4
-rw-r--r--drivers/mmc/host/sdhci-of-sparx5.c19
-rw-r--r--drivers/mmc/host/sdhci-omap.c7
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c4
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c211
-rw-r--r--drivers/mmc/host/sdhci-pci.h4
-rw-r--r--drivers/mmc/host/sdhci-pic32.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c16
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h8
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c20
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c6
-rw-r--r--drivers/mmc/host/sdhci-s3c.c7
-rw-r--r--drivers/mmc/host/sdhci-spear.c10
-rw-r--r--drivers/mmc/host/sdhci-sprd.c156
-rw-r--r--drivers/mmc/host/sdhci-st.c15
-rw-r--r--drivers/mmc/host/sdhci-tegra.c7
-rw-r--r--drivers/mmc/host/sdhci-xenon.c6
-rw-r--r--drivers/mmc/host/sdhci_am654.c14
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c10
-rw-r--r--drivers/mmc/host/sh_mmcif.c7
-rw-r--r--drivers/mmc/host/sunplus-mmc.c9
-rw-r--r--drivers/mmc/host/sunxi-mmc.c6
-rw-r--r--drivers/mmc/host/uniphier-sd.c19
-rw-r--r--drivers/mmc/host/usdhi6rol0.c6
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/wbsd.c9
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c7
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c11
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c9
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c24
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c369
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c4
-rw-r--r--drivers/net/ethernet/sfc/mae.c89
-rw-r--r--drivers/net/ethernet/sfc/mae.h4
-rw-r--r--drivers/net/ethernet/sfc/ptp.c4
-rw-r--r--drivers/net/ethernet/sfc/tc.c476
-rw-r--r--drivers/net/ethernet/sfc/tc.h58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c6
-rw-r--r--drivers/net/ethernet/ti/Kconfig12
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c965
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.h41
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c7
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_ethtool.c21
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c451
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h28
-rw-r--r--drivers/net/usb/r8152.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h1
-rw-r--r--drivers/opp/core.c117
-rw-r--r--drivers/opp/cpu.c4
-rw-r--r--drivers/perf/Kconfig2
-rw-r--r--drivers/perf/alibaba_uncore_drw_pmu.c27
-rw-r--r--drivers/perf/amlogic/meson_ddr_pmu_core.c2
-rw-r--r--drivers/perf/arm-cci.c5
-rw-r--r--drivers/perf/arm-cmn.c161
-rw-r--r--drivers/perf/arm_dmc620_pmu.c19
-rw-r--r--drivers/perf/arm_dsu_pmu.c2
-rw-r--r--drivers/perf/arm_pmu.c10
-rw-r--r--drivers/perf/arm_pmu_acpi.c137
-rw-r--r--drivers/perf/arm_pmu_platform.c1
-rw-r--r--drivers/perf/arm_pmuv3.c33
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c47
-rw-r--r--drivers/perf/arm_spe_pmu.c3
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c48
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c4
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c17
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c3
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c3
-rw-r--r--drivers/perf/xgene_pmu.c4
-rw-r--r--drivers/pinctrl/cirrus/Kconfig11
-rw-r--r--drivers/pinctrl/cirrus/Makefile2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c609
-rw-r--r--drivers/pinctrl/pinctrl-amd.c30
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c17
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c15
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c13
-rw-r--r--drivers/platform/chrome/chromeos_acpi.c33
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c4
-rw-r--r--drivers/platform/x86/intel/ifs/load.c7
-rw-r--r--drivers/platform/x86/intel/pmc/core.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c2
-rw-r--r--drivers/pnp/pnpacpi/core.c3
-rw-r--r--drivers/power/reset/Kconfig7
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/as3722-poweroff.c1
-rw-r--r--drivers/power/reset/at91-poweroff.c4
-rw-r--r--drivers/power/reset/at91-reset.c37
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/reset/brcm-kona-reset.c8
-rw-r--r--drivers/power/reset/gemini-poweroff.c4
-rw-r--r--drivers/power/reset/gpio-poweroff.c3
-rw-r--r--drivers/power/reset/gpio-restart.c2
-rw-r--r--drivers/power/reset/keystone-reset.c3
-rw-r--r--drivers/power/reset/msm-poweroff.c6
-rw-r--r--drivers/power/reset/ocelot-reset.c9
-rw-r--r--drivers/power/reset/odroid-go-ultra-poweroff.c3
-rw-r--r--drivers/power/reset/oxnas-restart.c233
-rw-r--r--drivers/power/reset/st-poweroff.c2
-rw-r--r--drivers/power/reset/syscon-poweroff.c3
-rw-r--r--drivers/power/reset/syscon-reboot.c3
-rw-r--r--drivers/power/reset/xgene-reboot.c2
-rw-r--r--drivers/power/supply/axp20x_ac_power.c1
-rw-r--r--drivers/power/supply/axp20x_battery.c1
-rw-r--r--drivers/power/supply/axp20x_usb_power.c1
-rw-r--r--drivers/power/supply/bd99954-charger.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c17
-rw-r--r--drivers/power/supply/cpcap-battery.c2
-rw-r--r--drivers/power/supply/da9150-charger.c2
-rw-r--r--drivers/power/supply/da9150-fg.c1
-rw-r--r--drivers/power/supply/lego_ev3_battery.c2
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c2
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c2
-rw-r--r--drivers/power/supply/max14656_charger_detector.c2
-rw-r--r--drivers/power/supply/max17040_battery.c2
-rw-r--r--drivers/power/supply/max8903_charger.c1
-rw-r--r--drivers/power/supply/power_supply_core.c6
-rw-r--r--drivers/power/supply/qcom_pmi8998_charger.c13
-rw-r--r--drivers/power/supply/rn5t618_power.c1
-rw-r--r--drivers/power/supply/rt5033_charger.c2
-rw-r--r--drivers/power/supply/rt9455_charger.c3
-rw-r--r--drivers/power/supply/sbs-battery.c2
-rw-r--r--drivers/power/supply/tps65090-charger.c2
-rw-r--r--drivers/power/supply/tps65217_charger.c1
-rw-r--r--drivers/power/supply/twl4030_charger.c1
-rw-r--r--drivers/powercap/arm_scmi_powercap.c159
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/powercap/intel_rapl_msr.c2
-rw-r--r--drivers/regulator/Kconfig53
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/act8945a-regulator.c2
-rw-r--r--drivers/regulator/atc260x-regulator.c7
-rw-r--r--drivers/regulator/aw37503-regulator.c240
-rw-r--r--drivers/regulator/axp20x-regulator.c1
-rw-r--r--drivers/regulator/bd71815-regulator.c1
-rw-r--r--drivers/regulator/bd71828-regulator.c1
-rw-r--r--drivers/regulator/bd718x7-regulator.c22
-rw-r--r--drivers/regulator/cpcap-regulator.c2
-rw-r--r--drivers/regulator/da9062-regulator.c11
-rw-r--r--drivers/regulator/da9121-regulator.c18
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/regulator/helpers.c15
-rw-r--r--drivers/regulator/hi6421-regulator.c20
-rw-r--r--drivers/regulator/lp872x.c2
-rw-r--r--drivers/regulator/lp8755.c1
-rw-r--r--drivers/regulator/lp87565-regulator.c4
-rw-r--r--drivers/regulator/ltc3589.c4
-rw-r--r--drivers/regulator/max20086-regulator.c1
-rw-r--r--drivers/regulator/max5970-regulator.c (renamed from drivers/regulator/max597x-regulator.c)28
-rw-r--r--drivers/regulator/max77541-regulator.c6
-rw-r--r--drivers/regulator/max77650-regulator.c2
-rw-r--r--drivers/regulator/max77826-regulator.c1
-rw-r--r--drivers/regulator/max77857-regulator.c461
-rw-r--r--drivers/regulator/max8893.c4
-rw-r--r--drivers/regulator/mcp16502.c7
-rw-r--r--drivers/regulator/mp5416.c2
-rw-r--r--drivers/regulator/mp886x.c2
-rw-r--r--drivers/regulator/mpq7920.c3
-rw-r--r--drivers/regulator/mt6311-regulator.c1
-rw-r--r--drivers/regulator/mt6315-regulator.c2
-rw-r--r--drivers/regulator/mt6359-regulator.c2
-rw-r--r--drivers/regulator/mtk-dvfsrc-regulator.c3
-rw-r--r--drivers/regulator/pbias-regulator.c1
-rw-r--r--drivers/regulator/pca9450-regulator.c1
-rw-r--r--drivers/regulator/pfuze100-regulator.c3
-rw-r--r--drivers/regulator/pwm-regulator.c2
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c154
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c39
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c3
-rw-r--r--drivers/regulator/qcom_smd-regulator.c1
-rw-r--r--drivers/regulator/qcom_usb_vbus-regulator.c1
-rw-r--r--drivers/regulator/raa215300.c38
-rw-r--r--drivers/regulator/rc5t583-regulator.c1
-rw-r--r--drivers/regulator/rk808-regulator.c3
-rw-r--r--drivers/regulator/rohm-regulator.c2
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c3
-rw-r--r--drivers/regulator/rt5739.c36
-rw-r--r--drivers/regulator/rt5759-regulator.c2
-rw-r--r--drivers/regulator/rtq2208-regulator.c583
-rw-r--r--drivers/regulator/s2mpa01.c1
-rw-r--r--drivers/regulator/stm32-pwr.c3
-rw-r--r--drivers/regulator/stm32-vrefbuf.c2
-rw-r--r--drivers/regulator/sy8824x.c2
-rw-r--r--drivers/regulator/sy8827n.c2
-rw-r--r--drivers/regulator/tps6286x-regulator.c8
-rw-r--r--drivers/regulator/tps6287x-regulator.c6
-rw-r--r--drivers/regulator/tps65086-regulator.c188
-rw-r--r--drivers/regulator/tps65218-regulator.c2
-rw-r--r--drivers/regulator/tps65219-regulator.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c1
-rw-r--r--drivers/regulator/twl6030-regulator.c1
-rw-r--r--drivers/regulator/uniphier-regulator.c2
-rw-r--r--drivers/regulator/vctrl-regulator.c2
-rw-r--r--drivers/regulator/vexpress-regulator.c3
-rw-r--r--drivers/s390/block/dasd.c7
-rw-r--r--drivers/s390/block/dcssblk.c26
-rw-r--r--drivers/s390/block/scm_blk.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c4
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/crypto/Makefile2
-rw-r--r--drivers/s390/crypto/ap_bus.c34
-rw-r--r--drivers/s390/crypto/ap_bus.h20
-rw-r--r--drivers/s390/crypto/ap_queue.c47
-rw-r--r--drivers/s390/crypto/pkey_api.c119
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c164
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h6
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c227
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h134
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c421
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.h18
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c347
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.h24
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c64
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.h3
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c14
-rw-r--r--drivers/scsi/raid_class.c48
-rw-r--r--drivers/scsi/snic/snic_disc.c3
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soundwire/bus.c32
-rw-r--r--drivers/soundwire/bus_type.c12
-rw-r--r--drivers/spi/Kconfig33
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-amd.c52
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c88
-rw-r--r--drivers/spi/spi-ar934x.c3
-rw-r--r--drivers/spi/spi-armada-3700.c3
-rw-r--r--drivers/spi/spi-aspeed-smc.c14
-rw-r--r--drivers/spi/spi-at91-usart.c8
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-atmel.c10
-rw-r--r--drivers/spi/spi-au1550.c74
-rw-r--r--drivers/spi/spi-axi-spi-engine.c62
-rw-r--r--drivers/spi/spi-bcm-qspi.c64
-rw-r--r--drivers/spi/spi-bcm2835.c130
-rw-r--r--drivers/spi/spi-bcm2835aux.c92
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c86
-rw-r--r--drivers/spi/spi-bcm63xx.c68
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c66
-rw-r--r--drivers/spi/spi-bitbang-txrx.h16
-rw-r--r--drivers/spi/spi-bitbang.c8
-rw-r--r--drivers/spi/spi-butterfly.c18
-rw-r--r--drivers/spi/spi-cadence-quadspi.c112
-rw-r--r--drivers/spi/spi-cadence-xspi.c31
-rw-r--r--drivers/spi/spi-cadence.c95
-rw-r--r--drivers/spi/spi-cavium-octeon.c32
-rw-r--r--drivers/spi/spi-cavium-thunderx.c32
-rw-r--r--drivers/spi/spi-clps711x.c42
-rw-r--r--drivers/spi/spi-coldfire-qspi.c77
-rw-r--r--drivers/spi/spi-cs42l43.c284
-rw-r--r--drivers/spi/spi-davinci.c90
-rw-r--r--drivers/spi/spi-dln2.c94
-rw-r--r--drivers/spi/spi-dw-core.c112
-rw-r--r--drivers/spi/spi-dw-dma.c22
-rw-r--r--drivers/spi/spi-dw-mmio.c10
-rw-r--r--drivers/spi/spi-dw.h4
-rw-r--r--drivers/spi/spi-ep93xx.c187
-rw-r--r--drivers/spi/spi-falcon.c34
-rw-r--r--drivers/spi/spi-fsi.c2
-rw-r--r--drivers/spi/spi-fsl-cpm.c74
-rw-r--r--drivers/spi/spi-fsl-dspi.c34
-rw-r--r--drivers/spi/spi-fsl-espi.c76
-rw-r--r--drivers/spi/spi-fsl-lib.c3
-rw-r--r--drivers/spi/spi-fsl-lib.h3
-rw-r--r--drivers/spi/spi-fsl-lpspi.c89
-rw-r--r--drivers/spi/spi-fsl-qspi.c11
-rw-r--r--drivers/spi/spi-fsl-spi.c80
-rw-r--r--drivers/spi/spi-geni-qcom.c53
-rw-r--r--drivers/spi/spi-gpio.c77
-rw-r--r--drivers/spi/spi-gxp.c7
-rw-r--r--drivers/spi/spi-hisi-kunpeng.c84
-rw-r--r--drivers/spi/spi-hisi-sfc-v3xx.c18
-rw-r--r--drivers/spi/spi-img-spfi.c118
-rw-r--r--drivers/spi/spi-imx.c128
-rw-r--r--drivers/spi/spi-ingenic.c4
-rw-r--r--drivers/spi/spi-intel.c42
-rw-r--r--drivers/spi/spi-iproc-qspi.c9
-rw-r--r--drivers/spi/spi-jcore.c44
-rw-r--r--drivers/spi/spi-lantiq-ssc.c99
-rw-r--r--drivers/spi/spi-loongson-core.c279
-rw-r--r--drivers/spi/spi-loongson-pci.c55
-rw-r--r--drivers/spi/spi-loongson-plat.c47
-rw-r--r--drivers/spi/spi-loongson.h49
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-lp8841-rtc.c11
-rw-r--r--drivers/spi/spi-meson-spicc.c3
-rw-r--r--drivers/spi/spi-microchip-core.c6
-rw-r--r--drivers/spi/spi-mpc512x-psc.c2
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c2
-rw-r--r--drivers/spi/spi-mt65xx.c2
-rw-r--r--drivers/spi/spi-mt7621.c2
-rw-r--r--drivers/spi/spi-mtk-nor.c3
-rw-r--r--drivers/spi/spi-mtk-snfi.c3
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-npcm-fiu.c22
-rw-r--r--drivers/spi/spi-nxp-fspi.c7
-rw-r--r--drivers/spi/spi-omap-uwire.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-orion.c91
-rw-r--r--drivers/spi/spi-pci1xxxx.c16
-rw-r--r--drivers/spi/spi-pic32-sqi.c86
-rw-r--r--drivers/spi/spi-pic32.c154
-rw-r--r--drivers/spi/spi-pl022.c94
-rw-r--r--drivers/spi/spi-ppc4xx.c44
-rw-r--r--drivers/spi/spi-pxa2xx.c26
-rw-r--r--drivers/spi/spi-qcom-qspi.c82
-rw-r--r--drivers/spi/spi-qup.c167
-rw-r--r--drivers/spi/spi-rb4xx.c36
-rw-r--r--drivers/spi/spi-realtek-rtl.c2
-rw-r--r--drivers/spi/spi-rockchip-sfc.c30
-rw-r--r--drivers/spi/spi-rockchip.c60
-rw-r--r--drivers/spi/spi-rpc-if.c2
-rw-r--r--drivers/spi/spi-rspi.c7
-rw-r--r--drivers/spi/spi-rzv2m-csi.c122
-rw-r--r--drivers/spi/spi-s3c64xx.c138
-rw-r--r--drivers/spi/spi-sc18is602.c53
-rw-r--r--drivers/spi/spi-sh-hspi.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c51
-rw-r--r--drivers/spi/spi-sh-sci.c24
-rw-r--r--drivers/spi/spi-sh.c34
-rw-r--r--drivers/spi/spi-sifive.c80
-rw-r--r--drivers/spi/spi-slave-mt27xx.c2
-rw-r--r--drivers/spi/spi-sn-f-ospi.c6
-rw-r--r--drivers/spi/spi-sprd-adi.c3
-rw-r--r--drivers/spi/spi-sprd.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c1
-rw-r--r--drivers/spi/spi-stm32.c24
-rw-r--r--drivers/spi/spi-sun6i.c32
-rw-r--r--drivers/spi/spi-tegra114.c19
-rw-r--r--drivers/spi/spi-tegra20-sflash.c6
-rw-r--r--drivers/spi/spi-tegra20-slink.c11
-rw-r--r--drivers/spi/spi-tegra210-quad.c1
-rw-r--r--drivers/spi/spi-ti-qspi.c3
-rw-r--r--drivers/spi/spi-wpcm-fiu.c3
-rw-r--r--drivers/spi/spi-xcomm.c2
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c2
-rw-r--r--drivers/spi/spi-zynq-qspi.c4
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c8
-rw-r--r--drivers/spi/spi.c219
-rw-r--r--drivers/thermal/amlogic_thermal.c2
-rw-r--r--drivers/thermal/broadcom/bcm2711_thermal.c2
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c2
-rw-r--r--drivers/thermal/hisi_thermal.c2
-rw-r--r--drivers/thermal/imx8mm_thermal.c1
-rw-r--r--drivers/thermal/imx_sc_thermal.c1
-rw-r--r--drivers/thermal/imx_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c44
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3401_thermal.c13
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c2
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c188
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.h15
-rw-r--r--drivers/thermal/intel/intel_soc_dts_thermal.c17
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c2
-rw-r--r--drivers/thermal/k3_bandgap.c2
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c2
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c1
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c1
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c1
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/rzg2l_thermal.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/sprd_thermal.c2
-rw-r--r--drivers/thermal/st/stm_thermal.c2
-rw-r--r--drivers/thermal/sun8i_thermal.c2
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c2
-rw-r--r--drivers/thermal/thermal_core.c22
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/thermal/thermal_of.c11
-rw-r--r--drivers/thermal/thermal_trip.c18
-rw-r--r--drivers/thermal/uniphier_thermal.c1
-rw-r--r--drivers/ufs/core/ufs-mcq.c6
-rw-r--r--drivers/ufs/host/ufs-qcom.c2
-rw-r--r--drivers/usb/core/devio.c16
-rw-r--r--drivers/usb/gadget/function/f_fs.c3
-rw-r--r--drivers/usb/gadget/legacy/inode.c3
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/grant-table.c2
-rw-r--r--drivers/xen/privcmd.c282
-rw-r--r--drivers/xen/xen-acpi-processor.c7
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.h2
-rw-r--r--drivers/xen/xen-pciback/pciback.h3
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c4
-rw-r--r--drivers/zorro/names.c1
-rw-r--r--fs/9p/vfs_inode.c8
-rw-r--r--fs/9p/vfs_inode_dotl.c12
-rw-r--r--fs/Kconfig16
-rw-r--r--fs/adfs/inode.c4
-rw-r--r--fs/affs/amigaffs.c6
-rw-r--r--fs/affs/file.c14
-rw-r--r--fs/affs/inode.c16
-rw-r--r--fs/affs/namei.c20
-rw-r--r--fs/afs/dynroot.c2
-rw-r--r--fs/afs/inode.c8
-rw-r--r--fs/aio.c20
-rw-r--r--fs/attr.c22
-rw-r--r--fs/autofs/inode.c2
-rw-r--r--fs/autofs/root.c6
-rw-r--r--fs/autofs/waitq.c5
-rw-r--r--fs/bad_inode.c6
-rw-r--r--fs/befs/linuxvfs.c2
-rw-r--r--fs/bfs/dir.c16
-rw-r--r--fs/bfs/inode.c5
-rw-r--r--fs/binfmt_misc.c3
-rw-r--r--fs/btrfs/Kconfig4
-rw-r--r--fs/btrfs/accessors.h23
-rw-r--r--fs/btrfs/backref.c29
-rw-r--r--fs/btrfs/block-group.c73
-rw-r--r--fs/btrfs/block-group.h4
-rw-r--r--fs/btrfs/btrfs_inode.h6
-rw-r--r--fs/btrfs/delayed-inode.c11
-rw-r--r--fs/btrfs/dev-replace.c6
-rw-r--r--fs/btrfs/disk-io.c175
-rw-r--r--fs/btrfs/disk-io.h1
-rw-r--r--fs/btrfs/extent-io-tree.c14
-rw-r--r--fs/btrfs/extent-io-tree.h6
-rw-r--r--fs/btrfs/extent-tree.c288
-rw-r--r--fs/btrfs/extent-tree.h16
-rw-r--r--fs/btrfs/extent_io.c680
-rw-r--r--fs/btrfs/extent_io.h35
-rw-r--r--fs/btrfs/file-item.c34
-rw-r--r--fs/btrfs/file-item.h6
-rw-r--r--fs/btrfs/file.c46
-rw-r--r--fs/btrfs/free-space-cache.c18
-rw-r--r--fs/btrfs/free-space-tree.c13
-rw-r--r--fs/btrfs/fs.h15
-rw-r--r--fs/btrfs/inode.c850
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/messages.c16
-rw-r--r--fs/btrfs/messages.h2
-rw-r--r--fs/btrfs/ordered-data.c8
-rw-r--r--fs/btrfs/print-tree.c10
-rw-r--r--fs/btrfs/qgroup.c19
-rw-r--r--fs/btrfs/raid56.c29
-rw-r--r--fs/btrfs/raid56.h1
-rw-r--r--fs/btrfs/reflink.c3
-rw-r--r--fs/btrfs/relocation.c33
-rw-r--r--fs/btrfs/scrub.c240
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/space-info.c85
-rw-r--r--fs/btrfs/super.c11
-rw-r--r--fs/btrfs/sysfs.c7
-rw-r--r--fs/btrfs/tests/extent-io-tests.c302
-rw-r--r--fs/btrfs/tests/extent-map-tests.c412
-rw-r--r--fs/btrfs/transaction.c42
-rw-r--r--fs/btrfs/tree-log.c12
-rw-r--r--fs/btrfs/volumes.c98
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/btrfs/xattr.c4
-rw-r--r--fs/btrfs/zoned.c292
-rw-r--r--fs/btrfs/zoned.h28
-rw-r--r--fs/buffer.c18
-rw-r--r--fs/cachefiles/io.c16
-rw-r--r--fs/ceph/acl.c2
-rw-r--r--fs/ceph/caps.c2
-rw-r--r--fs/ceph/inode.c18
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/xattr.c2
-rw-r--r--fs/coda/coda_linux.c3
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/coda/inode.c5
-rw-r--r--fs/configfs/inode.c7
-rw-r--r--fs/cramfs/inode.c11
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/debugfs/inode.c3
-rw-r--r--fs/devpts/inode.c16
-rw-r--r--fs/ecryptfs/crypto.c8
-rw-r--r--fs/ecryptfs/inode.c7
-rw-r--r--fs/ecryptfs/mmap.c5
-rw-r--r--fs/ecryptfs/read_write.c12
-rw-r--r--fs/efivarfs/file.c2
-rw-r--r--fs/efivarfs/inode.c2
-rw-r--r--fs/efs/inode.c4
-rw-r--r--fs/erofs/Kconfig16
-rw-r--r--fs/erofs/Makefile1
-rw-r--r--fs/erofs/compress.h2
-rw-r--r--fs/erofs/decompressor.c6
-rw-r--r--fs/erofs/decompressor_deflate.c247
-rw-r--r--fs/erofs/erofs_fs.h17
-rw-r--r--fs/erofs/inode.c14
-rw-r--r--fs/erofs/internal.h23
-rw-r--r--fs/erofs/super.c44
-rw-r--r--fs/erofs/xattr.c14
-rw-r--r--fs/erofs/zdata.c274
-rw-r--r--fs/erofs/zmap.c5
-rw-r--r--fs/eventfd.c2
-rw-r--r--fs/eventpoll.c12
-rw-r--r--fs/exfat/exfat_fs.h2
-rw-r--r--fs/exfat/file.c6
-rw-r--r--fs/exfat/inode.c6
-rw-r--r--fs/exfat/namei.c26
-rw-r--r--fs/exfat/super.c42
-rw-r--r--fs/ext2/acl.c2
-rw-r--r--fs/ext2/dir.c6
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c12
-rw-r--r--fs/ext2/ioctl.c4
-rw-r--r--fs/ext2/namei.c8
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext4/acl.c2
-rw-r--r--fs/ext4/ext4.h90
-rw-r--r--fs/ext4/ext4_jbd2.c3
-rw-r--r--fs/ext4/extents.c12
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inline.c4
-rw-r--r--fs/ext4/inode-test.c6
-rw-r--r--fs/ext4/inode.c18
-rw-r--r--fs/ext4/ioctl.c9
-rw-r--r--fs/ext4/namei.c26
-rw-r--r--fs/ext4/super.c73
-rw-r--r--fs/ext4/xattr.c6
-rw-r--r--fs/f2fs/compress.c2
-rw-r--r--fs/f2fs/dir.c8
-rw-r--r--fs/f2fs/f2fs.h6
-rw-r--r--fs/f2fs/file.c22
-rw-r--r--fs/f2fs/gc.c8
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/inode.c10
-rw-r--r--fs/f2fs/namei.c12
-rw-r--r--fs/f2fs/recovery.c4
-rw-r--r--fs/f2fs/super.c9
-rw-r--r--fs/f2fs/xattr.c2
-rw-r--r--fs/fat/fat.h3
-rw-r--r--fs/fat/file.c2
-rw-r--r--fs/fat/inode.c5
-rw-r--r--fs/fat/misc.c10
-rw-r--r--fs/fcntl.c29
-rw-r--r--fs/file.c30
-rw-r--r--fs/file_table.c5
-rw-r--r--fs/freevxfs/vxfs_inode.c3
-rw-r--r--fs/fs-writeback.c4
-rw-r--r--fs/fs_context.c36
-rw-r--r--fs/fsopen.c106
-rw-r--r--fs/fuse/control.c2
-rw-r--r--fs/fuse/dir.c10
-rw-r--r--fs/fuse/inode.c16
-rw-r--r--fs/gfs2/acl.c2
-rw-r--r--fs/gfs2/aops.c2
-rw-r--r--fs/gfs2/bmap.c13
-rw-r--r--fs/gfs2/dir.c15
-rw-r--r--fs/gfs2/file.c2
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/inode.c16
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/gfs2/super.c16
-rw-r--r--fs/gfs2/sys.c4
-rw-r--r--fs/gfs2/xattr.c8
-rw-r--r--fs/hfs/catalog.c8
-rw-r--r--fs/hfs/dir.c2
-rw-r--r--fs/hfs/inode.c13
-rw-r--r--fs/hfs/sysdep.c4
-rw-r--r--fs/hfsplus/catalog.c8
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hfsplus/inode.c18
-rw-r--r--fs/hostfs/hostfs_kern.c3
-rw-r--r--fs/hpfs/dir.c8
-rw-r--r--fs/hpfs/inode.c6
-rw-r--r--fs/hpfs/namei.c29
-rw-r--r--fs/hpfs/super.c5
-rw-r--r--fs/hugetlbfs/inode.c12
-rw-r--r--fs/inode.c233
-rw-r--r--fs/internal.h4
-rw-r--r--fs/ioctl.c18
-rw-r--r--fs/iomap/buffered-io.c465
-rw-r--r--fs/iomap/direct-io.c163
-rw-r--r--fs/isofs/inode.c9
-rw-r--r--fs/isofs/rock.c16
-rw-r--r--fs/jffs2/dir.c24
-rw-r--r--fs/jffs2/file.c3
-rw-r--r--fs/jffs2/fs.c10
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jfs/acl.c2
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/jfs/jfs_imap.c8
-rw-r--r--fs/jfs/jfs_inode.c4
-rw-r--r--fs/jfs/namei.c24
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/kernel_read_file.c12
-rw-r--r--fs/kernfs/dir.c2
-rw-r--r--fs/kernfs/inode.c53
-rw-r--r--fs/libfs.c349
-rw-r--r--fs/locks.c47
-rw-r--r--fs/minix/bitmap.c2
-rw-r--r--fs/minix/dir.c6
-rw-r--r--fs/minix/inode.c12
-rw-r--r--fs/minix/itree_common.c4
-rw-r--r--fs/minix/namei.c6
-rw-r--r--fs/namei.c5
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/fscache.h4
-rw-r--r--fs/nfs/inode.c22
-rw-r--r--fs/nfs/namespace.c3
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4file.c2
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/nfsd/nfsctl.c3
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/nilfs2/dir.c6
-rw-r--r--fs/nilfs2/inode.c12
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/nilfs2/namei.c8
-rw-r--r--fs/nilfs2/segment.c5
-rw-r--r--fs/nilfs2/super.c81
-rw-r--r--fs/notify/dnotify/dnotify.c4
-rw-r--r--fs/nsfs.c2
-rw-r--r--fs/ntfs/inode.c15
-rw-r--r--fs/ntfs/mft.c3
-rw-r--r--fs/ntfs3/file.c8
-rw-r--r--fs/ntfs3/frecord.c3
-rw-r--r--fs/ntfs3/inode.c14
-rw-r--r--fs/ntfs3/namei.c11
-rw-r--r--fs/ntfs3/super.c33
-rw-r--r--fs/ntfs3/xattr.c4
-rw-r--r--fs/ocfs2/acl.c6
-rw-r--r--fs/ocfs2/alloc.c6
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/dir.c8
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/dlmglue.c7
-rw-r--r--fs/ocfs2/file.c18
-rw-r--r--fs/ocfs2/inode.c12
-rw-r--r--fs/ocfs2/journal.c6
-rw-r--r--fs/ocfs2/move_extents.c6
-rw-r--r--fs/ocfs2/namei.c21
-rw-r--r--fs/ocfs2/refcounttree.c14
-rw-r--r--fs/ocfs2/xattr.c6
-rw-r--r--fs/omfs/dir.c4
-rw-r--r--fs/omfs/inode.c9
-rw-r--r--fs/open.c54
-rw-r--r--fs/openpromfs/inode.c5
-rw-r--r--fs/orangefs/inode.c7
-rw-r--r--fs/orangefs/namei.c2
-rw-r--r--fs/orangefs/orangefs-kernel.h2
-rw-r--r--fs/orangefs/orangefs-utils.c6
-rw-r--r--fs/overlayfs/file.c17
-rw-r--r--fs/overlayfs/inode.c2
-rw-r--r--fs/overlayfs/overlayfs.h2
-rw-r--r--fs/overlayfs/util.c2
-rw-r--r--fs/pipe.c10
-rw-r--r--fs/posix_acl.c2
-rw-r--r--fs/proc/base.c9
-rw-r--r--fs/proc/fd.c2
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/proc_net.c3
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--fs/proc/root.c3
-rw-r--r--fs/proc/self.c2
-rw-r--r--fs/proc/task_mmu.c8
-rw-r--r--fs/proc/thread_self.c2
-rw-r--r--fs/pstore/Kconfig100
-rw-r--r--fs/pstore/inode.c6
-rw-r--r--fs/pstore/platform.c353
-rw-r--r--fs/pstore/ram.c11
-rw-r--r--fs/pstore/ram_core.c17
-rw-r--r--fs/qnx4/inode.c3
-rw-r--r--fs/qnx6/inode.c3
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/ramfs/inode.c6
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/reiserfs/inode.c12
-rw-r--r--fs/reiserfs/ioctl.c4
-rw-r--r--fs/reiserfs/journal.c4
-rw-r--r--fs/reiserfs/namei.c18
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/reiserfs/xattr.c5
-rw-r--r--fs/reiserfs/xattr_acl.c2
-rw-r--r--fs/romfs/super.c13
-rw-r--r--fs/smb/client/cifsfs.c2
-rw-r--r--fs/smb/client/file.c4
-rw-r--r--fs/smb/client/fscache.h5
-rw-r--r--fs/smb/client/inode.c16
-rw-r--r--fs/smb/client/smb2ops.c3
-rw-r--r--fs/smb/server/smb2pdu.c30
-rw-r--r--fs/smb/server/vfs.c3
-rw-r--r--fs/splice.c64
-rw-r--r--fs/squashfs/inode.c2
-rw-r--r--fs/stack.c2
-rw-r--r--fs/stat.c65
-rw-r--r--fs/super.c829
-rw-r--r--fs/sysv/dir.c6
-rw-r--r--fs/sysv/ialloc.c2
-rw-r--r--fs/sysv/inode.c5
-rw-r--r--fs/sysv/itree.c7
-rw-r--r--fs/sysv/namei.c6
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/ubifs/debug.c4
-rw-r--r--fs/ubifs/dir.c41
-rw-r--r--fs/ubifs/file.c31
-rw-r--r--fs/ubifs/ioctl.c2
-rw-r--r--fs/ubifs/journal.c4
-rw-r--r--fs/ubifs/super.c4
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/ubifs/xattr.c6
-rw-r--r--fs/udf/ialloc.c2
-rw-r--r--fs/udf/inode.c17
-rw-r--r--fs/udf/namei.c24
-rw-r--r--fs/udf/symlink.c2
-rw-r--r--fs/ufs/dir.c6
-rw-r--r--fs/ufs/ialloc.c2
-rw-r--r--fs/ufs/inode.c23
-rw-r--r--fs/ufs/namei.c8
-rw-r--r--fs/vboxsf/utils.c6
-rw-r--r--fs/verity/fsverity_private.h12
-rw-r--r--fs/verity/hash_algs.c8
-rw-r--r--fs/verity/init.c56
-rw-r--r--fs/verity/open.c18
-rw-r--r--fs/verity/signature.c77
-rw-r--r--fs/verity/verify.c11
-rw-r--r--fs/xattr.c83
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c5
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c6
-rw-r--r--fs/xfs/scrub/fscounters.c188
-rw-r--r--fs/xfs/scrub/scrub.c6
-rw-r--r--fs/xfs/scrub/scrub.h1
-rw-r--r--fs/xfs/scrub/trace.h26
-rw-r--r--fs/xfs/xfs_acl.c2
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/xfs/xfs_bmap_util.c6
-rw-r--r--fs/xfs/xfs_buf.c7
-rw-r--r--fs/xfs/xfs_inode.c3
-rw-r--r--fs/xfs/xfs_inode_item.c2
-rw-r--r--fs/xfs/xfs_iops.c25
-rw-r--r--fs/xfs/xfs_itable.c4
-rw-r--r--fs/xfs/xfs_super.c138
-rw-r--r--fs/zonefs/file.c2
-rw-r--r--fs/zonefs/super.c8
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acpi_bus.h17
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actbl2.h76
-rw-r--r--include/acpi/actbl3.h4
-rw-r--r--include/acpi/pdc_intel.h36
-rw-r--r--include/acpi/platform/aclinux.h1
-rw-r--r--include/acpi/platform/aczephyr.h3
-rw-r--r--include/acpi/proc_cap_intel.h40
-rw-r--r--include/crypto/algapi.h3
-rw-r--r--include/crypto/engine.h118
-rw-r--r--include/crypto/internal/engine.h74
-rw-r--r--include/crypto/public_key.h12
-rw-r--r--include/drm/display/drm_dp.h2
-rw-r--r--include/drm/drm_probe_helper.h1
-rw-r--r--include/keys/system_keyring.h14
-rw-r--r--include/kunit/attributes.h50
-rw-r--r--include/kunit/static_stub.h6
-rw-r--r--include/kunit/test-bug.h2
-rw-r--r--include/kunit/test.h91
-rw-r--r--include/linux/acpi.h12
-rw-r--r--include/linux/acpi_iort.h1
-rw-r--r--include/linux/arm_sdei.h2
-rw-r--r--include/linux/atmel-mci.h46
-rw-r--r--include/linux/blk_types.h1
-rw-r--r--include/linux/blkdev.h15
-rw-r--r--include/linux/cgroup-defs.h2
-rw-r--r--include/linux/clk.h80
-rw-r--r--include/linux/compiler_attributes.h26
-rw-r--r--include/linux/compiler_types.h28
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/cpu.h26
-rw-r--r--include/linux/cpu_smt.h33
-rw-r--r--include/linux/cpufreq.h16
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/decompress/mm.h2
-rw-r--r--include/linux/dm-verity-loadpin.h2
-rw-r--r--include/linux/dnotify.h4
-rw-r--r--include/linux/efi.h53
-rw-r--r--include/linux/filelock.h12
-rw-r--r--include/linux/fs.h231
-rw-r--r--include/linux/fs_context.h6
-rw-r--r--include/linux/fs_stack.h2
-rw-r--r--include/linux/hisi_acc_qm.h2
-rw-r--r--include/linux/huge_mm.h3
-rw-r--r--include/linux/if_arp.h4
-rw-r--r--include/linux/iomap.h3
-rw-r--r--include/linux/list.h89
-rw-r--r--include/linux/lsm_hook_defs.h1
-rw-r--r--include/linux/mfd/cs42l43-regs.h1184
-rw-r--r--include/linux/mfd/cs42l43.h102
-rw-r--r--include/linux/mfd/tps65086.h23
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h3
-rw-r--r--include/linux/mm.h21
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmc/host.h8
-rw-r--r--include/linux/notifier.h11
-rw-r--r--include/linux/nsproxy.h7
-rw-r--r--include/linux/pagemap.h82
-rw-r--r--include/linux/pagewalk.h11
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/perf/arm_pmu.h1
-rw-r--r--include/linux/perf_event.h36
-rw-r--r--include/linux/pipe_fs_i.h4
-rw-r--r--include/linux/pm_opp.h62
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/pm_wakeup.h10
-rw-r--r--include/linux/power/power_on_reason.h19
-rw-r--r--include/linux/psp-platform-access.h4
-rw-r--r--include/linux/raid_class.h4
-rw-r--r--include/linux/rbtree_augmented.h26
-rw-r--r--include/linux/rculist_nulls.h4
-rw-r--r--include/linux/rcupdate_trace.h1
-rw-r--r--include/linux/rcupdate_wait.h5
-rw-r--r--include/linux/regmap.h1
-rw-r--r--include/linux/regulator/db8500-prcmu.h6
-rw-r--r--include/linux/regulator/driver.h11
-rw-r--r--include/linux/sched.h21
-rw-r--r--include/linux/sched/task.h38
-rw-r--r--include/linux/security.h6
-rw-r--r--include/linux/seq_file.h7
-rw-r--r--include/linux/shmem_fs.h31
-rw-r--r--include/linux/soc/qcom/geni-se.h9
-rw-r--r--include/linux/soundwire/sdw.h9
-rw-r--r--include/linux/spi/pxa2xx_spi.h4
-rw-r--r--include/linux/spi/sh_msiof.h4
-rw-r--r--include/linux/spi/spi.h171
-rw-r--r--include/linux/srcutiny.h4
-rw-r--r--include/linux/stmmac.h15
-rw-r--r--include/linux/swait.h2
-rw-r--r--include/linux/syscalls.h20
-rw-r--r--include/linux/thermal.h9
-rw-r--r--include/linux/torture.h7
-rw-r--r--include/linux/trace_events.h11
-rw-r--r--include/linux/uio.h9
-rw-r--r--include/linux/usb/r8152.h1
-rw-r--r--include/linux/wait.h3
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/xattr.h10
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/net/devlink.h30
-rw-r--r--include/net/tls.h10
-rw-r--r--include/trace/events/btrfs.h30
-rw-r--r--include/trace/events/erofs.h16
-rw-r--r--include/trace/events/spi.h2
-rw-r--r--include/uapi/asm-generic/unistd.h5
-rw-r--r--include/uapi/linux/btrfs_tree.h6
-rw-r--r--include/uapi/linux/devlink.h4
-rw-r--r--include/uapi/linux/elf.h1
-rw-r--r--include/uapi/linux/mount.h3
-rw-r--r--include/uapi/linux/perf_event.h3
-rw-r--r--include/uapi/linux/psp-dbc.h147
-rw-r--r--include/uapi/linux/quota.h1
-rw-r--r--include/uapi/linux/seccomp.h4
-rw-r--r--include/uapi/linux/stddef.h4
-rw-r--r--include/uapi/xen/privcmd.h14
-rw-r--r--include/xen/events.h1
-rw-r--r--init/Kconfig1
-rw-r--r--init/do_mounts.c38
-rw-r--r--io_uring/rw.c58
-rw-r--r--ipc/mqueue.c23
-rw-r--r--kernel/bpf/inode.c6
-rw-r--r--kernel/cgroup/cgroup.c34
-rw-r--r--kernel/cpu.c144
-rw-r--r--kernel/entry/common.c3
-rw-r--r--kernel/events/core.c24
-rw-r--r--kernel/events/ring_buffer.c5
-rw-r--r--kernel/fork.c8
-rw-r--r--kernel/irq/chip.c11
-rw-r--r--kernel/irq/internals.h4
-rw-r--r--kernel/irq/manage.c26
-rw-r--r--kernel/irq/resend.c7
-rw-r--r--kernel/kallsyms.c27
-rw-r--r--kernel/kallsyms_selftest.c23
-rw-r--r--kernel/locking/locktorture.c12
-rw-r--r--kernel/locking/qspinlock_paravirt.h20
-rw-r--r--kernel/nsproxy.c4
-rw-r--r--kernel/power/qos.c9
-rw-r--r--kernel/power/snapshot.c187
-rw-r--r--kernel/rcu/rcu.h8
-rw-r--r--kernel/rcu/rcuscale.c83
-rw-r--r--kernel/rcu/rcutorture.c7
-rw-r--r--kernel/rcu/refscale.c37
-rw-r--r--kernel/rcu/tasks.h136
-rw-r--r--kernel/rcu/tree.c16
-rw-r--r--kernel/rcu/tree_nocb.h4
-rw-r--r--kernel/scftorture.c12
-rw-r--r--kernel/sched/completion.c26
-rw-r--r--kernel/sched/core.c501
-rw-r--r--kernel/sched/debug.c49
-rw-r--r--kernel/sched/fair.c1347
-rw-r--r--kernel/sched/features.h24
-rw-r--r--kernel/sched/psi.c2
-rw-r--r--kernel/sched/rt.c5
-rw-r--r--kernel/sched/sched.h72
-rw-r--r--kernel/sched/swait.c8
-rw-r--r--kernel/sched/topology.c15
-rw-r--r--kernel/sched/wait.c5
-rw-r--r--kernel/seccomp.c84
-rw-r--r--kernel/smp.c13
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/time/clocksource.c8
-rw-r--r--kernel/time/time_test.c2
-rw-r--r--kernel/torture.c39
-rw-r--r--kernel/trace/trace.c70
-rw-r--r--kernel/trace/trace.h10
-rw-r--r--kernel/trace/trace_events_synth.c103
-rw-r--r--kernel/trace/trace_irqsoff.c3
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--lib/Kconfig.debug37
-rw-r--r--lib/Kconfig.ubsan10
-rw-r--r--lib/Makefile3
-rw-r--r--lib/clz_ctz.c32
-rw-r--r--lib/crypto/Makefile2
-rw-r--r--lib/crypto/mpi/Makefile (renamed from lib/mpi/Makefile)0
-rw-r--r--lib/crypto/mpi/ec.c (renamed from lib/mpi/ec.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-add1.c (renamed from lib/mpi/generic_mpih-add1.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-lshift.c (renamed from lib/mpi/generic_mpih-lshift.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-mul1.c (renamed from lib/mpi/generic_mpih-mul1.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-mul2.c (renamed from lib/mpi/generic_mpih-mul2.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-mul3.c (renamed from lib/mpi/generic_mpih-mul3.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-rshift.c (renamed from lib/mpi/generic_mpih-rshift.c)0
-rw-r--r--lib/crypto/mpi/generic_mpih-sub1.c (renamed from lib/mpi/generic_mpih-sub1.c)0
-rw-r--r--lib/crypto/mpi/longlong.h (renamed from lib/mpi/longlong.h)0
-rw-r--r--lib/crypto/mpi/mpi-add.c (renamed from lib/mpi/mpi-add.c)0
-rw-r--r--lib/crypto/mpi/mpi-bit.c (renamed from lib/mpi/mpi-bit.c)0
-rw-r--r--lib/crypto/mpi/mpi-cmp.c (renamed from lib/mpi/mpi-cmp.c)8
-rw-r--r--lib/crypto/mpi/mpi-div.c (renamed from lib/mpi/mpi-div.c)0
-rw-r--r--lib/crypto/mpi/mpi-inline.h (renamed from lib/mpi/mpi-inline.h)0
-rw-r--r--lib/crypto/mpi/mpi-internal.h (renamed from lib/mpi/mpi-internal.h)0
-rw-r--r--lib/crypto/mpi/mpi-inv.c (renamed from lib/mpi/mpi-inv.c)0
-rw-r--r--lib/crypto/mpi/mpi-mod.c (renamed from lib/mpi/mpi-mod.c)0
-rw-r--r--lib/crypto/mpi/mpi-mul.c (renamed from lib/mpi/mpi-mul.c)0
-rw-r--r--lib/crypto/mpi/mpi-pow.c (renamed from lib/mpi/mpi-pow.c)0
-rw-r--r--lib/crypto/mpi/mpi-sub-ui.c (renamed from lib/mpi/mpi-sub-ui.c)0
-rw-r--r--lib/crypto/mpi/mpicoder.c (renamed from lib/mpi/mpicoder.c)0
-rw-r--r--lib/crypto/mpi/mpih-cmp.c (renamed from lib/mpi/mpih-cmp.c)0
-rw-r--r--lib/crypto/mpi/mpih-div.c (renamed from lib/mpi/mpih-div.c)0
-rw-r--r--lib/crypto/mpi/mpih-mul.c (renamed from lib/mpi/mpih-mul.c)0
-rw-r--r--lib/crypto/mpi/mpiutil.c (renamed from lib/mpi/mpiutil.c)0
-rw-r--r--lib/iov_iter.c43
-rw-r--r--lib/kunit/Kconfig2
-rw-r--r--lib/kunit/Makefile3
-rw-r--r--lib/kunit/attributes.c414
-rw-r--r--lib/kunit/executor.c227
-rw-r--r--lib/kunit/executor_test.c152
-rw-r--r--lib/kunit/kunit-example-test.c9
-rw-r--r--lib/kunit/test.c64
-rw-r--r--lib/list_debug.c16
-rw-r--r--lib/locking-selftest.c135
-rw-r--r--lib/maple_tree.c7
-rw-r--r--lib/memcpy_kunit.c8
-rw-r--r--lib/radix-tree.c1
-rw-r--r--mm/Makefile2
-rw-r--r--mm/damon/vaddr.c2
-rw-r--r--mm/filemap.c65
-rw-r--r--mm/folio-compat.c2
-rw-r--r--mm/gup.c30
-rw-r--r--mm/hmm.c1
-rw-r--r--mm/huge_memory.c11
-rw-r--r--mm/internal.h17
-rw-r--r--mm/khugepaged.c13
-rw-r--r--mm/ksm.c25
-rw-r--r--mm/madvise.c9
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/mempolicy.c22
-rw-r--r--mm/migrate_device.c1
-rw-r--r--mm/mincore.c1
-rw-r--r--mm/mlock.c1
-rw-r--r--mm/mprotect.c1
-rw-r--r--mm/page-writeback.c49
-rw-r--r--mm/pagewalk.c36
-rw-r--r--mm/readahead.c13
-rw-r--r--mm/shmem.c857
-rw-r--r--mm/shmem_quota.c350
-rw-r--r--mm/truncate.c4
-rw-r--r--mm/vmalloc.c4
-rw-r--r--mm/vmscan.c14
-rw-r--r--net/core/dev.c3
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dccp/ipv6.c15
-rw-r--r--net/devlink/Makefile3
-rw-r--r--net/devlink/core.c6
-rw-r--r--net/devlink/dev.c28
-rw-r--r--net/devlink/devl_internal.h95
-rw-r--r--net/devlink/dpipe.c917
-rw-r--r--net/devlink/leftover.c9427
-rw-r--r--net/devlink/linecard.c606
-rw-r--r--net/devlink/netlink.c266
-rw-r--r--net/devlink/param.c865
-rw-r--r--net/devlink/port.c1515
-rw-r--r--net/devlink/rate.c722
-rw-r--r--net/devlink/region.c1260
-rw-r--r--net/devlink/resource.c579
-rw-r--r--net/devlink/sb.c996
-rw-r--r--net/devlink/trap.c1861
-rw-r--r--net/ipv4/ip_sockglue.c8
-rw-r--r--net/mac80211/cfg.c11
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/key.c23
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c45
-rw-r--r--net/mac80211/tx.c3
-rw-r--r--net/mac80211/vht.c16
-rw-r--r--net/netrom/af_netrom.c5
-rw-r--r--net/rfkill/core.c32
-rw-r--r--net/sched/sch_hfsc.c4
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/tls/tls.h53
-rw-r--r--net/tls/tls_device.c52
-rw-r--r--net/tls/tls_device_fallback.c62
-rw-r--r--net/tls/tls_main.c272
-rw-r--r--net/tls/tls_sw.c179
-rw-r--r--net/wireless/core.c14
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/nl80211.c106
-rw-r--r--net/wireless/scan.c4
-rw-r--r--rust/.gitignore2
-rw-r--r--rust/Makefile46
-rw-r--r--rust/alloc/alloc.rs20
-rw-r--r--rust/alloc/boxed.rs131
-rw-r--r--rust/alloc/lib.rs48
-rw-r--r--rust/alloc/raw_vec.rs18
-rw-r--r--rust/alloc/slice.rs43
-rw-r--r--rust/alloc/vec/drain.rs8
-rw-r--r--rust/alloc/vec/drain_filter.rs8
-rw-r--r--rust/alloc/vec/into_iter.rs35
-rw-r--r--rust/alloc/vec/mod.rs84
-rw-r--r--rust/bindings/bindings_helper.h1
-rw-r--r--rust/compiler_builtins.rs7
-rw-r--r--rust/helpers.c28
-rw-r--r--rust/kernel/allocator.rs84
-rw-r--r--rust/kernel/init.rs670
-rw-r--r--rust/kernel/init/__internal.rs39
-rw-r--r--rust/kernel/init/macros.rs519
-rw-r--r--rust/kernel/kunit.rs163
-rw-r--r--rust/kernel/lib.rs5
-rw-r--r--rust/kernel/prelude.rs2
-rw-r--r--rust/kernel/str.rs4
-rw-r--r--rust/kernel/sync/arc.rs9
-rw-r--r--rust/kernel/sync/lock.rs6
-rw-r--r--rust/kernel/sync/lock/mutex.rs1
-rw-r--r--rust/kernel/sync/lock/spinlock.rs1
-rw-r--r--rust/kernel/types.rs27
-rw-r--r--rust/macros/lib.rs117
-rw-r--r--rust/macros/module.rs2
-rw-r--r--rust/macros/paste.rs96
-rw-r--r--rust/macros/quote.rs12
-rw-r--r--rust/macros/zeroable.rs72
-rw-r--r--samples/ftrace/ftrace-direct-modify.c4
-rw-r--r--samples/ftrace/ftrace-direct-multi-modify.c4
-rw-r--r--samples/ftrace/ftrace-direct-multi.c2
-rw-r--r--samples/ftrace/ftrace-direct-too.c2
-rw-r--r--samples/ftrace/ftrace-direct.c2
-rw-r--r--scripts/.gitignore2
-rw-r--r--scripts/Makefile4
-rw-r--r--scripts/Makefile.modfinal2
-rwxr-xr-xscripts/checkpatch.pl24
-rw-r--r--scripts/gcc-plugins/gcc-common.h4
-rwxr-xr-xscripts/generate_rust_analyzer.py39
-rwxr-xr-xscripts/is_rust_module.sh16
-rwxr-xr-xscripts/min-tool-version.sh4
-rwxr-xr-xscripts/rust_is_available.sh233
-rwxr-xr-xscripts/rust_is_available_test.py346
-rw-r--r--scripts/rustdoc_test_builder.rs72
-rw-r--r--scripts/rustdoc_test_gen.rs260
-rw-r--r--security/Kconfig.hardening23
-rw-r--r--security/apparmor/apparmorfs.c11
-rw-r--r--security/apparmor/policy_unpack.c11
-rw-r--r--security/inode.c2
-rw-r--r--security/integrity/Kconfig4
-rw-r--r--security/integrity/digsig.c6
-rw-r--r--security/integrity/evm/Kconfig3
-rw-r--r--security/integrity/ima/Kconfig3
-rw-r--r--security/integrity/ima/ima_policy.c4
-rw-r--r--security/integrity/integrity.h5
-rw-r--r--security/integrity/platform_certs/keyring_handler.c19
-rw-r--r--security/integrity/platform_certs/keyring_handler.h10
-rw-r--r--security/integrity/platform_certs/load_ipl_s390.c4
-rw-r--r--security/integrity/platform_certs/load_powerpc.c34
-rw-r--r--security/integrity/platform_certs/machine_keyring.c22
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/loadpin/loadpin.c3
-rw-r--r--security/security.c14
-rw-r--r--security/selinux/hooks.c22
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--security/smack/smack_lsm.c51
-rw-r--r--sound/hda/hdac_regmap.c9
-rw-r--r--sound/pci/ymfpci/ymfpci.c10
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c9
-rw-r--r--sound/soc/codecs/cs35l41.c2
-rw-r--r--sound/soc/codecs/cs35l56-i2c.c9
-rw-r--r--sound/soc/codecs/cs35l56-spi.c9
-rw-r--r--sound/soc/codecs/cs35l56.c31
-rw-r--r--sound/soc/codecs/tas2781-comlib.c19
-rw-r--r--sound/soc/sof/ipc4-pcm.c3
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_32.h3
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_64.h3
-rw-r--r--tools/crypto/ccp/.gitignore1
-rw-r--r--tools/crypto/ccp/Makefile13
-rw-r--r--tools/crypto/ccp/dbc.c72
-rw-r--r--tools/crypto/ccp/dbc.py64
-rwxr-xr-xtools/crypto/ccp/dbc_cli.py134
-rwxr-xr-xtools/crypto/ccp/test_dbc.py266
-rw-r--r--tools/include/linux/compiler.h18
-rw-r--r--tools/include/nolibc/Makefile1
-rw-r--r--tools/include/nolibc/arch-aarch64.h85
-rw-r--r--tools/include/nolibc/arch-arm.h111
-rw-r--r--tools/include/nolibc/arch-i386.h86
-rw-r--r--tools/include/nolibc/arch-loongarch.h83
-rw-r--r--tools/include/nolibc/arch-mips.h147
-rw-r--r--tools/include/nolibc/arch-powerpc.h221
-rw-r--r--tools/include/nolibc/arch-riscv.h83
-rw-r--r--tools/include/nolibc/arch-s390.h77
-rw-r--r--tools/include/nolibc/arch-x86_64.h86
-rw-r--r--tools/include/nolibc/arch.h2
-rw-r--r--tools/include/nolibc/crt.h61
-rw-r--r--tools/include/nolibc/nolibc.h9
-rw-r--r--tools/include/nolibc/stackprotector.h5
-rw-r--r--tools/include/nolibc/stdint.h2
-rw-r--r--tools/include/nolibc/stdio.h27
-rw-r--r--tools/include/nolibc/stdlib.h12
-rw-r--r--tools/include/nolibc/sys.h534
-rw-r--r--tools/include/nolibc/types.h22
-rw-r--r--tools/include/nolibc/unistd.h13
-rwxr-xr-xtools/net/ynl/cli.py12
-rw-r--r--tools/net/ynl/lib/__init__.py4
-rw-r--r--tools/net/ynl/lib/nlspec.py31
-rw-r--r--tools/net/ynl/lib/ynl.py198
-rw-r--r--tools/perf/bench/Build1
-rw-r--r--tools/perf/bench/bench.h1
-rw-r--r--tools/perf/bench/sched-seccomp-notify.c178
-rw-r--r--tools/perf/builtin-bench.c1
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/power/cpupower/lib/cpupower.c7
-rw-r--r--tools/power/cpupower/lib/cpupower_intern.h1
-rw-r--r--tools/power/cpupower/utils/cpuidle-set.c16
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c65
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h11
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c57
-rw-r--r--tools/power/x86/turbostat/turbostat.c2
-rw-r--r--tools/testing/kunit/configs/all_tests.config2
-rwxr-xr-xtools/testing/kunit/kunit.py70
-rw-r--r--tools/testing/kunit/kunit_kernel.py8
-rw-r--r--tools/testing/kunit/kunit_parser.py11
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py39
-rw-r--r--tools/testing/kunit/qemu_configs/arm64.py2
-rw-r--r--tools/testing/selftests/Makefile5
-rw-r--r--tools/testing/selftests/arm64/Makefile2
-rw-r--r--tools/testing/selftests/arm64/abi/hwcap.c319
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi.c38
-rw-r--r--tools/testing/selftests/arm64/bti/Makefile45
-rw-r--r--tools/testing/selftests/arm64/bti/compiler.h21
-rw-r--r--tools/testing/selftests/arm64/bti/gen/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/bti/system.c4
-rw-r--r--tools/testing/selftests/arm64/bti/system.h4
-rw-r--r--tools/testing/selftests/arm64/bti/test.c1
-rw-r--r--tools/testing/selftests/arm64/fp/vec-syscfg.c127
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.h27
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/zt_regs.c1
-rw-r--r--tools/testing/selftests/cachestat/Makefile2
-rw-r--r--tools/testing/selftests/cachestat/test_cachestat.c87
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c4
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh8
-rw-r--r--tools/testing/selftests/fchmodat2/.gitignore (renamed from tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore)2
-rw-r--r--tools/testing/selftests/fchmodat2/Makefile6
-rw-r--r--tools/testing/selftests/fchmodat2/fchmodat2_test.c142
-rw-r--r--tools/testing/selftests/filelock/Makefile5
-rw-r--r--tools/testing/selftests/filelock/ofdlocks.c132
-rwxr-xr-xtools/testing/selftests/filesystems/fat/run_fat_tests.sh2
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc31
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c7
-rw-r--r--tools/testing/selftests/kselftest_harness.h11
-rw-r--r--tools/testing/selftests/mm/hmm-tests.c7
-rw-r--r--tools/testing/selftests/net/config1
-rw-r--r--tools/testing/selftests/net/tls.c84
-rw-r--r--tools/testing/selftests/nolibc/Makefile111
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c609
-rw-r--r--tools/testing/selftests/prctl/.gitignore1
-rw-r--r--tools/testing/selftests/prctl/Makefile4
-rw-r--r--tools/testing/selftests/prctl/set-process-name.c62
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh61
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh8
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh44
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-remote.sh12
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh12
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh127
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS031
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE011
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TRACE012
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT1
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT2
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh5
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile17
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore2
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h152
-rwxr-xr-xtools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk376
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h17
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h41
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h14
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c14
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h28
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c32
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h34
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h221
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c12
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h58
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h93
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c79
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h59
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c51
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h103
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore2
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile12
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail1
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass0
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c73
-rwxr-xr-xtools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh103
-rw-r--r--tools/testing/selftests/resctrl/Makefile2
-rw-r--r--tools/testing/selftests/resctrl/cache.c66
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c28
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c29
-rw-r--r--tools/testing/selftests/resctrl/fill_buf.c87
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c9
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c17
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h17
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c83
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c7
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c64
-rw-r--r--tools/testing/selftests/rseq/Makefile2
-rw-r--r--tools/testing/selftests/rseq/compiler.h26
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h4
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h58
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h4
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h4
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv.h6
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h4
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h4
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c67
-rw-r--r--tools/testing/selftests/user_events/Makefile8
2222 files changed, 66201 insertions, 37767 deletions
diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat
new file mode 100644
index 000000000000..6731ffacc5f0
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-driver-qat
@@ -0,0 +1,61 @@
+What: /sys/kernel/debug/qat_<device>_<BDF>/qat/fw_counters
+Date: November 2023
+KernelVersion: 6.6
+Contact: qat-linux@intel.com
+Description: (RO) Read returns the number of requests sent to the FW and the number of responses
+		received from the FW for each Acceleration Engine.
+ Reported firmware counters::
+
+ <N>: Number of requests sent from Acceleration Engine N to FW and responses
+ Acceleration Engine N received from FW
+
+What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/config
+Date: November 2023
+KernelVersion: 6.6
+Contact: qat-linux@intel.com
+Description: (RW) Read returns value of the Heartbeat update period.
+ Write to the file changes this period value.
+
+		This period should reflect the planned polling interval of the
+		device health status. High-frequency Heartbeat monitoring wastes
+		CPU cycles but minimizes the customer’s system downtime. Also, if
+		there are large service requests that take some time to complete,
+		high-frequency Heartbeat monitoring could result in false reports
+		of unresponsiveness; in those cases, the period needs to be increased.
+
+ This parameter is effective only for c3xxx, c62x, dh895xcc devices.
+ 4xxx has this value internally fixed to 200ms.
+
+		The default value is 500; the minimum allowed value is 200.
+ All values are expressed in milliseconds.
+
+What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/queries_failed
+Date: November 2023
+KernelVersion: 6.6
+Contact: qat-linux@intel.com
+Description: (RO) Read returns the number of times the device became unresponsive.
+
+		Attribute returns the value of a counter that is incremented when
+		a status query returns a negative result.
+
+What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/queries_sent
+Date: November 2023
+KernelVersion: 6.6
+Contact: qat-linux@intel.com
+Description: (RO) Read returns the number of times the control process checked
+ if the device is responsive.
+
+		Attribute returns the value of a counter that is incremented on
+ every status query.
+
+What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/status
+Date: November 2023
+KernelVersion: 6.6
+Contact: qat-linux@intel.com
+Description: (RO) Read returns the device health status.
+
+		Returns 0 when the device is healthy, or -1 when it is
+		unresponsive or the query could not be sent.
+
+		The driver does not monitor the Heartbeat itself; it is left to
+		the user to poll the status periodically.
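Since the driver leaves Heartbeat monitoring to the user, the status attribute above is meant to be polled from userspace. The following is only an illustrative sketch (not part of this merge): it takes the status file path as an argument, because the <device> and <BDF> components of the debugfs path are system-specific::

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Poll a QAT heartbeat "status" file, e.g.
     * /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/status, and exit
     * when the device is reported unresponsive (-1) or the read fails.
     */
    int main(int argc, char **argv)
    {
            char buf[16];

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <heartbeat status file>\n", argv[0]);
                    return 1;
            }

            for (;;) {
                    FILE *f = fopen(argv[1], "r");

                    if (!f || !fgets(buf, sizeof(buf), f)) {
                            perror(argv[1]);
                            if (f)
                                    fclose(f);
                            return 1;
                    }
                    fclose(f);
                    buf[strcspn(buf, "\n")] = '\0';

                    if (strncmp(buf, "0", 1) != 0) {
                            fprintf(stderr, "device unresponsive (status %s)\n", buf);
                            return 1;
                    }
                    /* Match the polling interval to the configured heartbeat period. */
                    sleep(1);
            }
    }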
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 77942eedf4f6..183a07c4f191 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -556,6 +556,7 @@ Description: Control Symmetric Multi Threading (SMT)
================ =========================================
"on" SMT is enabled
"off" SMT is disabled
+ "<N>" SMT is enabled with N threads per core.
"forceoff" SMT is force disabled. Cannot be changed.
"notsupported" SMT is not supported by the CPU
"notimplemented" SMT runtime toggling is not
diff --git a/Documentation/ABI/testing/sysfs-driver-ccp b/Documentation/ABI/testing/sysfs-driver-ccp
index 7aded9b75553..ee6b787eee7a 100644
--- a/Documentation/ABI/testing/sysfs-driver-ccp
+++ b/Documentation/ABI/testing/sysfs-driver-ccp
@@ -85,3 +85,21 @@ Description:
Possible values:
0: Not enforced
1: Enforced
+
+What: /sys/bus/pci/devices/<BDF>/bootloader_version
+Date: June 2023
+KernelVersion: 6.4
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/bootloader_version
+ file reports the firmware version of the AMD AGESA
+ bootloader.
+
+What: /sys/bus/pci/devices/<BDF>/tee_version
+Date: June 2023
+KernelVersion: 6.4
+Contact: mario.limonciello@amd.com
+Description:
+ The /sys/bus/pci/devices/<BDF>/tee_version
+ file reports the firmware version of the AMD Trusted
+ Execution Environment (TEE).
diff --git a/Documentation/ABI/testing/sysfs-driver-chromeos-acpi b/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
index c308926e1568..d46b1c85840d 100644
--- a/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
+++ b/Documentation/ABI/testing/sysfs-driver-chromeos-acpi
@@ -1,4 +1,5 @@
What: /sys/bus/platform/devices/GGL0001:*/BINF.2
+ /sys/bus/platform/devices/GOOG0016:*/BINF.2
Date: May 2022
KernelVersion: 5.19
Description:
@@ -10,6 +11,7 @@ Description:
== ===============================
What: /sys/bus/platform/devices/GGL0001:*/BINF.3
+ /sys/bus/platform/devices/GOOG0016:*/BINF.3
Date: May 2022
KernelVersion: 5.19
Description:
@@ -23,6 +25,7 @@ Description:
== =====================================
What: /sys/bus/platform/devices/GGL0001:*/CHSW
+ /sys/bus/platform/devices/GOOG0016:*/CHSW
Date: May 2022
KernelVersion: 5.19
Description:
@@ -38,6 +41,7 @@ Description:
==== ===========================================
What: /sys/bus/platform/devices/GGL0001:*/FMAP
+ /sys/bus/platform/devices/GOOG0016:*/FMAP
Date: May 2022
KernelVersion: 5.19
Description:
@@ -45,6 +49,7 @@ Description:
processor firmware flashmap.
What: /sys/bus/platform/devices/GGL0001:*/FRID
+ /sys/bus/platform/devices/GOOG0016:*/FRID
Date: May 2022
KernelVersion: 5.19
Description:
@@ -52,6 +57,7 @@ Description:
main processor firmware.
What: /sys/bus/platform/devices/GGL0001:*/FWID
+ /sys/bus/platform/devices/GOOG0016:*/FWID
Date: May 2022
KernelVersion: 5.19
Description:
@@ -59,6 +65,7 @@ Description:
main processor firmware.
What: /sys/bus/platform/devices/GGL0001:*/GPIO.X/GPIO.0
+ /sys/bus/platform/devices/GOOG0016:*/GPIO.X/GPIO.0
Date: May 2022
KernelVersion: 5.19
Description:
@@ -73,6 +80,7 @@ Description:
=========== ==================================
What: /sys/bus/platform/devices/GGL0001:*/GPIO.X/GPIO.1
+ /sys/bus/platform/devices/GOOG0016:*/GPIO.X/GPIO.1
Date: May 2022
KernelVersion: 5.19
Description:
@@ -84,6 +92,7 @@ Description:
== =======================
What: /sys/bus/platform/devices/GGL0001:*/GPIO.X/GPIO.2
+ /sys/bus/platform/devices/GOOG0016:*/GPIO.X/GPIO.2
Date: May 2022
KernelVersion: 5.19
Description:
@@ -91,18 +100,21 @@ Description:
controller.
What: /sys/bus/platform/devices/GGL0001:*/GPIO.X/GPIO.3
+ /sys/bus/platform/devices/GOOG0016:*/GPIO.X/GPIO.3
Date: May 2022
KernelVersion: 5.19
Description:
Returns name of the GPIO controller.
What: /sys/bus/platform/devices/GGL0001:*/HWID
+ /sys/bus/platform/devices/GOOG0016:*/HWID
Date: May 2022
KernelVersion: 5.19
Description:
Returns hardware ID for the Chromebook.
What: /sys/bus/platform/devices/GGL0001:*/MECK
+ /sys/bus/platform/devices/GOOG0016:*/MECK
Date: May 2022
KernelVersion: 5.19
Description:
@@ -113,6 +125,7 @@ Description:
present, or if the firmware was unable to read the extended registers, this buffer size can be zero.
What: /sys/bus/platform/devices/GGL0001:*/VBNV.0
+ /sys/bus/platform/devices/GOOG0016:*/VBNV.0
Date: May 2022
KernelVersion: 5.19
Description:
@@ -122,6 +135,7 @@ Description:
clock data).
What: /sys/bus/platform/devices/GGL0001:*/VBNV.1
+ /sys/bus/platform/devices/GOOG0016:*/VBNV.1
Date: May 2022
KernelVersion: 5.19
Description:
@@ -129,9 +143,10 @@ Description:
storage block.
What: /sys/bus/platform/devices/GGL0001:*/VDAT
+ /sys/bus/platform/devices/GOOG0016:*/VDAT
Date: May 2022
KernelVersion: 5.19
Description:
Returns the verified boot data block shared between the
firmware verification step and the kernel verification step
- (binary).
+ (hex dump).
diff --git a/Documentation/ABI/testing/sysfs-platform-power-on-reason b/Documentation/ABI/testing/sysfs-platform-power-on-reason
new file mode 100644
index 000000000000..c3b29dbc64bf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-power-on-reason
@@ -0,0 +1,12 @@
+What: /sys/devices/platform/.../power_on_reason
+Date: June 2023
+KernelVersion: 6.5
+Contact: Kamel Bouhara <kamel.bouhara@bootlin.com>
+Description: Shows system power on reason. The following strings/reasons can
+ be read (the list can be extended):
+ "regular power-up", "RTC wakeup", "watchdog timeout",
+ "software reset", "reset button action", "CPU clock failure",
+ "crystal oscillator failure", "brown-out reset",
+ "unknown reason".
+
+ The file is read only.
diff --git a/Documentation/RCU/lockdep-splat.rst b/Documentation/RCU/lockdep-splat.rst
index 2a5c79db57dc..bcbc4b3c88d7 100644
--- a/Documentation/RCU/lockdep-splat.rst
+++ b/Documentation/RCU/lockdep-splat.rst
@@ -10,7 +10,7 @@ misuses of the RCU API, most notably using one of the rcu_dereference()
family to access an RCU-protected pointer without the proper protection.
When such misuse is detected, an lockdep-RCU splat is emitted.
-The usual cause of a lockdep-RCU slat is someone accessing an
+The usual cause of a lockdep-RCU splat is someone accessing an
RCU-protected data structure without either (1) being in the right kind of
RCU read-side critical section or (2) holding the right update-side lock.
This problem can therefore be serious: it might result in random memory
diff --git a/Documentation/RCU/rculist_nulls.rst b/Documentation/RCU/rculist_nulls.rst
index 9a734bf54b76..21e40fcc08de 100644
--- a/Documentation/RCU/rculist_nulls.rst
+++ b/Documentation/RCU/rculist_nulls.rst
@@ -18,7 +18,16 @@ to solve following problem.
Without 'nulls', a typical RCU linked list managing objects which are
allocated with SLAB_TYPESAFE_BY_RCU kmem_cache can use the following
-algorithms:
+algorithms. The following examples assume 'obj' is a pointer to such
+objects, which have the type shown below.
+
+::
+
+ struct object {
+ struct hlist_node obj_node;
+ atomic_t refcnt;
+ unsigned int key;
+ };
1) Lookup algorithm
-------------------
@@ -26,11 +35,13 @@ algorithms:
::
begin:
- rcu_read_lock()
+ rcu_read_lock();
obj = lockless_lookup(key);
if (obj) {
- if (!try_get_ref(obj)) // might fail for free objects
+ if (!try_get_ref(obj)) { // might fail for free objects
+ rcu_read_unlock();
goto begin;
+ }
/*
* Because a writer could delete object, and a writer could
* reuse these object before the RCU grace period, we
@@ -54,7 +65,7 @@ but a version with an additional memory barrier (smp_rmb())
struct hlist_node *node, *next;
for (pos = rcu_dereference((head)->first);
pos && ({ next = pos->next; smp_rmb(); prefetch(next); 1; }) &&
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });
+ ({ obj = hlist_entry(pos, typeof(*obj), obj_node); 1; });
pos = rcu_dereference(next))
if (obj->key == key)
return obj;
@@ -66,10 +77,10 @@ And note the traditional hlist_for_each_entry_rcu() misses this smp_rmb()::
struct hlist_node *node;
for (pos = rcu_dereference((head)->first);
pos && ({ prefetch(pos->next); 1; }) &&
- ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; });
+ ({ obj = hlist_entry(pos, typeof(*obj), obj_node); 1; });
pos = rcu_dereference(pos->next))
- if (obj->key == key)
- return obj;
+ if (obj->key == key)
+ return obj;
return NULL;
Quoting Corey Minyard::
@@ -86,7 +97,7 @@ Quoting Corey Minyard::
2) Insertion algorithm
----------------------
-We need to make sure a reader cannot read the new 'obj->obj_next' value
+We need to make sure a reader cannot read the new 'obj->obj_node.next' value
and previous value of 'obj->key'. Otherwise, an item could be deleted
from a chain, and inserted into another chain. If new chain was empty
before the move, 'next' pointer is NULL, and lockless reader can not
@@ -129,8 +140,7 @@ very very fast (before the end of RCU grace period)
Avoiding extra smp_rmb()
========================
-With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup()
-and extra _release() in insert function.
+With hlist_nulls we can avoid extra smp_rmb() in lockless_lookup().
For example, if we choose to store the slot number as the 'nulls'
end-of-list marker for each slot of the hash table, we can detect
@@ -142,6 +152,9 @@ the beginning. If the object was moved to the same chain,
then the reader doesn't care: It might occasionally
scan the list again without harm.
+Note that using hlist_nulls means the type of the 'obj_node' field of
+'struct object' becomes 'struct hlist_nulls_node'.
+
1) lookup algorithm
-------------------
@@ -151,7 +164,7 @@ scan the list again without harm.
head = &table[slot];
begin:
rcu_read_lock();
- hlist_nulls_for_each_entry_rcu(obj, node, head, member) {
+ hlist_nulls_for_each_entry_rcu(obj, node, head, obj_node) {
if (obj->key == key) {
if (!try_get_ref(obj)) { // might fail for free objects
rcu_read_unlock();
@@ -182,6 +195,9 @@ scan the list again without harm.
2) Insert algorithm
-------------------
+Same as the above one, but uses hlist_nulls_add_head_rcu() instead of
+hlist_add_head_rcu().
+
::
/*
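As a standalone illustration of the nulls-based lookup that the rculist_nulls.rst hunks above document, the sketch below is kernel-style C using the hypothetical 'struct object' from those examples; try_get_ref()/put_ref() are illustrative refcount helpers, and the hash table is assumed to use the slot number as its 'nulls' end-of-list value. It is a sketch of the documented pattern, not code taken from this merge::

    #include <linux/rculist_nulls.h>
    #include <linux/rcupdate.h>
    #include <linux/atomic.h>
    #include <linux/types.h>

    struct object {
            struct hlist_nulls_node obj_node;
            atomic_t refcnt;
            unsigned int key;
    };

    /* Illustrative helpers: try_get_ref() fails once the object was freed. */
    static bool try_get_ref(struct object *obj)
    {
            return atomic_inc_not_zero(&obj->refcnt);
    }

    static void put_ref(struct object *obj)
    {
            atomic_dec(&obj->refcnt);   /* real code would free on the last put */
    }

    static struct object *lookup(struct hlist_nulls_head *table,
                                 unsigned int slot, unsigned int key)
    {
            struct hlist_nulls_head *head = &table[slot];
            struct hlist_nulls_node *node;
            struct object *obj;

    begin:
            rcu_read_lock();
            hlist_nulls_for_each_entry_rcu(obj, node, head, obj_node) {
                    if (obj->key == key) {
                            if (!try_get_ref(obj)) {    /* might fail for freed objects */
                                    rcu_read_unlock();
                                    goto begin;
                            }
                            if (obj->key != key) {      /* object was reused for another key */
                                    put_ref(obj);
                                    rcu_read_unlock();
                                    goto begin;
                            }
                            rcu_read_unlock();
                            return obj;
                    }
            }
            /*
             * The 'nulls' value identifies the chain the scan ended on; if it
             * is not our slot, the object may have been moved to another chain
             * while we scanned, so restart.
             */
            if (get_nulls_value(node) != slot) {
                    rcu_read_unlock();
                    goto begin;
            }
            rcu_read_unlock();
            return NULL;
    }

The insertion side only needs hlist_nulls_add_head_rcu(&obj->obj_node, &table[slot]) under the chain's update-side lock, as noted in the hunk above.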
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 722b6eca2e93..e5d71ba4e6ae 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -553,7 +553,7 @@
others).
ccw_timeout_log [S390]
- See Documentation/s390/common_io.rst for details.
+ See Documentation/arch/s390/common_io.rst for details.
cgroup_disable= [KNL] Disable a particular controller or optional feature
Format: {name of the controller(s) or feature(s) to disable}
@@ -598,7 +598,7 @@
Setting checkreqprot to 1 is deprecated.
cio_ignore= [S390]
- See Documentation/s390/common_io.rst for details.
+ See Documentation/arch/s390/common_io.rst for details.
clearcpuid=X[,X...] [X86]
Disable CPUID feature X for the kernel. See
@@ -2938,6 +2938,10 @@
locktorture.torture_type= [KNL]
Specify the locking implementation to test.
+ locktorture.writer_fifo= [KNL]
+ Run the write-side locktorture kthreads at
+ sched_set_fifo() real-time priority.
+
locktorture.verbose= [KNL]
Enable additional printk() statements.
@@ -4949,6 +4953,15 @@
test until boot completes in order to avoid
interference.
+ rcuscale.kfree_by_call_rcu= [KNL]
+ In kernels built with CONFIG_RCU_LAZY=y, test
+ call_rcu() instead of kfree_rcu().
+
+ rcuscale.kfree_mult= [KNL]
+ Instead of allocating an object of size kfree_obj,
+ allocate one of kfree_mult * sizeof(kfree_obj).
+ Defaults to 1.
+
rcuscale.kfree_rcu_test= [KNL]
Set to measure performance of kfree_rcu() flooding.
@@ -4974,6 +4987,12 @@
Number of loops doing rcuscale.kfree_alloc_num number
of allocations and frees.
+ rcuscale.minruntime= [KNL]
+ Set the minimum test run time in seconds. This
+ does not affect the data-collection interval,
+ but instead allows better measurement of things
+ like CPU consumption.
+
rcuscale.nreaders= [KNL]
Set number of RCU readers. The value -1 selects
N, where N is the number of CPUs. A value
@@ -4988,7 +5007,7 @@
the same as for rcuscale.nreaders.
N, where N is the number of CPUs
- rcuscale.perf_type= [KNL]
+ rcuscale.scale_type= [KNL]
Specify the RCU implementation to test.
rcuscale.shutdown= [KNL]
@@ -5004,6 +5023,11 @@
in microseconds. The default of zero says
no holdoff.
+ rcuscale.writer_holdoff_jiffies= [KNL]
+ Additional write-side holdoff between grace
+ periods, but in jiffies. The default of zero
+ says no holdoff.
+
rcutorture.fqs_duration= [KNL]
Set duration of force_quiescent_state bursts
in microseconds.
@@ -5285,6 +5309,13 @@
number avoids disturbing real-time workloads,
but lengthens grace periods.
+ rcupdate.rcu_task_lazy_lim= [KNL]
+ Number of callbacks on a given CPU that will
+ cancel laziness on that CPU. Use -1 to disable
+ cancellation of laziness, but be advised that
+ doing so increases the danger of OOM due to
+ callback flooding.
+
rcupdate.rcu_task_stall_info= [KNL]
Set initial timeout in jiffies for RCU task stall
informational messages, which give some indication
@@ -5314,6 +5345,29 @@
A change in value does not take effect until
the beginning of the next grace period.
+ rcupdate.rcu_tasks_lazy_ms= [KNL]
+			Set the timeout in milliseconds of RCU Tasks
+			asynchronous callback batching for call_rcu_tasks().
+ A negative value will take the default. A value
+ of zero will disable batching. Batching is
+ always disabled for synchronize_rcu_tasks().
+
+ rcupdate.rcu_tasks_rude_lazy_ms= [KNL]
+			Set the timeout in milliseconds of RCU Tasks
+ Rude asynchronous callback batching for
+ call_rcu_tasks_rude(). A negative value
+ will take the default. A value of zero will
+ disable batching. Batching is always disabled
+ for synchronize_rcu_tasks_rude().
+
+ rcupdate.rcu_tasks_trace_lazy_ms= [KNL]
+			Set timeout in milliseconds of RCU Tasks
+ Trace asynchronous callback batching for
+ call_rcu_tasks_trace(). A negative value
+ will take the default. A value of zero will
+ disable batching. Batching is always disabled
+ for synchronize_rcu_tasks_trace().
+
rcupdate.rcu_self_test= [KNL]
Run the RCU early boot self tests
@@ -5522,6 +5576,10 @@
Useful for devices that are detected asynchronously
(e.g. USB and MMC devices).
+ rootwait= [KNL] Maximum time (in seconds) to wait for root device
+ to show up before attempting to mount the root
+ filesystem.
+
rproc_mem=nn[KMG][@address]
[KNL,ARM,CMA] Remoteproc physical memory block.
Memory area to be used by remote processor image,
@@ -6275,10 +6333,6 @@
-1: disable all critical trip points in all thermal zones
<degrees C>: override all critical trip points
- thermal.nocrt= [HW,ACPI]
- Set to disable actions on ACPI thermal zone
- critical and hot trip points.
-
thermal.off= [HW,ACPI]
1: disable ACPI thermal control
@@ -6340,6 +6394,13 @@
This will guarantee that all the other pcrs
are saved.
+ tpm_tis.interrupts= [HW,TPM]
+ Enable interrupts for the MMIO based physical layer
+ for the FIFO interface. By default it is set to false
+ (0). For more information about TPM hardware interfaces
+ defined by Trusted Computing Group (TCG) see
+ https://trustedcomputinggroup.org/resource/pc-client-platform-tpm-profile-ptp-specification/
+
tp_printk [FTRACE]
Have the tracepoints sent to printk as well as the
tracing ring buffer. This is useful for early boot up
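Several of the entries added above (the rcuscale.* and rcupdate.rcu_tasks_*_lazy_ms= knobs, locktorture.writer_fifo=) are module parameters that show up on the kernel command line as ``<module>.<param>=``. As a minimal sketch of how such a parameter is typically declared, using hypothetical names rather than the actual rcupdate or locktorture code:

.. code-block:: c

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* A negative value selects the built-in default, as in the entries above. */
	static int example_lazy_ms = -1;
	module_param(example_lazy_ms, int, 0444);
	MODULE_PARM_DESC(example_lazy_ms,
			 "Timeout in ms for asynchronous callback batching");

Booting with ``example.example_lazy_ms=1000`` would then set the value at boot time, with the ``example.`` prefix coming from the name of the module that defines the parameter.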
diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
index bedd3a1d7b42..e96f057ea2a0 100644
--- a/Documentation/arch/arm64/silicon-errata.rst
+++ b/Documentation/arch/arm64/silicon-errata.rst
@@ -63,6 +63,14 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #1902691 | ARM64_ERRATUM_1902691 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2077057 | ARM64_ERRATUM_2077057 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
@@ -109,14 +117,6 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+
-| ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 |
-+----------------+-----------------+-----------------+-----------------------------+
-| ARM | Cortex-A510 | #2077057 | ARM64_ERRATUM_2077057 |
-+----------------+-----------------+-----------------+-----------------------------+
-| ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 |
-+----------------+-----------------+-----------------+-----------------------------+
-| ARM | Cortex-A510 | #2658417 | ARM64_ERRATUM_2658417 |
-+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
@@ -198,6 +198,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
+| Hisilicon | Hip08 SMMU PMCG | #162001900 | N/A |
+| | Hip09 SMMU PMCG | | |
++----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
+----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/arch/arm64/sme.rst b/Documentation/arch/arm64/sme.rst
index ba529a1dc606..3d0e53ecac4f 100644
--- a/Documentation/arch/arm64/sme.rst
+++ b/Documentation/arch/arm64/sme.rst
@@ -322,7 +322,7 @@ The regset data starts with struct user_za_header, containing:
VL is supported.
* The size and layout of the payload depends on the header fields. The
- SME_PT_ZA_*() macros are provided to facilitate access to the data.
+ ZA_PT_ZA*() macros are provided to facilitate access to the data.
* In either case, for SETREGSET it is permissible to omit the payload, in which
case the vector length and flags are changed and PSTATE.ZA is set to 0
diff --git a/Documentation/arch/index.rst b/Documentation/arch/index.rst
index 8458b88e9b79..c9a209878cf3 100644
--- a/Documentation/arch/index.rst
+++ b/Documentation/arch/index.rst
@@ -21,7 +21,7 @@ implementation.
parisc/index
../powerpc/index
../riscv/index
- ../s390/index
+ s390/index
sh/index
sparc/index
x86/index
diff --git a/Documentation/s390/3270.ChangeLog b/Documentation/arch/s390/3270.ChangeLog
index ecaf60b6c381..ecaf60b6c381 100644
--- a/Documentation/s390/3270.ChangeLog
+++ b/Documentation/arch/s390/3270.ChangeLog
diff --git a/Documentation/s390/3270.rst b/Documentation/arch/s390/3270.rst
index e09e77954238..467eace91473 100644
--- a/Documentation/s390/3270.rst
+++ b/Documentation/arch/s390/3270.rst
@@ -116,7 +116,7 @@ Here are the installation steps in detail:
as a 3270, not a 3215.
5. Run the 3270 configuration script config3270. It is
- distributed in this same directory, Documentation/s390, as
+ distributed in this same directory, Documentation/arch/s390, as
config3270.sh. Inspect the output script it produces,
/tmp/mkdev3270, and then run that script. This will create the
necessary character special device files and make the necessary
@@ -125,7 +125,7 @@ Here are the installation steps in detail:
Then notify /sbin/init that /etc/inittab has changed, by issuing
the telinit command with the q operand::
- cd Documentation/s390
+ cd Documentation/arch/s390
sh config3270.sh
sh /tmp/mkdev3270
telinit q
diff --git a/Documentation/s390/cds.rst b/Documentation/arch/s390/cds.rst
index 7006d8209d2e..bcad2a14244a 100644
--- a/Documentation/s390/cds.rst
+++ b/Documentation/arch/s390/cds.rst
@@ -39,7 +39,7 @@ some of them are ESA/390 platform specific.
Note:
In order to write a driver for S/390, you also need to look into the interface
- described in Documentation/s390/driver-model.rst.
+ described in Documentation/arch/s390/driver-model.rst.
Note for porting drivers from 2.4:
diff --git a/Documentation/s390/common_io.rst b/Documentation/arch/s390/common_io.rst
index 846485681ce7..6dcb40cb7145 100644
--- a/Documentation/s390/common_io.rst
+++ b/Documentation/arch/s390/common_io.rst
@@ -136,5 +136,5 @@ debugfs entries
The level of logging can be changed to be more or less verbose by piping to
/sys/kernel/debug/s390dbf/cio_*/level a number between 0 and 6; see the
- documentation on the S/390 debug feature (Documentation/s390/s390dbf.rst)
+ documentation on the S/390 debug feature (Documentation/arch/s390/s390dbf.rst)
for details.
diff --git a/Documentation/s390/config3270.sh b/Documentation/arch/s390/config3270.sh
index 515e2f431487..515e2f431487 100644
--- a/Documentation/s390/config3270.sh
+++ b/Documentation/arch/s390/config3270.sh
diff --git a/Documentation/s390/driver-model.rst b/Documentation/arch/s390/driver-model.rst
index ad4bc2dbea43..ad4bc2dbea43 100644
--- a/Documentation/s390/driver-model.rst
+++ b/Documentation/arch/s390/driver-model.rst
diff --git a/Documentation/s390/features.rst b/Documentation/arch/s390/features.rst
index 57c296a9d8f3..57c296a9d8f3 100644
--- a/Documentation/s390/features.rst
+++ b/Documentation/arch/s390/features.rst
diff --git a/Documentation/s390/index.rst b/Documentation/arch/s390/index.rst
index 73c79bf586fd..73c79bf586fd 100644
--- a/Documentation/s390/index.rst
+++ b/Documentation/arch/s390/index.rst
diff --git a/Documentation/s390/monreader.rst b/Documentation/arch/s390/monreader.rst
index 21cdfb699b49..21cdfb699b49 100644
--- a/Documentation/s390/monreader.rst
+++ b/Documentation/arch/s390/monreader.rst
diff --git a/Documentation/s390/pci.rst b/Documentation/arch/s390/pci.rst
index a1a72a47dc96..d5755484d8e7 100644
--- a/Documentation/s390/pci.rst
+++ b/Documentation/arch/s390/pci.rst
@@ -40,7 +40,7 @@ For example:
Change the level of logging to be more or less verbose by piping
a number between 0 and 6 to /sys/kernel/debug/s390dbf/pci_*/level. For
details, see the documentation on the S/390 debug feature at
- Documentation/s390/s390dbf.rst.
+ Documentation/arch/s390/s390dbf.rst.
Sysfs entries
=============
diff --git a/Documentation/s390/qeth.rst b/Documentation/arch/s390/qeth.rst
index f02fdaa68de0..f02fdaa68de0 100644
--- a/Documentation/s390/qeth.rst
+++ b/Documentation/arch/s390/qeth.rst
diff --git a/Documentation/s390/s390dbf.rst b/Documentation/arch/s390/s390dbf.rst
index af8bdc3629e7..af8bdc3629e7 100644
--- a/Documentation/s390/s390dbf.rst
+++ b/Documentation/arch/s390/s390dbf.rst
diff --git a/Documentation/s390/text_files.rst b/Documentation/arch/s390/text_files.rst
index c94d05d4fa17..c94d05d4fa17 100644
--- a/Documentation/s390/text_files.rst
+++ b/Documentation/arch/s390/text_files.rst
diff --git a/Documentation/s390/vfio-ap-locking.rst b/Documentation/arch/s390/vfio-ap-locking.rst
index 0dfcdb562e21..0dfcdb562e21 100644
--- a/Documentation/s390/vfio-ap-locking.rst
+++ b/Documentation/arch/s390/vfio-ap-locking.rst
diff --git a/Documentation/s390/vfio-ap.rst b/Documentation/arch/s390/vfio-ap.rst
index bb3f4c4e2885..bb3f4c4e2885 100644
--- a/Documentation/s390/vfio-ap.rst
+++ b/Documentation/arch/s390/vfio-ap.rst
diff --git a/Documentation/s390/vfio-ccw.rst b/Documentation/arch/s390/vfio-ccw.rst
index 37026fa18179..42960b7b0d70 100644
--- a/Documentation/s390/vfio-ccw.rst
+++ b/Documentation/arch/s390/vfio-ccw.rst
@@ -440,6 +440,6 @@ Reference
1. ESA/s390 Principles of Operation manual (IBM Form. No. SA22-7832)
2. ESA/390 Common I/O Device Commands manual (IBM Form. No. SA22-7204)
3. https://en.wikipedia.org/wiki/Channel_I/O
-4. Documentation/s390/cds.rst
+4. Documentation/arch/s390/cds.rst
5. Documentation/driver-api/vfio.rst
6. Documentation/driver-api/vfio-mediated-device.rst
diff --git a/Documentation/s390/zfcpdump.rst b/Documentation/arch/s390/zfcpdump.rst
index a61de7aa8778..a61de7aa8778 100644
--- a/Documentation/s390/zfcpdump.rst
+++ b/Documentation/arch/s390/zfcpdump.rst
diff --git a/Documentation/arch/x86/boot.rst b/Documentation/arch/x86/boot.rst
index 33520ecdb37a..cdbca15a4fc2 100644
--- a/Documentation/arch/x86/boot.rst
+++ b/Documentation/arch/x86/boot.rst
@@ -1417,7 +1417,7 @@ execution context provided by the EFI firmware.
The function prototype for the handover entry point looks like this::
- efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
+ efi_stub_entry(void *handle, efi_system_table_t *table, struct boot_params *bp)
'handle' is the EFI image handle passed to the boot loader by the EFI
firmware, 'table' is the EFI system table - these are the first two
diff --git a/Documentation/core-api/cpu_hotplug.rst b/Documentation/core-api/cpu_hotplug.rst
index e6f5bc39cf5c..b9ae591d0b18 100644
--- a/Documentation/core-api/cpu_hotplug.rst
+++ b/Documentation/core-api/cpu_hotplug.rst
@@ -395,8 +395,8 @@ multi-instance state the following function is available:
* cpuhp_setup_state_multi(state, name, startup, teardown)
The @state argument is either a statically allocated state or one of the
-constants for dynamically allocated states - CPUHP_PREPARE_DYN,
-CPUHP_ONLINE_DYN - depending on the state section (PREPARE, ONLINE) for
+constants for dynamically allocated states - CPUHP_BP_PREPARE_DYN,
+CPUHP_AP_ONLINE_DYN - depending on the state section (PREPARE, ONLINE) for
which a dynamic state should be allocated.
The @name argument is used for sysfs output and for instrumentation. The
@@ -588,7 +588,7 @@ notifications on online and offline operations::
Setup and teardown a dynamically allocated state in the ONLINE section
for notifications on offline operations::
- state = cpuhp_setup_state(CPUHP_ONLINE_DYN, "subsys:offline", NULL, subsys_cpu_offline);
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:offline", NULL, subsys_cpu_offline);
if (state < 0)
return state;
....
@@ -597,7 +597,7 @@ for notifications on offline operations::
Setup and teardown a dynamically allocated state in the ONLINE section
for notifications on online operations without invoking the callbacks::
- state = cpuhp_setup_state_nocalls(CPUHP_ONLINE_DYN, "subsys:online", subsys_cpu_online, NULL);
+ state = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "subsys:online", subsys_cpu_online, NULL);
if (state < 0)
return state;
....
@@ -606,7 +606,7 @@ for notifications on online operations without invoking the callbacks::
Setup, use and teardown a dynamically allocated multi-instance state in the
ONLINE section for notifications on online and offline operation::
- state = cpuhp_setup_state_multi(CPUHP_ONLINE_DYN, "subsys:online", subsys_cpu_online, subsys_cpu_offline);
+ state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "subsys:online", subsys_cpu_online, subsys_cpu_offline);
if (state < 0)
return state;
....
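The snippets above end with the multi-instance variant. A minimal sketch of how the pieces fit together for a dynamically allocated CPUHP_AP_ONLINE_DYN state, keeping the subsys_cpu_online()/subsys_cpu_offline() names from the examples; the subsys_instance structure, subsys_state variable and subsys_init() helper are hypothetical:

.. code-block:: c

	#include <linux/cpu.h>
	#include <linux/cpuhotplug.h>

	struct subsys_instance {
		struct hlist_node node;
		/* per-instance data */
	};

	static enum cpuhp_state subsys_state;

	static int subsys_cpu_online(unsigned int cpu, struct hlist_node *node)
	{
		/* Bring the instance embedding @node up on @cpu. */
		return 0;
	}

	static int subsys_cpu_offline(unsigned int cpu, struct hlist_node *node)
	{
		/* Tear the instance embedding @node down on @cpu. */
		return 0;
	}

	static int subsys_init(struct subsys_instance *inst)
	{
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "subsys:online",
					      subsys_cpu_online, subsys_cpu_offline);
		if (ret < 0)
			return ret;
		subsys_state = ret;

		/* Invokes subsys_cpu_online() for this instance on all online CPUs. */
		return cpuhp_state_add_instance(subsys_state, &inst->node);
	}

Teardown is the mirror image: cpuhp_state_remove_instance() for each instance, then cpuhp_remove_multi_state() once the last instance is gone.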
diff --git a/Documentation/core-api/netlink.rst b/Documentation/core-api/netlink.rst
index e4a938a05cc9..9f692b02bfe6 100644
--- a/Documentation/core-api/netlink.rst
+++ b/Documentation/core-api/netlink.rst
@@ -67,10 +67,11 @@ Globals
kernel-policy
~~~~~~~~~~~~~
-Defines if the kernel validation policy is per operation (``per-op``)
-or for the entire family (``global``). New families should use ``per-op``
-(default) to be able to narrow down the attributes accepted by a specific
-command.
+Defines whether the kernel validation policy is the same for all operations of
+the family (``global``), defined for each operation individually (``per-op``),
+or defined separately for each operation and operation type, do vs dump
+(``split``). New families should use ``per-op`` (default) to be able to narrow
+down the attributes accepted by a specific command.
checks
------
diff --git a/Documentation/dev-tools/kunit/run_wrapper.rst b/Documentation/dev-tools/kunit/run_wrapper.rst
index dafe8eb28d30..19ddf5e07013 100644
--- a/Documentation/dev-tools/kunit/run_wrapper.rst
+++ b/Documentation/dev-tools/kunit/run_wrapper.rst
@@ -321,3 +321,15 @@ command line arguments:
- ``--json``: If set, stores the test results in a JSON format and prints to `stdout` or
saves to a file if a filename is specified.
+
+- ``--filter``: Specifies filters on test attributes, for example, ``speed!=slow``.
+ Multiple filters can be used by wrapping input in quotes and separating filters
+ by commas. Example: ``--filter "speed>slow, module=example"``.
+
+- ``--filter_action``: If set to ``skip``, filtered tests will be shown as skipped
+ in the output rather than showing no output.
+
+- ``--list_tests``: If set, lists all tests that will be run.
+
+- ``--list_tests_attr``: If set, lists all tests that will be run and all of their
+ attributes.
diff --git a/Documentation/dev-tools/kunit/running_tips.rst b/Documentation/dev-tools/kunit/running_tips.rst
index 8e8c493f17d1..766f9cdea0fa 100644
--- a/Documentation/dev-tools/kunit/running_tips.rst
+++ b/Documentation/dev-tools/kunit/running_tips.rst
@@ -262,3 +262,169 @@ other code executed during boot, e.g.
# Reset coverage counters before running the test.
$ echo 0 > /sys/kernel/debug/gcov/reset
$ modprobe kunit-example-test
+
+
+Test Attributes and Filtering
+=============================
+
+Test suites and cases can be marked with test attributes, such as the speed of
+a test. These attributes will later be printed in test output and can be used to
+filter test execution.
+
+Marking Test Attributes
+-----------------------
+
+Tests are marked with an attribute by including a ``kunit_attributes`` object
+in the test definition.
+
+Test cases can be marked using the ``KUNIT_CASE_ATTR(test_name, attributes)``
+macro to define the test case instead of ``KUNIT_CASE(test_name)``.
+
+.. code-block:: c
+
+ static const struct kunit_attributes example_attr = {
+ .speed = KUNIT_VERY_SLOW,
+ };
+
+ static struct kunit_case example_test_cases[] = {
+ KUNIT_CASE_ATTR(example_test, example_attr),
+ };
+
+.. note::
+ To mark a test case as slow, you can also use ``KUNIT_CASE_SLOW(test_name)``.
+ This is a helpful macro as the slow attribute is the most commonly used.
+
+Test suites can be marked with an attribute by setting the "attr" field in the
+suite definition.
+
+.. code-block:: c
+
+ static const struct kunit_attributes example_attr = {
+ .speed = KUNIT_VERY_SLOW,
+ };
+
+ static struct kunit_suite example_test_suite = {
+ ...,
+ .attr = example_attr,
+ };
+
+.. note::
+ Not all attributes need to be set in a ``kunit_attributes`` object. Unset
+ attributes will remain uninitialized and act as though the attribute is set
+ to 0 or NULL. Thus, if an attribute is set to 0, it is treated as unset.
+ These unset attributes will not be reported and may act as a default value
+ for filtering purposes.
+
+Reporting Attributes
+--------------------
+
+When a user runs tests, attributes will be present in the raw kernel output (in
+KTAP format). Note that attributes will be hidden by default in kunit.py output
+for all passing tests but the raw kernel output can be accessed using the
+``--raw_output`` flag. This is an example of how test attributes for test cases
+will be formatted in kernel output:
+
+.. code-block:: none
+
+ # example_test.speed: slow
+ ok 1 example_test
+
+This is an example of how test attributes for test suites will be formatted in
+kernel output:
+
+.. code-block:: none
+
+ KTAP version 2
+ # Subtest: example_suite
+ # module: kunit_example_test
+ 1..3
+ ...
+ ok 1 example_suite
+
+Additionally, users can output a full attribute report of tests with their
+attributes, using the command line flag ``--list_tests_attr``:
+
+.. code-block:: bash
+
+ kunit.py run "example" --list_tests_attr
+
+.. note::
+ This report can be accessed when running KUnit manually by passing in the
+ module_param ``kunit.action=list_attr``.
+
+Filtering
+---------
+
+Users can filter tests using the ``--filter`` command line flag when running
+tests. As an example:
+
+.. code-block:: bash
+
+ kunit.py run --filter speed=slow
+
+
+You can also use the following operations on filters: "<", ">", "<=", ">=",
+"!=", and "=". Example:
+
+.. code-block:: bash
+
+ kunit.py run --filter "speed>slow"
+
+This example will run all tests with speeds faster than slow. Note that the
+characters < and > are often interpreted by the shell, so they may need to be
+quoted or escaped, as above.
+
+Additionally, you can use multiple filters at once. Simply separate filters
+using commas. Example:
+
+.. code-block:: bash
+
+ kunit.py run --filter "speed>slow, module=kunit_example_test"
+
+.. note::
+ You can use this filtering feature when running KUnit manually by passing
+ the filter as a module param: ``kunit.filter="speed>slow, speed<=normal"``.
+
+Filtered tests will not run or show up in the test output. You can use the
+``--filter_action=skip`` flag to skip filtered tests instead. These tests will be
+shown in the test output but will not run. To use this feature when
+running KUnit manually, use the module param ``kunit.filter_action=skip``.
+
+Rules of Filtering Procedure
+----------------------------
+
+Since both suites and test cases can have attributes, there may be conflicts
+between attributes during filtering. The process of filtering follows these
+rules:
+
+- Filtering always operates at a per-test level.
+
+- If a test has an attribute set, then the test's value is filtered on.
+
+- Otherwise, the value falls back to the suite's value.
+
+- If neither are set, the attribute has a global "default" value, which is used.
+
+List of Current Attributes
+--------------------------
+
+``speed``
+
+This attribute indicates the speed of a test's execution (how slow or fast the
+test is).
+
+This attribute is saved as an enum with the following categories: "normal",
+"slow", or "very_slow". The assumed default speed for tests is "normal". This
+indicates that the test takes a relatively trivial amount of time (less than
+1 second), regardless of the machine it is running on. Any test slower than
+this could be marked as "slow" or "very_slow".
+
+The macro ``KUNIT_CASE_SLOW(test_name)`` can be easily used to set the speed
+of a test case to "slow".
+
+``module``
+
+This attribute indicates the name of the module associated with the test.
+
+This attribute is automatically saved as a string and is printed for each suite.
+Tests can also be filtered using this attribute.
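Putting the pieces of this section together, here is a minimal sketch, with hypothetical test names, of a suite where one case carries its own speed attribute and the other falls back through the rules above (no case attribute, no suite attribute, so the global "normal" default applies); a suite-level ``.attr`` as shown in the marking snippets would slot into the suite definition and take precedence over that default, but not over the per-case macro:

.. code-block:: c

	#include <kunit/test.h>

	static void example_fast_case(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 1 + 1, 2);
	}

	static void example_slow_case(struct kunit *test)
	{
		KUNIT_EXPECT_EQ(test, 2 + 2, 4);
	}

	static struct kunit_case example_filter_cases[] = {
		/* Case attribute set: filtered on its own "slow" speed. */
		KUNIT_CASE_SLOW(example_slow_case),
		/* No case or suite attribute: falls back to the "normal" default. */
		KUNIT_CASE(example_fast_case),
		{}
	};

	static struct kunit_suite example_filter_suite = {
		.name = "example_filter",
		.test_cases = example_filter_cases,
	};
	kunit_test_suite(example_filter_suite);

With ``kunit.filter="speed>slow"``, example_slow_case is filtered out while example_fast_case still runs; adding ``kunit.filter_action=skip`` reports the filtered case as skipped instead of hiding it.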
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
index e14358bf0b9c..99b5e9530707 100644
--- a/Documentation/devicetree/bindings/arm/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -49,9 +49,14 @@ properties:
- arm,cortex-a77-pmu
- arm,cortex-a78-pmu
- arm,cortex-a510-pmu
+ - arm,cortex-a520-pmu
- arm,cortex-a710-pmu
+ - arm,cortex-a715-pmu
+ - arm,cortex-a720-pmu
- arm,cortex-x1-pmu
- arm,cortex-x2-pmu
+ - arm,cortex-x3-pmu
+ - arm,cortex-x4-pmu
- arm,neoverse-e1-pmu
- arm,neoverse-n1-pmu
- arm,neoverse-n2-pmu
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
index a6b3bb8fdf33..c1d225fcf2d5 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
@@ -49,6 +49,7 @@ properties:
- description: Frequency domain 0 register region
- description: Frequency domain 1 register region
- description: Frequency domain 2 register region
+ - description: Frequency domain 3 register region
reg-names:
minItems: 1
@@ -56,6 +57,7 @@ properties:
- const: freq-domain0
- const: freq-domain1
- const: freq-domain2
+ - const: freq-domain3
clocks:
items:
@@ -69,7 +71,7 @@ properties:
interrupts:
minItems: 1
- maxItems: 3
+ maxItems: 4
interrupt-names:
minItems: 1
@@ -77,6 +79,7 @@ properties:
- const: dcvsh-irq-0
- const: dcvsh-irq-1
- const: dcvsh-irq-2
+ - const: dcvsh-irq-3
'#freq-domain-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
deleted file mode 100644
index 1758051798fe..000000000000
--- a/Documentation/devicetree/bindings/cpufreq/ti-cpufreq.txt
+++ /dev/null
@@ -1,132 +0,0 @@
-TI CPUFreq and OPP bindings
-================================
-
-Certain TI SoCs, like those in the am335x, am437x, am57xx, and dra7xx
-families support different OPPs depending on the silicon variant in use.
-The ti-cpufreq driver can use revision and an efuse value from the SoC to
-provide the OPP framework with supported hardware information. This is
-used to determine which OPPs from the operating-points-v2 table get enabled
-when it is parsed by the OPP framework.
-
-Required properties:
---------------------
-In 'cpus' nodes:
-- operating-points-v2: Phandle to the operating-points-v2 table to use.
-
-In 'operating-points-v2' table:
-- compatible: Should be
- - 'operating-points-v2-ti-cpu' for am335x, am43xx, and dra7xx/am57xx,
- omap34xx, omap36xx and am3517 SoCs
-- syscon: A phandle pointing to a syscon node representing the control module
- register space of the SoC.
-
-Optional properties:
---------------------
-- "vdd-supply", "vbb-supply": to define two regulators for dra7xx
-- "cpu0-supply", "vbb-supply": to define two regulators for omap36xx
-
-For each opp entry in 'operating-points-v2' table:
-- opp-supported-hw: Two bitfields indicating:
- 1. Which revision of the SoC the OPP is supported by
- 2. Which eFuse bits indicate this OPP is available
-
- A bitwise AND is performed against these values and if any bit
- matches, the OPP gets enabled.
-
-Example:
---------
-
-/* From arch/arm/boot/dts/am33xx.dtsi */
-cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- cpu@0 {
- compatible = "arm,cortex-a8";
- device_type = "cpu";
- reg = <0>;
-
- operating-points-v2 = <&cpu0_opp_table>;
-
- clocks = <&dpll_mpu_ck>;
- clock-names = "cpu";
-
- clock-latency = <300000>; /* From omap-cpufreq driver */
- };
-};
-
-/*
- * cpu0 has different OPPs depending on SoC revision and some on revisions
- * 0x2 and 0x4 have eFuse bits that indicate if they are available or not
- */
-cpu0_opp_table: opp-table {
- compatible = "operating-points-v2-ti-cpu";
- syscon = <&scm_conf>;
-
- /*
- * The three following nodes are marked with opp-suspend
- * because they can not be enabled simultaneously on a
- * single SoC.
- */
- opp50-300000000 {
- opp-hz = /bits/ 64 <300000000>;
- opp-microvolt = <950000 931000 969000>;
- opp-supported-hw = <0x06 0x0010>;
- opp-suspend;
- };
-
- opp100-275000000 {
- opp-hz = /bits/ 64 <275000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x01 0x00FF>;
- opp-suspend;
- };
-
- opp100-300000000 {
- opp-hz = /bits/ 64 <300000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x06 0x0020>;
- opp-suspend;
- };
-
- opp100-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x01 0xFFFF>;
- };
-
- opp100-600000000 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <1100000 1078000 1122000>;
- opp-supported-hw = <0x06 0x0040>;
- };
-
- opp120-600000000 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <1200000 1176000 1224000>;
- opp-supported-hw = <0x01 0xFFFF>;
- };
-
- opp120-720000000 {
- opp-hz = /bits/ 64 <720000000>;
- opp-microvolt = <1200000 1176000 1224000>;
- opp-supported-hw = <0x06 0x0080>;
- };
-
- oppturbo-720000000 {
- opp-hz = /bits/ 64 <720000000>;
- opp-microvolt = <1260000 1234800 1285200>;
- opp-supported-hw = <0x01 0xFFFF>;
- };
-
- oppturbo-800000000 {
- opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <1260000 1234800 1285200>;
- opp-supported-hw = <0x06 0x0100>;
- };
-
- oppnitro-1000000000 {
- opp-hz = /bits/ 64 <1000000000>;
- opp-microvolt = <1325000 1298500 1351500>;
- opp-supported-hw = <0x04 0x0200>;
- };
-};
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
index b767ec72a999..ac480765cde0 100644
--- a/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
@@ -20,6 +20,7 @@ properties:
- stericsson,ux500-hash
- st,stm32f456-hash
- st,stm32f756-hash
+ - st,stm32mp13-hash
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/gpio/adi,ds4520-gpio.yaml b/Documentation/devicetree/bindings/gpio/adi,ds4520-gpio.yaml
new file mode 100644
index 000000000000..25b3198c4d3e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/adi,ds4520-gpio.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/adi,ds4520-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: DS4520 I2C GPIO expander
+
+maintainers:
+ - Okan Sahin <okan.sahin@analog.com>
+
+properties:
+ compatible:
+ enum:
+ - adi,ds4520-gpio
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ ngpios:
+ minimum: 1
+ maximum: 9
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - ngpios
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio@50 {
+ compatible = "adi,ds4520-gpio";
+ reg = <0x50>;
+ ngpios = <9>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
deleted file mode 100644
index 4a63bc96b687..000000000000
--- a/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-Broadcom Kona Family GPIO
-=========================
-
-This GPIO driver is used in the following Broadcom SoCs:
- BCM11130, BCM11140, BCM11351, BCM28145, BCM28155
-
-The Broadcom GPIO Controller IP can be configured prior to synthesis to
-support up to 8 banks of 32 GPIOs where each bank has its own IRQ. The
-GPIO controller only supports edge, not level, triggering of interrupts.
-
-Required properties
--------------------
-
-- compatible: "brcm,bcm11351-gpio", "brcm,kona-gpio"
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The interrupt outputs from the controller. There is one GPIO
- interrupt per GPIO bank. The number of interrupts listed depends on the
- number of GPIO banks on the SoC. The interrupts must be ordered by bank,
- starting with bank 0. There is always a 1:1 mapping between banks and
- IRQs.
-- #gpio-cells: Should be <2>. The first cell is the pin number, the second
- cell is used to specify optional parameters:
- - bit 0 specifies polarity (0 for normal, 1 for inverted)
- See also "gpio-specifier" in .../devicetree/bindings/gpio/gpio.txt.
-- #interrupt-cells: Should be <2>. The first cell is the GPIO number. The
- second cell is used to specify flags. The following subset of flags is
- supported:
- - trigger type (bits[1:0]):
- 1 = low-to-high edge triggered.
- 2 = high-to-low edge triggered.
- 3 = low-to-high or high-to-low edge triggered
- Valid values are 1, 2, 3
- See also .../devicetree/bindings/interrupt-controller/interrupts.txt.
-- gpio-controller: Marks the device node as a GPIO controller.
-- interrupt-controller: Marks the device node as an interrupt controller.
-
-Example:
- gpio: gpio@35003000 {
- compatible = "brcm,bcm11351-gpio", "brcm,kona-gpio";
- reg = <0x35003000 0x800>;
- interrupts =
- <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
- GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- gpio-controller;
- interrupt-controller;
- };
diff --git a/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.yaml b/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.yaml
new file mode 100644
index 000000000000..296fdd6b8f38
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/brcm,kona-gpio.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/brcm,kona-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Kona family GPIO controller
+
+description:
+ The Broadcom GPIO Controller IP can be configured prior to synthesis to
+ support up to 8 banks of 32 GPIOs where each bank has its own IRQ. The
+ GPIO controller only supports edge, not level, triggering of interrupts.
+
+maintainers:
+ - Ray Jui <rjui@broadcom.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm11351-gpio
+ - brcm,bcm21664-gpio
+ - brcm,bcm23550-gpio
+ - const: brcm,kona-gpio
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 4
+ maxItems: 6
+ description:
+ The interrupt outputs from the controller. There is one GPIO interrupt
+ per GPIO bank. The number of interrupts listed depends on the number of
+ GPIO banks on the SoC. The interrupts must be ordered by bank, starting
+ with bank 0. There is always a 1:1 mapping between banks and IRQs.
+
+ '#gpio-cells':
+ const: 2
+
+ '#interrupt-cells':
+ const: 2
+
+ gpio-controller: true
+
+ interrupt-controller: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - '#gpio-cells'
+ - '#interrupt-cells'
+ - gpio-controller
+ - interrupt-controller
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: brcm,bcm11351-gpio
+ then:
+ properties:
+ interrupts:
+ minItems: 6
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm21664-gpio
+ - brcm,bcm23550-gpio
+ then:
+ properties:
+ interrupts:
+ maxItems: 4
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ gpio@35003000 {
+ compatible = "brcm,bcm11351-gpio", "brcm,kona-gpio";
+ reg = <0x35003000 0x800>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ };
+...
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
index ae18603697d7..d0ca2af89f1e 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.yaml
@@ -32,10 +32,12 @@ properties:
- fsl,imx6sx-gpio
- fsl,imx6ul-gpio
- fsl,imx7d-gpio
+ - fsl,imx8dxl-gpio
- fsl,imx8mm-gpio
- fsl,imx8mn-gpio
- fsl,imx8mp-gpio
- fsl,imx8mq-gpio
+ - fsl,imx8qm-gpio
- fsl,imx8qxp-gpio
- fsl,imxrt1050-gpio
- fsl,imxrt1170-gpio
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
index fa116148ee90..99febb8ea1b6 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca95xx.yaml
@@ -66,6 +66,7 @@ properties:
- ti,tca6408
- ti,tca6416
- ti,tca6424
+ - ti,tca9538
- ti,tca9539
- ti,tca9554
diff --git a/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml b/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
index b391cc1b4590..209f03bba0a7 100644
--- a/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
@@ -61,6 +61,10 @@ patternProperties:
'#gpio-cells':
const: 2
+ gpio-line-names:
+ minItems: 1
+ maxItems: 32
+
ngpios:
default: 32
minimum: 1
diff --git a/Documentation/devicetree/bindings/gpio/st,stmpe-gpio.yaml b/Documentation/devicetree/bindings/gpio/st,stmpe-gpio.yaml
index 22c0cae73425..4555f1644a4d 100644
--- a/Documentation/devicetree/bindings/gpio/st,stmpe-gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/st,stmpe-gpio.yaml
@@ -28,6 +28,10 @@ properties:
gpio-controller: true
+ gpio-line-names:
+ minItems: 1
+ maxItems: 24
+
interrupt-controller: true
st,norequest-mask:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
index e84e4f33b358..3d06db98e978 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.yaml
@@ -35,6 +35,7 @@ properties:
- amlogic,meson-sm1-gpio-intc
- amlogic,meson-a1-gpio-intc
- amlogic,meson-s4-gpio-intc
+ - amlogic,c3-gpio-intc
- const: amlogic,meson-gpio-intc
reg:
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
index a6c19a6cc99e..3e99801f77d2 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
@@ -160,6 +160,12 @@ properties:
description:
The MIO bank number in which the command and data lines are configured.
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
dependencies:
'#clock-cells': [ clock-output-names ]
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 86c73fd825fd..58ae298cd2fc 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -269,7 +269,7 @@ properties:
post-power-on-delay-ms:
description:
It was invented for MMC pwrseq-simple which could be referred to
- mmc-pwrseq-simple.txt. But now it\'s reused as a tunable delay
+ mmc-pwrseq-simple.yaml. But now it\'s reused as a tunable delay
waiting for I/O signalling and card power supply to be stable,
regardless of whether pwrseq-simple is used. Default to 10ms if
no available.
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
index 46eefdd19a2c..3fffa467e4e1 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.yaml
@@ -91,16 +91,6 @@ properties:
should switch dat1 pin to GPIO mode.
maxItems: 1
- assigned-clocks:
- description:
- PLL of the source clock.
- maxItems: 1
-
- assigned-clock-parents:
- description:
- parent of source clock, used for HS400 mode to get 400Mhz source clock.
- maxItems: 1
-
hs400-ds-delay:
$ref: /schemas/types.yaml#/definitions/uint32
description:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
index 69edfd4d3922..a9fb0a91245f 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-atmel.txt
@@ -5,11 +5,13 @@ Documentation/devicetree/bindings/mmc/mmc.txt and the properties used by the
sdhci-of-at91 driver.
Required properties:
-- compatible: Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci".
+- compatible: Must be "atmel,sama5d2-sdhci" or "microchip,sam9x60-sdhci"
+ or "microchip,sam9x7-sdhci", "microchip,sam9x60-sdhci".
- clocks: Phandlers to the clocks.
- clock-names: Must be "hclock", "multclk", "baseclk" for
"atmel,sama5d2-sdhci".
Must be "hclock", "multclk" for "microchip,sam9x60-sdhci".
+ Must be "hclock", "multclk" for "microchip,sam9x7-sdhci".
Optional properties:
- assigned-clocks: The same with "multclk".
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 33726134f5c9..6ec0c181b6db 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -20,7 +20,7 @@ which is at a different MDIO base address in different switch families.
6171, 6172, 6175, 6176, 6185, 6240, 6320, 6321,
6341, 6350, 6351, 6352
- "marvell,mv88e6190" : Switch has base address 0x00. Use with models:
- 6163, 6190, 6190X, 6191, 6290, 6390, 6390X
+ 6190, 6190X, 6191, 6290, 6361, 6390, 6390X
- "marvell,mv88e6250" : Switch has base address 0x08 or 0x18. Use with model:
6220, 6250
diff --git a/Documentation/devicetree/bindings/net/ti,icss-iep.yaml b/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
new file mode 100644
index 000000000000..f5c22d6dcaee
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/ti,icss-iep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments ICSS Industrial Ethernet Peripheral (IEP) module
+
+maintainers:
+ - Md Danish Anwar <danishanwar@ti.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - ti,am642-icss-iep
+ - ti,j721e-icss-iep
+ - const: ti,am654-icss-iep
+
+ - const: ti,am654-icss-iep
+
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: phandle to the IEP source clock
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ /* AM65x */
+ icssg0_iep0: iep@2e000 {
+ compatible = "ti,am654-icss-iep";
+ reg = <0x2e000 0x1000>;
+ clocks = <&icssg0_iepclk_mux>;
+ };
diff --git a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
index 8ec30b3eb760..311c570165f9 100644
--- a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
@@ -52,6 +52,14 @@ properties:
description:
phandle to MII_RT module's syscon regmap
+ ti,iep:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 2
+ items:
+ maxItems: 1
+ description:
+ phandle to IEP (Industrial Ethernet Peripheral) for ICSSG
+
interrupts:
maxItems: 2
description:
@@ -155,6 +163,7 @@ examples:
"tx1-0", "tx1-1", "tx1-2", "tx1-3",
"rx0", "rx1";
ti,mii-g-rt = <&icssg2_mii_g_rt>;
+ ti,iep = <&icssg2_iep0>, <&icssg2_iep1>;
interrupt-parent = <&icssg2_intc>;
interrupts = <24 0 2>, <25 1 3>;
interrupt-names = "tx_ts0", "tx_ts1";
diff --git a/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt b/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt
deleted file mode 100644
index 038dda48b8e6..000000000000
--- a/Documentation/devicetree/bindings/net/xilinx_gmii2rgmii.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-XILINX GMIITORGMII Converter Driver Device Tree Bindings
---------------------------------------------------------
-
-The Gigabit Media Independent Interface (GMII) to Reduced Gigabit Media
-Independent Interface (RGMII) core provides the RGMII between RGMII-compliant
-Ethernet physical media devices (PHY) and the Gigabit Ethernet controller.
-This core can be used in all three modes of operation(10/100/1000 Mb/s).
-The Management Data Input/Output (MDIO) interface is used to configure the
-Speed of operation. This core can switch dynamically between the three
-Different speed modes by configuring the conveter register through mdio write.
-
-This converter sits between the ethernet MAC and the external phy.
-MAC <==> GMII2RGMII <==> RGMII_PHY
-
-For more details about mdio please refer phy.txt file in the same directory.
-
-Required properties:
-- compatible : Should be "xlnx,gmii-to-rgmii-1.0"
-- reg : The ID number for the phy, usually a small integer
-- phy-handle : Should point to the external phy device.
- See ethernet.txt file in the same directory.
-
-Example:
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- phy: ethernet-phy@0 {
- ......
- };
- gmiitorgmii: gmiitorgmii@8 {
- compatible = "xlnx,gmii-to-rgmii-1.0";
- reg = <8>;
- phy-handle = <&phy>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml b/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml
new file mode 100644
index 000000000000..0f781dac6717
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/xlnx,gmii-to-rgmii.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx GMII to RGMII Converter
+
+maintainers:
+ - Harini Katakam <harini.katakam@amd.com>
+
+description:
+ The Gigabit Media Independent Interface (GMII) to Reduced Gigabit Media
+ Independent Interface (RGMII) core provides the RGMII between RGMII-compliant
+ ethernet physical media devices (PHY) and the Gigabit Ethernet controller.
+  This core can be used in all three modes of operation (10/100/1000 Mb/s).
+ The Management Data Input/Output (MDIO) interface is used to configure the
+ speed of operation. This core can switch dynamically between the three
+ different speed modes by configuring the converter register through mdio write.
+ The core cannot function without an external phy connected to it.
+
+properties:
+ compatible:
+ const: xlnx,gmii-to-rgmii-1.0
+
+ reg:
+ minimum: 0
+ maximum: 31
+ description: The ID number for the phy.
+
+ phy-handle:
+ $ref: ethernet-controller.yaml#/properties/phy-handle
+
+required:
+ - compatible
+ - reg
+ - phy-handle
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: ethernet-phy@0 {
+ reg = <0>;
+ };
+ gmiitorgmii@8 {
+ compatible = "xlnx,gmii-to-rgmii-1.0";
+ reg = <8>;
+ phy-handle = <&phy>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml
new file mode 100644
index 000000000000..02d1d2c17129
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/operating-points-v2-ti-cpu.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/operating-points-v2-ti-cpu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI CPU OPP (Operating Performance Points)
+
+description:
+  On TI SoCs, like those in the AM335x, AM437x, AM57xx, AM62x, and DRA7xx
+  families, the subset of CPU frequencies and the voltage value of
+  each OPP vary based on the silicon variant in use. The data sheet sections
+ corresponding to "Operating Performance Points" describe the frequency
+ and voltage values based on device type and speed bin information
+ blown in corresponding eFuse bits as referred to by the Technical
+ Reference Manual.
+
+ This document extends the operating-points-v2 binding by providing
+ the hardware description for the scheme mentioned above.
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+allOf:
+ - $ref: opp-v2-base.yaml#
+
+properties:
+ compatible:
+ const: operating-points-v2-ti-cpu
+
+ syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+ points to syscon node representing the control module
+ register space of the SoC.
+
+ opp-shared: true
+
+patternProperties:
+ '^opp(-?[0-9]+)*$':
+ type: object
+ additionalProperties: false
+
+ properties:
+ clock-latency-ns: true
+ opp-hz: true
+ opp-microvolt: true
+ opp-supported-hw: true
+ opp-suspend: true
+ turbo-mode: true
+
+ required:
+ - opp-hz
+ - opp-supported-hw
+
+required:
+ - compatible
+ - syscon
+
+additionalProperties: false
+
+examples:
+ - |
+ opp-table {
+ compatible = "operating-points-v2-ti-cpu";
+ syscon = <&scm_conf>;
+
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x06 0x0020>;
+ opp-suspend;
+ };
+
+ opp-500000000 {
+ opp-hz = /bits/ 64 <500000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x01 0xFFFF>;
+ };
+
+ opp-600000000 {
+ opp-hz = /bits/ 64 <600000000>;
+ opp-microvolt = <1100000 1078000 1122000>;
+ opp-supported-hw = <0x06 0x0040>;
+ };
+
+ opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <1325000 1298500 1351500>;
+ opp-supported-hw = <0x04 0x0200>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
index 47e6f36b7637..e2f8f7af3cf4 100644
--- a/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
+++ b/Documentation/devicetree/bindings/opp/opp-v2-base.yaml
@@ -56,7 +56,7 @@ patternProperties:
need to be configured and that is left for the implementation
specific binding.
minItems: 1
- maxItems: 16
+ maxItems: 32
items:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/opp/ti,omap-opp-supply.yaml b/Documentation/devicetree/bindings/opp/ti,omap-opp-supply.yaml
new file mode 100644
index 000000000000..693f22539606
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/ti,omap-opp-supply.yaml
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/opp/ti,omap-opp-supply.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments OMAP compatible OPP supply
+
+description:
+ OMAP5, DRA7, and AM57 families of SoCs have Class 0 AVS eFuse
+ registers, which contain OPP-specific voltage information tailored
+ for the specific device. This binding provides the information
+  needed to describe such hardware values and relate them to programming
+  the primary regulator during an OPP transition.
+
+ Also, some supplies may have an associated vbb-supply, an Adaptive
+ Body Bias regulator, which must transition in a specific sequence
+ w.r.t the vdd-supply and clk when making an OPP transition. By
+ supplying two regulators to the device that will undergo OPP
+ transitions, we can use the multi-regulator support implemented by
+ the OPP core to describe both regulators the platform needs. The
+ OPP core binding Documentation/devicetree/bindings/opp/opp-v2.yaml
+ provides further information (refer to Example 4 Handling multiple
+ regulators).
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+
+properties:
+ $nodename:
+ pattern: '^opp-supply(@[0-9a-f]+)?$'
+
+ compatible:
+ oneOf:
+ - description: Basic OPP supply controlling VDD and VBB
+ const: ti,omap-opp-supply
+ - description: OMAP5+ optimized voltages in efuse(Class 0) VDD along with
+ VBB.
+ const: ti,omap5-opp-supply
+ - description: OMAP5+ optimized voltages in efuse(class0) VDD but no VBB
+ const: ti,omap5-core-opp-supply
+
+ reg:
+ maxItems: 1
+
+ ti,absolute-max-voltage-uv:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Absolute maximum voltage for the OPP supply in micro-volts.
+ minimum: 750000
+ maximum: 1500000
+
+ ti,efuse-settings:
+ description: An array of u32 tuple items providing information about
+ optimized efuse configuration.
+ minItems: 1
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: Reference voltage in micro-volts (OPP Voltage)
+ minimum: 750000
+ maximum: 1500000
+ multipleOf: 10000
+ - description: efuse offset where the optimized voltage is located
+ multipleOf: 4
+ maximum: 256
+
+required:
+ - compatible
+ - ti,absolute-max-voltage-uv
+
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: ti,omap-opp-supply
+ then:
+ required:
+ - reg
+ - ti,efuse-settings
+
+additionalProperties: false
+
+examples:
+ - |
+ opp-supply {
+ compatible = "ti,omap-opp-supply";
+ ti,absolute-max-voltage-uv = <1375000>;
+ };
+ - |
+ opp-supply@4a003b20 {
+ compatible = "ti,omap5-opp-supply";
+ reg = <0x4a003b20 0x8>;
+ ti,efuse-settings =
+ /* uV offset */
+ <1060000 0x0>,
+ <1160000 0x4>,
+ <1210000 0x8>;
+ ti,absolute-max-voltage-uv = <1500000>;
+ };
diff --git a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
deleted file mode 100644
index b70d326117cd..000000000000
--- a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-Texas Instruments OMAP compatible OPP supply description
-
-OMAP5, DRA7, and AM57 family of SoCs have Class0 AVS eFuse registers which
-contain data that can be used to adjust voltages programmed for some of their
-supplies for more efficient operation. This binding provides the information
-needed to read these values and use them to program the main regulator during
-an OPP transitions.
-
-Also, some supplies may have an associated vbb-supply which is an Adaptive Body
-Bias regulator which much be transitioned in a specific sequence with regards
-to the vdd-supply and clk when making an OPP transition. By supplying two
-regulators to the device that will undergo OPP transitions we can make use
-of the multi regulator binding that is part of the OPP core described here [1]
-to describe both regulators needed by the platform.
-
-[1] Documentation/devicetree/bindings/opp/opp-v2.yaml
-
-Required Properties for Device Node:
-- vdd-supply: phandle to regulator controlling VDD supply
-- vbb-supply: phandle to regulator controlling Body Bias supply
- (Usually Adaptive Body Bias regulator)
-
-Required Properties for opp-supply node:
-- compatible: Should be one of:
- "ti,omap-opp-supply" - basic OPP supply controlling VDD and VBB
- "ti,omap5-opp-supply" - OMAP5+ optimized voltages in efuse(class0)VDD
- along with VBB
- "ti,omap5-core-opp-supply" - OMAP5+ optimized voltages in efuse(class0) VDD
- but no VBB.
-- reg: Address and length of the efuse register set for the device (mandatory
- only for "ti,omap5-opp-supply")
-- ti,efuse-settings: An array of u32 tuple items providing information about
- optimized efuse configuration. Each item consists of the following:
- volt: voltage in uV - reference voltage (OPP voltage)
- efuse_offseet: efuse offset from reg where the optimized voltage is stored.
-- ti,absolute-max-voltage-uv: absolute maximum voltage for the OPP supply.
-
-Example:
-
-/* Device Node (CPU) */
-cpus {
- cpu0: cpu@0 {
- device_type = "cpu";
-
- ...
-
- vdd-supply = <&vcc>;
- vbb-supply = <&abb_mpu>;
- };
-};
-
-/* OMAP OPP Supply with Class0 registers */
-opp_supply_mpu: opp_supply@4a003b20 {
- compatible = "ti,omap5-opp-supply";
- reg = <0x4a003b20 0x8>;
- ti,efuse-settings = <
- /* uV offset */
- 1060000 0x0
- 1160000 0x4
- 1210000 0x8
- >;
- ti,absolute-max-voltage-uv = <1500000>;
-};
diff --git a/Documentation/devicetree/bindings/regulator/active-semi,act8846.yaml b/Documentation/devicetree/bindings/regulator/active-semi,act8846.yaml
index 3725348bb235..02f45b5834d0 100644
--- a/Documentation/devicetree/bindings/regulator/active-semi,act8846.yaml
+++ b/Documentation/devicetree/bindings/regulator/active-semi,act8846.yaml
@@ -28,75 +28,37 @@ properties:
the VSEL pin is assumed to be low.
type: boolean
- regulators:
- type: object
- additionalProperties: false
+ inl1-supply:
+ description: Handle to the INL1 input supply (REG5-7)
- properties:
- REG1:
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
+ inl2-supply:
+ description: Handle to the INL2 input supply (REG8-9)
- properties:
- vp1-supply:
- description: Handle to the VP1 input supply
+ inl3-supply:
+ description: Handle to the INL3 input supply (REG10-12)
- REG2:
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
+ vp1-supply:
+ description: Handle to the VP1 input supply (REG1)
- properties:
- vp2-supply:
- description: Handle to the VP2 input supply
+ vp2-supply:
+ description: Handle to the VP2 input supply (REG2)
- REG3:
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
+ vp3-supply:
+ description: Handle to the VP3 input supply (REG3)
- properties:
- vp3-supply:
- description: Handle to the VP3 input supply
-
- REG4:
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
+ vp4-supply:
+ description: Handle to the VP4 input supply (REG4)
- properties:
- vp4-supply:
- description: Handle to the VP4 input supply
+ regulators:
+ type: object
+ additionalProperties: false
patternProperties:
- "^REG[5-7]$":
+ "^REG([1-9]|1[0-2])$":
type: object
$ref: /schemas/regulator/regulator.yaml#
unevaluatedProperties: false
- properties:
- inl1-supply:
- description: Handle to the INL1 input supply
-
- "^REG[8-9]$":
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
-
- properties:
- inl2-supply:
- description: Handle to the INL2 input supply
-
- "^REG1[0-2]$":
- type: object
- $ref: /schemas/regulator/regulator.yaml#
- unevaluatedProperties: false
-
- properties:
- inl3-supply:
- description: Handle to the INL3 input supply
-
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/regulator/adi,max77857.yaml b/Documentation/devicetree/bindings/regulator/adi,max77857.yaml
new file mode 100644
index 000000000000..d1fa74aca721
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/adi,max77857.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2022 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/adi,max77857.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices MAX77857 Buck-Boost Converter
+
+maintainers:
+ - Ibrahim Tilki <Ibrahim.Tilki@analog.com>
+ - Okan Sahin <Okan.Sahin@analog.com>
+
+description: Analog Devices MAX77857 Buck-Boost Converter
+
+properties:
+ compatible:
+ enum:
+ - adi,max77831
+ - adi,max77857
+ - adi,max77859
+ - adi,max77859a
+
+ reg:
+ description: I2C address of the device
+ items:
+ - enum: [0x66, 0x67, 0x6E, 0x6F]
+
+ interrupts:
+ maxItems: 1
+
+ adi,switch-frequency-hz:
+ description: Switching frequency of the Buck-Boost converter in Hz.
+ items:
+ - enum: [1200000, 1500000, 1800000, 2100000]
+
+ adi,rtop-ohms:
+ description: Top feedback resistor value in ohms for external feedback.
+ minimum: 150000
+ maximum: 330000
+
+ adi,rbot-ohms:
+ description: Bottom feedback resistor value in ohms for external feedback.
+
+dependencies:
+ adi,rtop-ohms: [ 'adi,rbot-ohms' ]
+ adi,rbot-ohms: [ 'adi,rtop-ohms' ]
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: regulator.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - adi,max77831
+
+ then:
+ properties:
+ adi,switch-frequency-hz:
+ items:
+ enum: [1200000, 1500000, 1800000]
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ regulator@66 {
+ reg = <0x66>;
+ compatible = "adi,max77857";
+ interrupt-parent = <&gpio>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+
+ adi,rtop-ohms = <312000>;
+ adi,rbot-ohms = <12000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/awinic,aw37503.yaml b/Documentation/devicetree/bindings/regulator/awinic,aw37503.yaml
new file mode 100644
index 000000000000..c92a881ed60e
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/awinic,aw37503.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/awinic,aw37503.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Awinic AW37503 Voltage Regulator
+
+maintainers:
+ - Alec Li <like@awinic.com>
+
+description:
+  The AW37503 is a dual voltage regulator designed to supply the positive and
+  negative rails for driving TFT-LCD panels. It supports software-configurable
+  output switching and monitoring. The output voltages can be programmed via an
+  I2C-compatible interface.
+
+properties:
+ compatible:
+ const: awinic,aw37503
+
+ reg:
+ maxItems: 1
+
+patternProperties:
+ "^out[pn]$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+ description:
+      Properties for a single regulator.
+
+ properties:
+ enable-gpios:
+ maxItems: 1
+ description:
+ GPIO specifier to enable the GPIO control (on/off) for regulator.
+
+ required:
+ - regulator-name
+
+required:
+ - compatible
+ - reg
+ - outp
+ - outn
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ regulator@3e {
+ compatible = "awinic,aw37503";
+ reg = <0x3e>;
+
+ outp {
+ regulator-name = "outp";
+ regulator-boot-on;
+ regulator-always-on;
+ enable-gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ };
+
+ outn {
+ regulator-name = "outn";
+ regulator-boot-on;
+ regulator-always-on;
+ enable-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ };
+ };
+ };
+...
+
diff --git a/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml b/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
index dc626517c2ad..13b3f75f8e5e 100644
--- a/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
+++ b/Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
@@ -95,11 +95,6 @@ properties:
Properties for a single BUCK regulator
properties:
- regulator-name:
- pattern: "^BUCK([1-2])$"
- description: |
- BUCK2 present in DA9122, DA9220, DA9131, DA9132 only
-
regulator-initial-mode:
enum: [ 0, 1, 2, 3 ]
description: Defined in include/dt-bindings/regulator/dlg,da9121-regulator.h
@@ -122,6 +117,23 @@ required:
- reg
- regulators
+allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - dlg,da9122
+ - dlg,da9131
+ - dlg,da9132
+ - dlg,da9220
+ then:
+ properties:
+ regulators:
+ properties:
+ buck2: false
+
additionalProperties: false
examples:
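To make the new constraint concrete, here is a hedged sketch (address, names and voltage limits are placeholders, not taken from the patch): buck2 is only accepted when the compatible names a dual-channel part such as dlg,da9122, while on a single-channel dlg,da9121 the schema now rejects it.

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        pmic@68 {
            compatible = "dlg,da9122";
            reg = <0x68>;

            regulators {
                buck1 {
                    regulator-name = "BUCK1";
                    regulator-min-microvolt = <300000>;
                    regulator-max-microvolt = <1900000>;
                };

                buck2 {
                    /* valid only because dlg,da9122 is a dual-buck device */
                    regulator-name = "BUCK2";
                    regulator-min-microvolt = <300000>;
                    regulator-max-microvolt = <1900000>;
                };
            };
        };
    };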
diff --git a/Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml b/Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml
new file mode 100644
index 000000000000..bad140418e49
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/dlg,slg51000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Dialog Semiconductor SLG51000 Voltage Regulator
+
+maintainers:
+ - Eric Jeong <eric.jeong.opensource@diasemi.com>
+ - Support Opensource <support.opensource@diasemi.com>
+
+properties:
+ compatible:
+ const: dlg,slg51000
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ dlg,cs-gpios:
+ maxItems: 1
+ description:
+ GPIO for chip select
+
+ vin3-supply:
+ description:
+ Input supply for ldo3, required if regulator is enabled
+
+ vin4-supply:
+ description:
+ Input supply for ldo4, required if regulator is enabled
+
+ vin5-supply:
+ description:
+ Input supply for ldo5, required if regulator is enabled
+
+ vin6-supply:
+ description:
+ Input supply for ldo6, required if regulator is enabled
+
+ vin7-supply:
+ description:
+ Input supply for ldo7, required if regulator is enabled
+
+ regulators:
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ "^ldo[1-7]$":
+ type: object
+ $ref: /schemas/regulator/regulator.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ enable-gpios:
+ maxItems: 1
+
+ required:
+ - regulator-name
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/regulator/dlg,da9121-regulator.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@75 {
+ compatible = "dlg,slg51000";
+ reg = <0x75>;
+ dlg,cs-gpios = <&tlmm 69 GPIO_ACTIVE_HIGH>;
+ vin5-supply = <&vreg_s1f_1p2>;
+ vin6-supply = <&vreg_s1f_1p2>;
+
+ regulators {
+ ldo1 {
+ regulator-name = "slg51000_b_ldo1";
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo2 {
+ regulator-name = "slg51000_b_ldo2";
+ regulator-min-microvolt = <2400000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo3 {
+ regulator-name = "slg51000_b_ldo3";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ };
+
+ ldo4 {
+ regulator-name = "slg51000_b_ldo4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ };
+
+ ldo5 {
+ regulator-name = "slg51000_b_ldo5";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ ldo6 {
+ regulator-name = "slg51000_b_ldo6";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ ldo7 {
+ regulator-name = "slg51000_b_ldo7";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml b/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
index 2e720d152890..0221397eb51e 100644
--- a/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
+++ b/Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
@@ -29,10 +29,12 @@ properties:
patternProperties:
"^buck[1-4]$":
$ref: regulator.yaml#
+ unevaluatedProperties: false
type: object
"^ldo[1-4]$":
$ref: regulator.yaml#
+ unevaluatedProperties: false
type: object
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
index f3fcfc8be72f..6de5b027f990 100644
--- a/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
+++ b/Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
@@ -21,7 +21,6 @@ properties:
regulators:
type: object
- $ref: regulator.yaml#
description: |
list of regulators provided by this controller, must be named
@@ -39,11 +38,13 @@ properties:
ldortc:
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
patternProperties:
"^ldo[1-4]$":
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
"^buck[1-4]$":
type: object
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.yaml b/Documentation/devicetree/bindings/regulator/pfuze100.yaml
index e384e4953f0a..0eda44752cdd 100644
--- a/Documentation/devicetree/bindings/regulator/pfuze100.yaml
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.yaml
@@ -68,18 +68,22 @@ properties:
"^sw([1-4]|[1-4][a-c]|[1-4][a-c][a-c])$":
$ref: regulator.yaml#
type: object
+ unevaluatedProperties: false
"^vgen[1-6]$":
$ref: regulator.yaml#
type: object
+ unevaluatedProperties: false
"^vldo[1-4]$":
$ref: regulator.yaml#
type: object
+ unevaluatedProperties: false
"^(vsnvs|vref|vrefddr|swbst|coin|v33|vccsd)$":
$ref: regulator.yaml#
type: object
+ unevaluatedProperties: false
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml
index 8a08698e3484..b4eb4001eb3d 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpm-regulator.yaml
@@ -49,7 +49,7 @@ patternProperties:
".*-supply$":
description: Input supply phandle(s) for this node
- "^((s|l|lvs)[0-9]*)|(s[1-2][a-b])|(ncp)|(mvs)|(usb-switch)|(hdmi-switch)$":
+ "^((s|l|lvs)[0-9]*|s[1-2][a-b]|ncp|mvs|usb-switch|hdmi-switch)$":
description: List of regulators and its properties
$ref: regulator.yaml#
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
index b9498504ad79..127a6f39b7f0 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.yaml
@@ -53,6 +53,7 @@ description: |
For PMR735A, smps1 - smps3, ldo1 - ldo7
For PMX55, smps1 - smps7, ldo1 - ldo16
For PMX65, smps1 - smps8, ldo1 - ldo21
+ For PMX75, smps1 - smps10, ldo1 - ldo21
properties:
compatible:
@@ -84,13 +85,14 @@ properties:
- qcom,pmr735a-rpmh-regulators
- qcom,pmx55-rpmh-regulators
- qcom,pmx65-rpmh-regulators
+ - qcom,pmx75-rpmh-regulators
qcom,pmic-id:
description: |
RPMh resource name suffix used for the regulators found
on this PMIC.
$ref: /schemas/types.yaml#/definitions/string
- enum: [a, b, c, d, e, f, g, h, k]
+ enum: [a, b, c, d, e, f, g, h, i, j, k, l, m, n]
qcom,always-wait-for-ack:
description: |
@@ -109,6 +111,7 @@ properties:
bob:
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
description: BOB regulator node.
dependencies:
regulator-allow-set-load: [ regulator-allowed-modes ]
@@ -117,6 +120,7 @@ patternProperties:
"^(smps|ldo|lvs|bob)[0-9]+$":
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
description: smps/ldo regulator nodes(s).
dependencies:
regulator-allow-set-load: [ regulator-allowed-modes ]
@@ -424,10 +428,28 @@ allOf:
vdd-l11-l13-supply: true
patternProperties:
"^vdd-l[1347]-supply$": true
- "^vdd-l1[0245789]-supply$": true
+ "^vdd-l1[024579]-supply$": true
"^vdd-l2[01]-supply$": true
"^vdd-s[1-8]-supply$": true
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,pmx75-rpmh-regulators
+ then:
+ properties:
+ vdd-l2-l18-supply: true
+ vdd-l4-l16-supply: true
+ vdd-l5-l6-supply: true
+ vdd-l8-l9-supply: true
+ vdd-l11-l13-supply: true
+ vdd-l20-l21-supply: true
+ patternProperties:
+ "^vdd-l[137]-supply$": true
+ "^vdd-l1[024579]-supply$": true
+ "^vdd-s([1-9]|10)-supply$": true
+
unevaluatedProperties: false
examples:
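A hedged sketch of a PMX75 node following the shape of the existing examples in this schema; the PMIC id, parent supplies and voltage values are placeholders rather than values from a real board.

    pmx75-rpmh-regulators {
        compatible = "qcom,pmx75-rpmh-regulators";
        qcom,pmic-id = "b";

        vdd-s1-supply = <&vph_pwr>;
        vdd-l2-l18-supply = <&vreg_bob>;

        smps1 {
            regulator-min-microvolt = <852000>;
            regulator-max-microvolt = <1170000>;
        };

        ldo2 {
            regulator-min-microvolt = <1200000>;
            regulator-max-microvolt = <1200000>;
        };
    };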
diff --git a/Documentation/devicetree/bindings/regulator/qcom,sdm845-refgen-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,sdm845-refgen-regulator.yaml
new file mode 100644
index 000000000000..f02f97d4fdd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/qcom,sdm845-refgen-regulator.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/qcom,sdm845-refgen-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. REFGEN Regulator
+
+maintainers:
+ - Konrad Dybcio <konradybcio@kernel.org>
+
+description:
+ The REFGEN (reference voltage generator) regulator provides reference
+ voltage for on-chip IPs (like PHYs) on some Qualcomm SoCs.
+
+allOf:
+ - $ref: regulator.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - qcom,sc7180-refgen-regulator
+ - qcom,sc8180x-refgen-regulator
+ - qcom,sm8150-refgen-regulator
+ - const: qcom,sdm845-refgen-regulator
+
+ - items:
+ - enum:
+ - qcom,sc7280-refgen-regulator
+ - qcom,sc8280xp-refgen-regulator
+ - qcom,sm6350-refgen-regulator
+ - qcom,sm6375-refgen-regulator
+ - qcom,sm8350-refgen-regulator
+ - const: qcom,sm8250-refgen-regulator
+
+ - enum:
+ - qcom,sdm845-refgen-regulator
+ - qcom,sm8250-refgen-regulator
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ regulator@162f000 {
+ compatible = "qcom,sm8250-refgen-regulator";
+ reg = <0x0162f000 0x84>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
index a8ca8e0b27f8..9ea8ac0786ac 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,smd-rpm-regulator.yaml
@@ -110,6 +110,7 @@ patternProperties:
"^((s|l|lvs|5vs)[0-9]*)|(boost-bypass)|(bob)$":
description: List of regulators and its properties
$ref: regulator.yaml#
+ unevaluatedProperties: false
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml
index d9c23333e157..cd06e957b9db 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt4831-regulator.yaml
@@ -29,6 +29,7 @@ patternProperties:
"^DSV(LCM|P|N)$":
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
description:
Properties for single Display Bias Voltage regulator.
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt5739.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt5739.yaml
index 358297dd3fb7..e95e046e9ed6 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rt5739.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt5739.yaml
@@ -21,6 +21,7 @@ allOf:
properties:
compatible:
enum:
+ - richtek,rt5733
- richtek,rt5739
reg:
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtmv20-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtmv20-regulator.yaml
index 446ec5127d1f..fec3d396ca50 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rtmv20-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rtmv20-regulator.yaml
@@ -121,6 +121,7 @@ properties:
description: load switch current regulator description.
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
new file mode 100644
index 000000000000..609c06615bdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/richtek,rtq2208.yaml
@@ -0,0 +1,197 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/regulator/richtek,rtq2208.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RTQ2208 SubPMIC Regulator
+
+maintainers:
+ - Alina Yu <alina_yu@richtek.com>
+
+description: |
+  The RTQ2208 is a highly integrated power converter that offers functional safety
+  together with dual multi-configurable synchronous buck converters and two LDOs.
+
+  Bucks support "regulator-allowed-modes" and "regulator-mode". The former defines the permitted
+  switching operation in normal mode; the latter defines the operation in suspend-to-RAM mode.
+
+  Whether the RTQ2208 is configured for normal or suspend-to-RAM mode, all buck rails offer two
+  switching operation modes: automatic power saving mode (Auto mode) and forced continuous
+  conduction mode (FCCM).
+
+  The modes are defined in the datasheet, available at the link below, and
+  their meaning is::
+    0 - Auto mode for power saving, which reduces the switching frequency at light load
+    to maintain high efficiency.
+    1 - FCCM, which keeps a constant switching frequency to meet strict voltage regulation accuracy.
+
+  The datasheet will be available soon at
+  https://www.richtek.com/assets/Products
+
+properties:
+ compatible:
+ enum:
+ - richtek,rtq2208
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ richtek,mtp-sel-high:
+ type: boolean
+ description:
+      Selects which register set is used to adjust vout.
+      false - use the DVS0 register setting to adjust vout
+      true - use the DVS1 register setting to adjust vout
+
+ regulators:
+ type: object
+ additionalProperties: false
+
+ patternProperties:
+ "^buck-[a-h]$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+ description:
+ description for buck-[a-h] regulator.
+
+ properties:
+ regulator-allowed-modes:
+ description:
+            Two buck modes with different switching accuracy.
+ 0 - Auto mode
+ 1 - FCCM
+ items:
+ enum: [0, 1]
+
+ "^ldo[1-2]$":
+ type: object
+ $ref: regulator.yaml#
+ unevaluatedProperties: false
+ description:
+ regulator description for ldo[1-2].
+
+required:
+ - compatible
+ - reg
+ - regulators
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmic@10 {
+ compatible = "richtek,rtq2208";
+ reg = <0x10>;
+ interrupts-extended = <&gpio26 0 IRQ_TYPE_LEVEL_LOW>;
+ richtek,mtp-sel-high;
+
+ regulators {
+ buck-a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-c {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-d {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-e {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-f {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-g {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ buck-h {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <2050000>;
+ regulator-allowed-modes = <0 1>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <1>;
+ };
+ };
+ ldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+ ldo2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml
index e6e5a9a7d940..ef62c618de67 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rtq6752-regulator.yaml
@@ -35,6 +35,7 @@ properties:
"^(p|n)avdd$":
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
description: |
regulator description for pavdd and navdd.
diff --git a/Documentation/devicetree/bindings/regulator/slg51000.txt b/Documentation/devicetree/bindings/regulator/slg51000.txt
deleted file mode 100644
index aa0733e49b90..000000000000
--- a/Documentation/devicetree/bindings/regulator/slg51000.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-* Dialog Semiconductor SLG51000 Voltage Regulator
-
-Required properties:
-- compatible : Should be "dlg,slg51000" for SLG51000
-- reg : Specifies the I2C slave address.
-- xxx-supply: Input voltage supply regulator for ldo3 to ldo7.
- These entries are required if regulators are enabled for a device.
- An absence of these properties can cause the regulator registration to fail.
- If some of input supply is powered through battery or always-on supply then
- also it is required to have these parameters with proper node handle of always
- on power supply.
- vin3-supply: Input supply for ldo3
- vin4-supply: Input supply for ldo4
- vin5-supply: Input supply for ldo5
- vin6-supply: Input supply for ldo6
- vin7-supply: Input supply for ldo7
-
-Optional properties:
-- interrupt-parent : Specifies the reference to the interrupt controller.
-- interrupts : IRQ line information.
-- dlg,cs-gpios : Specify a valid GPIO for chip select
-
-Sub-nodes:
-- regulators : This node defines the settings for the regulators.
- The content of the sub-node is defined by the standard binding
- for regulators; see regulator.txt.
-
- The SLG51000 regulators are bound using their names listed below:
- ldo1
- ldo2
- ldo3
- ldo4
- ldo5
- ldo6
- ldo7
-
-Optional properties for regulators:
-- enable-gpios : Specify a valid GPIO for platform control of the regulator.
-
-Example:
- pmic: slg51000@75 {
- compatible = "dlg,slg51000";
- reg = <0x75>;
-
- regulators {
- ldo1 {
- regulator-name = "ldo1";
- regulator-min-microvolt = <2400000>;
- regulator-max-microvolt = <3300000>;
- };
-
- ldo2 {
- regulator-name = "ldo2";
- regulator-min-microvolt = <2400000>;
- regulator-max-microvolt = <3300000>;
- };
-
- ldo3 {
- regulator-name = "ldo3";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3750000>;
- };
-
- ldo4 {
- regulator-name = "ldo4";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3750000>;
- };
-
- ldo5 {
- regulator-name = "ldo5";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1200000>;
- };
-
- ldo6 {
- regulator-name = "ldo6";
- regulator-min-microvolt = <500000>;
- regulator-max-microvolt = <1200000>;
- };
-
- ldo7 {
- regulator-name = "ldo7";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3750000>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
index 7d53cfa2c288..c9586d277f41 100644
--- a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
+++ b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.yaml
@@ -25,8 +25,8 @@ properties:
patternProperties:
"^(reg11|reg18|usb33)$":
type: object
-
$ref: regulator.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml b/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
index 011819c10988..11e378648b3f 100644
--- a/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
+++ b/Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
@@ -29,11 +29,13 @@ properties:
Initial data for the LDO1 regulator.
$ref: regulator.yaml#
type: object
+ unevaluatedProperties: false
micvdd:
description:
Initial data for the MICVDD regulator.
$ref: regulator.yaml#
type: object
+ unevaluatedProperties: false
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
new file mode 100644
index 000000000000..7a6de938b11d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
@@ -0,0 +1,313 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,cs42l43.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CS42L43 Audio CODEC
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description: |
+ The CS42L43 is an audio CODEC with integrated MIPI SoundWire interface
+ (Version 1.2.1 compliant), I2C, SPI, and I2S/TDM interfaces designed
+ for portable applications. It provides a high dynamic range, stereo
+ DAC for headphone output, two integrated Class D amplifiers for
+ loudspeakers, and two ADCs for wired headset microphone input or
+ stereo line input. PDM inputs are provided for digital microphones.
+
+allOf:
+ - $ref: dai-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ - cirrus,cs42l43
+
+ reg:
+ maxItems: 1
+
+ vdd-p-supply:
+ description:
+ Power supply for the high voltage interface.
+
+ vdd-a-supply:
+ description:
+ Power supply for internal analog circuits.
+
+ vdd-d-supply:
+ description:
+ Power supply for internal digital circuits. Can be internally supplied.
+
+ vdd-io-supply:
+ description:
+ Power supply for external interface and internal digital logic.
+
+ vdd-cp-supply:
+ description:
+ Power supply for the amplifier 3 and 4 charge pump.
+
+ vdd-amp-supply:
+ description:
+ Power supply for amplifier 1 and 2.
+
+ reset-gpios:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ "#sound-dai-cells":
+ const: 1
+
+ clocks:
+ items:
+ - description: Synchronous audio clock provided on mclk_in.
+
+ clock-names:
+ const: mclk
+
+ cirrus,bias-low:
+ type: boolean
+ description:
+ Select a 1.8V headset micbias rather than 2.8V.
+
+ cirrus,bias-sense-microamp:
+ description:
+ Current at which the headset micbias sense clamp will engage, 0 to
+ disable.
+ enum: [ 0, 14, 23, 41, 50, 60, 68, 86, 95 ]
+ default: 0
+
+ cirrus,bias-ramp-ms:
+ description:
+ Time in milliseconds the hardware allows for the headset micbias to
+ ramp up.
+ enum: [ 10, 40, 90, 170 ]
+ default: 170
+
+ cirrus,detect-us:
+ description:
+      Time in microseconds the type detection will run for. Longer values will
+      cause more audible effects, but give more accurate detection.
+ enum: [ 20, 100, 1000, 10000, 50000, 75000, 100000, 200000 ]
+ default: 10000
+
+ cirrus,button-automute:
+ type: boolean
+ description:
+ Enable the hardware automuting of decimator 1 when a headset button is
+ pressed.
+
+ cirrus,buttons-ohms:
+ description:
+      Impedance in Ohms for each headset button; these should be listed in
+      ascending order.
+ minItems: 1
+ maxItems: 6
+
+ cirrus,tip-debounce-ms:
+ description:
+ Software debounce on tip sense triggering in milliseconds.
+ default: 0
+
+ cirrus,tip-invert:
+ type: boolean
+ description:
+ Indicates tip detect polarity, inverted implies open-circuit whilst the
+ jack is inserted.
+
+ cirrus,tip-disable-pullup:
+ type: boolean
+ description:
+ Indicates if the internal pullup on the tip detect should be disabled.
+
+ cirrus,tip-fall-db-ms:
+ description:
+ Time in milliseconds a falling edge on the tip detect should be hardware
+ debounced for. Note the falling edge is considered after the invert.
+ enum: [ 0, 125, 250, 500, 750, 1000, 1250, 1500 ]
+ default: 500
+
+ cirrus,tip-rise-db-ms:
+ description:
+ Time in milliseconds a rising edge on the tip detect should be hardware
+ debounced for. Note the rising edge is considered after the invert.
+ enum: [ 0, 125, 250, 500, 750, 1000, 1250, 1500 ]
+ default: 500
+
+ cirrus,use-ring-sense:
+ type: boolean
+ description:
+ Indicates if the ring sense should be used.
+
+ cirrus,ring-invert:
+ type: boolean
+ description:
+ Indicates ring detect polarity, inverted implies open-circuit whilst the
+ jack is inserted.
+
+ cirrus,ring-disable-pullup:
+ type: boolean
+ description:
+ Indicates if the internal pullup on the ring detect should be disabled.
+
+ cirrus,ring-fall-db-ms:
+ description:
+ Time in milliseconds a falling edge on the ring detect should be hardware
+ debounced for. Note the falling edge is considered after the invert.
+ enum: [ 0, 125, 250, 500, 750, 1000, 1250, 1500 ]
+ default: 500
+
+ cirrus,ring-rise-db-ms:
+ description:
+ Time in milliseconds a rising edge on the ring detect should be hardware
+ debounced for. Note the rising edge is considered after the invert.
+ enum: [ 0, 125, 250, 500, 750, 1000, 1250, 1500 ]
+ default: 500
+
+ pinctrl:
+ type: object
+ $ref: /schemas/pinctrl/pinctrl.yaml#
+ additionalProperties: false
+
+ properties:
+ gpio-controller: true
+
+ "#gpio-cells":
+ const: 2
+
+ gpio-ranges:
+ items:
+ - description: A phandle to the CODEC pinctrl node
+ minimum: 0
+ - const: 0
+ - const: 0
+ - const: 3
+
+ patternProperties:
+ "-state$":
+ oneOf:
+ - $ref: "#/$defs/cirrus-cs42l43-state"
+ - patternProperties:
+ "-pins$":
+ $ref: "#/$defs/cirrus-cs42l43-state"
+ additionalProperties: false
+
+ spi:
+ type: object
+ $ref: /schemas/spi/spi-controller.yaml#
+ unevaluatedProperties: false
+
+$defs:
+ cirrus-cs42l43-state:
+ type: object
+
+ allOf:
+ - $ref: /schemas/pinctrl/pincfg-node.yaml#
+ - $ref: /schemas/pinctrl/pinmux-node.yaml#
+
+ oneOf:
+ - required: [ groups ]
+ - required: [ pins ]
+
+ additionalProperties: false
+
+ properties:
+ groups:
+ enum: [ gpio1, gpio2, gpio3, asp, pdmout2, pdmout1, i2c, spi ]
+
+ pins:
+ enum: [ gpio1, gpio2, gpio3,
+ asp_dout, asp_fsync, asp_bclk,
+ pdmout2_clk, pdmout2_data, pdmout1_clk, pdmout1_data,
+ i2c_sda, i2c_scl,
+ spi_miso, spi_sck, spi_ssb ]
+
+ function:
+ enum: [ gpio, spdif, irq, mic-shutter, spk-shutter ]
+
+ drive-strength:
+ description: Set drive strength in mA
+ enum: [ 1, 2, 4, 8, 9, 10, 12, 16 ]
+
+ input-debounce:
+ description: Set input debounce in uS
+ enum: [ 0, 85 ]
+
+required:
+ - compatible
+ - reg
+ - vdd-p-supply
+ - vdd-a-supply
+ - vdd-io-supply
+ - vdd-cp-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cs42l43: codec@1a {
+ compatible = "cirrus,cs42l43";
+ reg = <0x1a>;
+
+ vdd-p-supply = <&vdd5v0>;
+ vdd-a-supply = <&vdd1v8>;
+ vdd-io-supply = <&vdd1v8>;
+ vdd-cp-supply = <&vdd1v8>;
+ vdd-amp-supply = <&vdd5v0>;
+
+ reset-gpios = <&gpio 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupt-parent = <&gpio>;
+ interrupts = <56 IRQ_TYPE_LEVEL_LOW>;
+
+ #sound-dai-cells = <1>;
+
+ clocks = <&clks 0>;
+ clock-names = "mclk";
+
+ cs42l43_pins: pinctrl {
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&cs42l43_pins 0 0 3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinsettings>;
+
+ pinsettings: default-state {
+ shutter-pins {
+ groups = "gpio3";
+ function = "mic-shutter";
+ };
+ };
+ };
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cs-gpios = <&cs42l43_pins 1 0>;
+
+ sensor@0 {
+ compatible = "bosch,bme680";
+ reg = <0>;
+ spi-max-frequency = <1400000>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/brcm,bcm63xx-spi.yaml b/Documentation/devicetree/bindings/spi/brcm,bcm63xx-spi.yaml
new file mode 100644
index 000000000000..fa03cdd68e70
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/brcm,bcm63xx-spi.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/brcm,bcm63xx-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM6348/BCM6358 SPI controller
+
+maintainers:
+ - Jonas Gorski <jonas.gorski@gmail.com>
+
+description: |
+ Broadcom "Low Speed" SPI controller found in many older MIPS based Broadband
+ SoCs.
+
+ This controller has a limitation that can not keep the chip select line active
+ between the SPI transfers within the same SPI message. This can terminate the
+ transaction to some SPI devices prematurely. The issue can be worked around by
+ the controller's prepend mode.
+
+allOf:
+ - $ref: spi-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - brcm,bcm6368-spi
+ - brcm,bcm6362-spi
+ - brcm,bcm63268-spi
+ - const: brcm,bcm6358-spi
+ - enum:
+ - brcm,bcm6348-spi
+ - brcm,bcm6358-spi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SPI master reference clock
+
+ clock-names:
+ items:
+ - const: spi
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi@10000800 {
+ compatible = "brcm,bcm6368-spi", "brcm,bcm6358-spi";
+ reg = <0x10000800 0x70c>;
+ interrupts = <1>;
+ clocks = <&clkctl 9>;
+ clock-names = "spi";
+ num-cs = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
index 4f15f9a0cc34..cca81f89e252 100644
--- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
+++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml
@@ -86,7 +86,17 @@ properties:
maxItems: 1
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 3
+
+ clock-names:
+ oneOf:
+ - items:
+ - const: ref
+ - items:
+ - const: ref
+ - const: ahb
+ - const: apb
cdns,fifo-depth:
description:
diff --git a/Documentation/devicetree/bindings/spi/loongson,ls2k-spi.yaml b/Documentation/devicetree/bindings/spi/loongson,ls2k-spi.yaml
new file mode 100644
index 000000000000..de9d32feadf5
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/loongson,ls2k-spi.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/loongson,ls2k-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Loongson SPI controller
+
+maintainers:
+ - Yinbo Zhu <zhuyinbo@loongson.cn>
+
+allOf:
+ - $ref: /schemas/spi/spi-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - loongson,ls2k1000-spi
+ - items:
+ - enum:
+ - loongson,ls2k0500-spi
+ - const: loongson,ls2k1000-spi
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ spi0: spi@1fff0220{
+ compatible = "loongson,ls2k1000-spi";
+ reg = <0x1fff0220 0x10>;
+ clocks = <&clk 17>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
deleted file mode 100644
index db8e0d71c5bc..000000000000
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-NVIDIA Tegra114 SPI controller.
-
-Required properties:
-- compatible : For Tegra114, must contain "nvidia,tegra114-spi".
- Otherwise, must contain '"nvidia,<chip>-spi", "nvidia,tegra114-spi"' where
- <chip> is tegra124, tegra132, or tegra210.
-- reg: Should contain SPI registers location and length.
-- interrupts: Should contain SPI interrupts.
-- clock-names : Must include the following entries:
- - spi
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - spi
-- dmas : Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names : Must include the following entries:
- - rx
- - tx
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-
-Recommended properties:
-- spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-Optional properties:
-- nvidia,tx-clk-tap-delay: Delays the clock going out to the external device
- with this tap value. This property is used to tune the outgoing data from
- Tegra SPI master with respect to outgoing Tegra SPI master clock.
- Tap values vary based on the platform design trace lengths from Tegra SPI
- to corresponding slave devices. Valid tap values are from 0 thru 63.
-- nvidia,rx-clk-tap-delay: Delays the clock coming in from the external device
- with this tap value. This property is used to adjust the Tegra SPI master
- clock with respect to the data from the SPI slave device.
- Tap values vary based on the platform design trace lengths from Tegra SPI
- to corresponding slave devices. Valid tap values are from 0 thru 63.
-
-Example:
-
-spi@7000d600 {
- compatible = "nvidia,tegra114-spi";
- reg = <0x7000d600 0x200>;
- interrupts = <0 82 0x04>;
- spi-max-frequency = <25000000>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&tegra_car 44>;
- clock-names = "spi";
- resets = <&tegra_car 44>;
- reset-names = "spi";
- dmas = <&apbdma 16>, <&apbdma 16>;
- dma-names = "rx", "tx";
- <spi-client>@<bus_num> {
- ...
- ...
- nvidia,rx-clk-tap-delay = <0>;
- nvidia,tx-clk-tap-delay = <16>;
- ...
- };
-
-};
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.yaml
new file mode 100644
index 000000000000..58222ffa53d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra114-spi.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/nvidia,tegra114-spi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra114 SPI controller
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ oneOf:
+ - const: nvidia,tegra114-spi
+ - items:
+ - enum:
+ - nvidia,tegra210-spi
+ - nvidia,tegra124-spi
+ - const: nvidia,tegra114-spi
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: SPI module clock
+
+ clock-names:
+ items:
+ - const: spi
+
+ resets:
+ items:
+ - description: SPI module reset
+
+ reset-names:
+ items:
+ - const: spi
+
+ dmas:
+ items:
+ - description: DMA channel for the reception FIFO
+ - description: DMA channel for the transmission FIFO
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ spi-max-frequency:
+ description: Maximum SPI clocking speed of the controller in Hz.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+allOf:
+ - $ref: spi-controller.yaml
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ spi@7000d600 {
+ compatible = "nvidia,tegra114-spi";
+ reg = <0x7000d600 0x200>;
+ interrupts = <0 82 0x04>;
+ clocks = <&tegra_car 44>;
+ clock-names = "spi";
+ resets = <&tegra_car 44>;
+ reset-names = "spi";
+ dmas = <&apbdma 16>, <&apbdma 16>;
+ dma-names = "rx", "tx";
+
+ spi-max-frequency = <25000000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ nvidia,rx-clk-tap-delay = <0>;
+ nvidia,tx-clk-tap-delay = <16>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
deleted file mode 100644
index c212491929b5..000000000000
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-NVIDIA Tegra20 SFLASH controller.
-
-Required properties:
-- compatible : should be "nvidia,tegra20-sflash".
-- reg: Should contain SFLASH registers location and length.
-- interrupts: Should contain SFLASH interrupts.
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - spi
-- dmas : Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names : Must include the following entries:
- - rx
- - tx
-
-Recommended properties:
-- spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Example:
-
-spi@7000c380 {
- compatible = "nvidia,tegra20-sflash";
- reg = <0x7000c380 0x80>;
- interrupts = <0 39 0x04>;
- spi-max-frequency = <25000000>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&tegra_car 43>;
- resets = <&tegra_car 43>;
- reset-names = "spi";
- dmas = <&apbdma 11>, <&apbdma 11>;
- dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.yaml
new file mode 100644
index 000000000000..e245bad85a25
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-sflash.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/nvidia,tegra20-sflash.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20 SFLASH controller
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra20-sflash
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: module clock
+
+ resets:
+ items:
+ - description: module reset
+
+ reset-names:
+ items:
+ - const: spi
+
+ dmas:
+ items:
+ - description: DMA channel used for reception
+ - description: DMA channel used for transmission
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ spi-max-frequency:
+ description: Maximum SPI clocking speed of the controller in Hz.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+allOf:
+ - $ref: spi-controller.yaml
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+ - reset-names
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra20-car.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ spi@7000c380 {
+ compatible = "nvidia,tegra20-sflash";
+ reg = <0x7000c380 0x80>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ spi-max-frequency = <25000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&tegra_car TEGRA20_CLK_SPI>;
+ resets = <&tegra_car 43>;
+ reset-names = "spi";
+ dmas = <&apbdma 11>, <&apbdma 11>;
+ dma-names = "rx", "tx";
+ };
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
deleted file mode 100644
index 40d80b93e327..000000000000
--- a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-NVIDIA Tegra20/Tegra30 SLINK controller.
-
-Required properties:
-- compatible : should be "nvidia,tegra20-slink", "nvidia,tegra30-slink".
-- reg: Should contain SLINK registers location and length.
-- interrupts: Should contain SLINK interrupts.
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - spi
-- dmas : Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names : Must include the following entries:
- - rx
- - tx
-
-Recommended properties:
-- spi-max-frequency: Definition as per
- Documentation/devicetree/bindings/spi/spi-bus.txt
-
-Example:
-
-spi@7000d600 {
- compatible = "nvidia,tegra20-slink";
- reg = <0x7000d600 0x200>;
- interrupts = <0 82 0x04>;
- spi-max-frequency = <25000000>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&tegra_car 44>;
- resets = <&tegra_car 44>;
- reset-names = "spi";
- dmas = <&apbdma 16>, <&apbdma 16>;
- dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.yaml
new file mode 100644
index 000000000000..291c25ec015d
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-slink.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spi/nvidia,tegra20-slink.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20/30 SLINK controller
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ enum:
+ - nvidia,tegra20-slink
+ - nvidia,tegra30-slink
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: module clock
+
+ resets:
+ items:
+ - description: module reset
+
+ reset-names:
+ items:
+ - const: spi
+
+ dmas:
+ items:
+ - description: DMA channel used for reception
+ - description: DMA channel used for transmission
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ operating-points-v2:
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ power-domains:
+ items:
+ - description: phandle to the core power domain
+
+ spi-max-frequency:
+ description: Maximum SPI clocking speed of the controller in Hz.
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+allOf:
+ - $ref: spi-controller.yaml
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+ - reset-names
+ - dmas
+ - dma-names
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra20-car.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ spi@7000d600 {
+ compatible = "nvidia,tegra20-slink";
+ reg = <0x7000d600 0x200>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ spi-max-frequency = <25000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&tegra_car TEGRA20_CLK_SBC2>;
+ resets = <&tegra_car 44>;
+ reset-names = "spi";
+ dmas = <&apbdma 16>, <&apbdma 16>;
+ dma-names = "rx", "tx";
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-bcm63xx.txt b/Documentation/devicetree/bindings/spi/spi-bcm63xx.txt
deleted file mode 100644
index 1c16f6692613..000000000000
--- a/Documentation/devicetree/bindings/spi/spi-bcm63xx.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Binding for Broadcom BCM6348/BCM6358 SPI controller
-
-Required properties:
-- compatible: must contain one of "brcm,bcm6348-spi", "brcm,bcm6358-spi".
-- reg: Base address and size of the controllers memory area.
-- interrupts: Interrupt for the SPI block.
-- clocks: phandle of the SPI clock.
-- clock-names: has to be "spi".
-- #address-cells: <1>, as required by generic SPI binding.
-- #size-cells: <0>, also as required by generic SPI binding.
-
-Optional properties:
-- num-cs: some controllers have less than 8 cs signals. Defaults to 8
- if absent.
-
-Child nodes as per the generic SPI binding.
-
-Example:
-
- spi@10000800 {
- compatible = "brcm,bcm6368-spi", "brcm,bcm6358-spi";
- reg = <0x10000800 0x70c>;
-
- interrupts = <1>;
-
- clocks = <&clkctl 9>;
- clock-names = "spi";
-
- num-cs = <5>;
-
- #address-cells = <1>;
- #size-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
index b7552739b554..d4b61b0e8301 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -49,6 +49,12 @@ properties:
enum: [ 0, 1 ]
default: 0
+ power-domains:
+ maxItems: 1
+
+ label:
+ description: Descriptive name of the SPI controller.
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
index e91425012319..727c5346b8ce 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.yaml
@@ -63,6 +63,9 @@ properties:
maximum: 2
default: 1
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
index a813c971ecf6..7fd591145480 100644
--- a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.yaml
@@ -45,6 +45,9 @@ properties:
- const: fspi_en
- const: fspi
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/spi/spi-pl022.yaml b/Documentation/devicetree/bindings/spi/spi-pl022.yaml
index 91e540a92faf..5e5a704a766e 100644
--- a/Documentation/devicetree/bindings/spi/spi-pl022.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-pl022.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: spi-controller.yaml#
+ - $ref: /schemas/arm/primecell.yaml#
# We need a select here so we don't match all nodes with 'arm,primecell'
select:
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index ba2bfb547909..40bc475ee7e1 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -119,6 +119,10 @@ properties:
- fsl,mpr121
# Monolithic Power Systems Inc. multi-phase controller mp2888
- mps,mp2888
+ # Monolithic Power Systems Inc. multi-phase controller mp2971
+ - mps,mp2971
+ # Monolithic Power Systems Inc. multi-phase controller mp2973
+ - mps,mp2973
# Monolithic Power Systems Inc. multi-phase controller mp2975
- mps,mp2975
# Honeywell Humidicon HIH-6130 humidity/temperature sensor
@@ -315,6 +319,8 @@ properties:
- plx,pex8648
# Pulsedlight LIDAR range-finding sensor
- pulsedlight,lidar-lite-v2
+ # Renesas HS3001 Temperature and Relative Humidity Sensors
+ - renesas,hs3001
# Renesas ISL29501 time-of-flight sensor
- renesas,isl29501
# Rohm DH2228FV
diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst
index 5158577bc29b..8c0845c4eee7 100644
--- a/Documentation/driver-api/s390-drivers.rst
+++ b/Documentation/driver-api/s390-drivers.rst
@@ -27,7 +27,7 @@ not strictly considered I/O devices. They are considered here as well,
although they are not the focus of this document.
Some additional information can also be found in the kernel source under
-Documentation/s390/driver-model.rst.
+Documentation/arch/s390/driver-model.rst.
The css bus
===========
@@ -38,7 +38,7 @@ into several categories:
* Standard I/O subchannels, for use by the system. They have a child
device on the ccw bus and are described below.
* I/O subchannels bound to the vfio-ccw driver. See
- Documentation/s390/vfio-ccw.rst.
+ Documentation/arch/s390/vfio-ccw.rst.
* Message subchannels. No Linux driver currently exists.
* CHSC subchannels (at most one). The chsc subchannel driver can be used
to send asynchronous chsc commands.
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index eccd327e6df5..a624e92f2687 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -332,54 +332,121 @@ Encryption modes and usage
fscrypt allows one encryption mode to be specified for file contents
and one encryption mode to be specified for filenames. Different
directory trees are permitted to use different encryption modes.
+
+Supported modes
+---------------
+
Currently, the following pairs of encryption modes are supported:
- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
-- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
+- AES-256-XTS for contents and AES-256-HCTR2 for filenames
- Adiantum for both contents and filenames
-- AES-256-XTS for contents and AES-256-HCTR2 for filenames (v2 policies only)
-- SM4-XTS for contents and SM4-CTS-CBC for filenames (v2 policies only)
-
-If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
-
-AES-128-CBC was added only for low-powered embedded devices with
-crypto accelerators such as CAAM or CESA that do not support XTS. To
-use AES-128-CBC, CONFIG_CRYPTO_ESSIV and CONFIG_CRYPTO_SHA256 (or
-another SHA-256 implementation) must be enabled so that ESSIV can be
-used.
-
-Adiantum is a (primarily) stream cipher-based mode that is fast even
-on CPUs without dedicated crypto instructions. It's also a true
-wide-block mode, unlike XTS. It can also eliminate the need to derive
-per-file encryption keys. However, it depends on the security of two
-primitives, XChaCha12 and AES-256, rather than just one. See the
-paper "Adiantum: length-preserving encryption for entry-level
-processors" (https://eprint.iacr.org/2018/720.pdf) for more details.
-To use Adiantum, CONFIG_CRYPTO_ADIANTUM must be enabled. Also, fast
-implementations of ChaCha and NHPoly1305 should be enabled, e.g.
-CONFIG_CRYPTO_CHACHA20_NEON and CONFIG_CRYPTO_NHPOLY1305_NEON for ARM.
-
-AES-256-HCTR2 is another true wide-block encryption mode that is intended for
-use on CPUs with dedicated crypto instructions. AES-256-HCTR2 has the property
-that a bitflip in the plaintext changes the entire ciphertext. This property
-makes it desirable for filename encryption since initialization vectors are
-reused within a directory. For more details on AES-256-HCTR2, see the paper
-"Length-preserving encryption with HCTR2"
-(https://eprint.iacr.org/2021/1441.pdf). To use AES-256-HCTR2,
-CONFIG_CRYPTO_HCTR2 must be enabled. Also, fast implementations of XCTR and
-POLYVAL should be enabled, e.g. CRYPTO_POLYVAL_ARM64_CE and
-CRYPTO_AES_ARM64_CE_BLK for ARM64.
-
-SM4 is a Chinese block cipher that is an alternative to AES. It has
-not seen as much security review as AES, and it only has a 128-bit key
-size. It may be useful in cases where its use is mandated.
-Otherwise, it should not be used. For SM4 support to be available, it
-also needs to be enabled in the kernel crypto API.
-
-New encryption modes can be added relatively easily, without changes
-to individual filesystems. However, authenticated encryption (AE)
-modes are not currently supported because of the difficulty of dealing
-with ciphertext expansion.
+- AES-128-CBC-ESSIV for contents and AES-128-CTS-CBC for filenames
+- SM4-XTS for contents and SM4-CTS-CBC for filenames
+
+Authenticated encryption modes are not currently supported because of
+the difficulty of dealing with ciphertext expansion. Therefore,
+contents encryption uses a block cipher in `XTS mode
+<https://en.wikipedia.org/wiki/Disk_encryption_theory#XTS>`_ or
+`CBC-ESSIV mode
+<https://en.wikipedia.org/wiki/Disk_encryption_theory#Encrypted_salt-sector_initialization_vector_(ESSIV)>`_,
+or a wide-block cipher. Filenames encryption uses a
+block cipher in `CTS-CBC mode
+<https://en.wikipedia.org/wiki/Ciphertext_stealing>`_ or a wide-block
+cipher.
+
+The (AES-256-XTS, AES-256-CTS-CBC) pair is the recommended default.
+It is also the only option that is *guaranteed* to always be supported
+if the kernel supports fscrypt at all; see `Kernel config options`_.
+
+The (AES-256-XTS, AES-256-HCTR2) pair is also a good choice that
+upgrades the filenames encryption to use a wide-block cipher. (A
+*wide-block cipher*, also called a tweakable super-pseudorandom
+permutation, has the property that changing one bit scrambles the
+entire result.) As described in `Filenames encryption`_, a wide-block
+cipher is the ideal mode for the problem domain, though CTS-CBC is the
+"least bad" choice among the alternatives. For more information about
+HCTR2, see `the HCTR2 paper <https://eprint.iacr.org/2021/1441.pdf>`_.
+
+Adiantum is recommended on systems where AES is too slow due to lack
+of hardware acceleration for AES. Adiantum is a wide-block cipher
+that uses XChaCha12 and AES-256 as its underlying components. Most of
+the work is done by XChaCha12, which is much faster than AES when AES
+acceleration is unavailable. For more information about Adiantum, see
+`the Adiantum paper <https://eprint.iacr.org/2018/720.pdf>`_.
+
+The (AES-128-CBC-ESSIV, AES-128-CTS-CBC) pair exists only to support
+systems whose only form of AES acceleration is an off-CPU crypto
+accelerator such as CAAM or CESA that does not support XTS.
+
+The remaining mode pairs are the "national pride ciphers":
+
+- (SM4-XTS, SM4-CTS-CBC)
+
+Generally speaking, these ciphers aren't "bad" per se, but they
+receive limited security review compared to the usual choices such as
+AES and ChaCha. They also don't bring much new to the table. It is
+suggested to only use these ciphers where their use is mandated.
+
+Kernel config options
+---------------------
+
+Enabling fscrypt support (CONFIG_FS_ENCRYPTION) automatically pulls in
+only the basic support from the crypto API needed to use AES-256-XTS
+and AES-256-CTS-CBC encryption. For optimal performance, it is
+strongly recommended to also enable any available platform-specific
+kconfig options that provide acceleration for the algorithm(s) you
+wish to use. Support for any "non-default" encryption modes typically
+requires extra kconfig options as well.
+
+Below, some relevant options are listed by encryption mode. Note that
+acceleration options not listed below may be available for your
+platform; refer to the kconfig menus. File contents encryption can
+also be configured to use inline encryption hardware instead of the
+kernel crypto API (see `Inline encryption support`_); in that case,
+the file contents mode doesn't need to be supported in the kernel
+crypto API, but the filenames mode still does.
+
+- AES-256-XTS and AES-256-CTS-CBC
+ - Recommended:
+ - arm64: CONFIG_CRYPTO_AES_ARM64_CE_BLK
+ - x86: CONFIG_CRYPTO_AES_NI_INTEL
+
+- AES-256-HCTR2
+ - Mandatory:
+ - CONFIG_CRYPTO_HCTR2
+ - Recommended:
+ - arm64: CONFIG_CRYPTO_AES_ARM64_CE_BLK
+ - arm64: CONFIG_CRYPTO_POLYVAL_ARM64_CE
+ - x86: CONFIG_CRYPTO_AES_NI_INTEL
+ - x86: CONFIG_CRYPTO_POLYVAL_CLMUL_NI
+
+- Adiantum
+ - Mandatory:
+ - CONFIG_CRYPTO_ADIANTUM
+ - Recommended:
+ - arm32: CONFIG_CRYPTO_CHACHA20_NEON
+ - arm32: CONFIG_CRYPTO_NHPOLY1305_NEON
+ - arm64: CONFIG_CRYPTO_CHACHA20_NEON
+ - arm64: CONFIG_CRYPTO_NHPOLY1305_NEON
+ - x86: CONFIG_CRYPTO_CHACHA20_X86_64
+ - x86: CONFIG_CRYPTO_NHPOLY1305_SSE2
+ - x86: CONFIG_CRYPTO_NHPOLY1305_AVX2
+
+- AES-128-CBC-ESSIV and AES-128-CTS-CBC
+ - Mandatory:
+ - CONFIG_CRYPTO_ESSIV
+ - CONFIG_CRYPTO_SHA256 or another SHA-256 implementation
+ - Recommended:
+ - AES-CBC acceleration
+
+fscrypt also uses HMAC-SHA512 for key derivation, so enabling SHA-512
+acceleration is recommended:
+
+- SHA-512
+ - Recommended:
+ - arm64: CONFIG_CRYPTO_SHA512_ARM64_CE
+ - x86: CONFIG_CRYPTO_SHA512_SSSE3
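+
+For example, a configuration aimed at the recommended default pair
+plus AES-256-HCTR2 filenames encryption on x86-64 might contain the
+following (illustrative fragment only; CONFIG_FS_ENCRYPTION itself
+pulls in the baseline AES-256-XTS and AES-256-CTS-CBC support)::
+
+    CONFIG_FS_ENCRYPTION=y
+    CONFIG_CRYPTO_HCTR2=y
+    CONFIG_CRYPTO_AES_NI_INTEL=y
+    CONFIG_CRYPTO_POLYVAL_CLMUL_NI=y
+    CONFIG_CRYPTO_SHA512_SSSE3=y
+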
Contents encryption
-------------------
@@ -493,7 +560,14 @@ This structure must be initialized as follows:
be set to constants from ``<linux/fscrypt.h>`` which identify the
encryption modes to use. If unsure, use FSCRYPT_MODE_AES_256_XTS
(1) for ``contents_encryption_mode`` and FSCRYPT_MODE_AES_256_CTS
- (4) for ``filenames_encryption_mode``.
+ (4) for ``filenames_encryption_mode``. For details, see `Encryption
+ modes and usage`_.
+
+ v1 encryption policies only support three combinations of modes:
+ (FSCRYPT_MODE_AES_256_XTS, FSCRYPT_MODE_AES_256_CTS),
+ (FSCRYPT_MODE_AES_128_CBC, FSCRYPT_MODE_AES_128_CTS), and
+ (FSCRYPT_MODE_ADIANTUM, FSCRYPT_MODE_ADIANTUM). v2 policies support
+ all combinations documented in `Supported modes`_.
- ``flags`` contains optional flags from ``<linux/fscrypt.h>``:
diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst
index ad6d21640576..d095c5838f94 100644
--- a/Documentation/filesystems/idmappings.rst
+++ b/Documentation/filesystems/idmappings.rst
@@ -146,9 +146,10 @@ For the rest of this document we will prefix all userspace ids with ``u`` and
all kernel ids with ``k``. Ranges of idmappings will be prefixed with ``r``. So
an idmapping will be written as ``u0:k10000:r10000``.
-For example, the id ``u1000`` is an id in the upper idmapset or "userspace
-idmapset" starting with ``u1000``. And it is mapped to ``k11000`` which is a
-kernel id in the lower idmapset or "kernel idmapset" starting with ``k10000``.
+For example, within this idmapping, the id ``u1000`` is an id in the upper
+idmapset or "userspace idmapset" starting with ``u0``. And it is mapped to
+``k11000`` which is a kernel id in the lower idmapset or "kernel idmapset"
+starting with ``k10000``.
A kernel id is always created by an idmapping. Such idmappings are associated
with user namespaces. Since we mainly care about how idmappings work we're not
@@ -373,6 +374,13 @@ kernel maps the caller's userspace id down into a kernel id according to the
caller's idmapping and then maps that kernel id up according to the
filesystem's idmapping.
+From the implementation point of view, it's worth mentioning how idmappings are represented.
+All idmappings are taken from the corresponding user namespace.
+
+ - caller's idmapping (usually taken from ``current_user_ns()``)
+ - filesystem's idmapping (``sb->s_user_ns``)
+ - mount's idmapping (``mnt_idmap(vfsmnt)``)
+
Let's see some examples with caller/filesystem idmapping but without mount
idmappings. This will exhibit some problems we can hit. After that we will
revisit/reconsider these examples, this time using mount idmappings, to see how
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index 0ca479dbb1cd..b7e5a3841aa4 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -85,13 +85,14 @@ prototypes::
struct dentry *dentry, struct fileattr *fa);
int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
struct posix_acl * (*get_acl)(struct mnt_idmap *, struct dentry *, int);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
locking rules:
all may block
-============== =============================================
+============== ==================================================
ops i_rwsem(inode)
-============== =============================================
+============== ==================================================
lookup: shared
create: exclusive
link: exclusive (both)
@@ -115,7 +116,8 @@ atomic_open: shared (exclusive if O_CREAT is set in open flags)
tmpfile: no
fileattr_get: no or exclusive
fileattr_set: exclusive
-============== =============================================
+get_offset_ctx no
+============== ==================================================
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_rwsem
@@ -374,10 +376,17 @@ invalidate_lock before invalidating page cache in truncate / hole punch
path (and thus calling into ->invalidate_folio) to block races between page
cache invalidation and page cache filling functions (fault, read, ...).
-->release_folio() is called when the kernel is about to try to drop the
-buffers from the folio in preparation for freeing it. It returns false to
-indicate that the buffers are (or may be) freeable. If ->release_folio is
-NULL, the kernel assumes that the fs has no private interest in the buffers.
+->release_folio() is called when the MM wants to make a change to the
+folio that would invalidate the filesystem's private data. For example,
+it may be about to be removed from the address_space or split. The folio
+is locked and not under writeback. It may be dirty. The gfp parameter
+is not usually used for allocation, but rather to indicate what the
+filesystem may do to attempt to free the private data. The filesystem may
+return false to indicate that the folio's private data cannot be freed.
+If it returns true, it should have already removed the private data from
+the folio. If a filesystem does not provide a ->release_folio method,
+the pagecache will assume that private data is buffer_heads and call
+try_to_free_buffers().
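+
+For example, a filesystem that attaches its own private structure to
+folios could implement this roughly as follows (a minimal sketch;
+"struct myfs_private" and myfs_private_busy() are hypothetical, and
+the gfp argument is ignored for brevity)::
+
+	static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
+	{
+		struct myfs_private *p;
+
+		if (!folio_test_private(folio))
+			return true;		/* nothing attached */
+		if (myfs_private_busy(folio))
+			return false;		/* private data not freeable now */
+		p = folio_detach_private(folio);
+		kfree(p);
+		return true;
+	}
+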
->free_folio() is called when the kernel has dropped the folio
from the page cache.
diff --git a/Documentation/filesystems/tmpfs.rst b/Documentation/filesystems/tmpfs.rst
index 2cd8fa332feb..56a26c843dbe 100644
--- a/Documentation/filesystems/tmpfs.rst
+++ b/Documentation/filesystems/tmpfs.rst
@@ -21,8 +21,8 @@ explained further below, some of which can be reconfigured dynamically on the
fly using a remount ('mount -o remount ...') of the filesystem. A tmpfs
filesystem can be resized but it cannot be resized to a size below its current
usage. tmpfs also supports POSIX ACLs, and extended attributes for the
-trusted.* and security.* namespaces. ramfs does not use swap and you cannot
-modify any parameter for a ramfs filesystem. The size limit of a ramfs
+trusted.*, security.* and user.* namespaces. ramfs does not use swap and you
+cannot modify any parameter for a ramfs filesystem. The size limit of a ramfs
filesystem is how much memory you have available, and so care must be taken if
used so to not run out of memory.
@@ -97,6 +97,9 @@ mount with such options, since it allows any user with write access to
use up all the memory on the machine; but enhances the scalability of
that instance in a system with many CPUs making intensive use of it.
+If nr_inodes is not 0, that limited space for inodes is also used up by
+extended attributes: "df -i"'s IUsed and IUse% increase, IFree decreases.
+
tmpfs blocks may be swapped out, when there is a shortage of memory.
tmpfs has a mount option to disable its use of swap:
@@ -123,6 +126,37 @@ sysfs file /sys/kernel/mm/transparent_hugepage/shmem_enabled: which can
be used to deny huge pages on all tmpfs mounts in an emergency, or to
force huge pages on all tmpfs mounts for testing.
+tmpfs also supports quota with the following mount options:
+
+======================== =================================================
+quota User and group quota accounting and enforcement
+ is enabled on the mount. Tmpfs uses hidden
+ system quota files that are initialized on mount.
+usrquota User quota accounting and enforcement is enabled
+ on the mount.
+grpquota Group quota accounting and enforcement is enabled
+ on the mount.
+usrquota_block_hardlimit Set global user quota block hard limit.
+usrquota_inode_hardlimit Set global user quota inode hard limit.
+grpquota_block_hardlimit Set global group quota block hard limit.
+grpquota_inode_hardlimit Set global group quota inode hard limit.
+======================== =================================================
+
+None of the quota related mount options can be set or changed on remount.
+
+Quota limit parameters accept a suffix k, m or g for kilo, mega and giga
+and can't be changed on remount. The default global quota limits take
+effect for every user/group/project except root the first time the quota
+entry for that id is accessed - typically the first time an inode owned
+by that id is created after the mount. In other words, instead of the
+limits being initialized to zero, they are initialized to the value
+provided with these mount options. The limits can still be changed for
+any user/group id at any time, as they normally can be.
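+
+For example (an illustrative invocation; the mount point and sizes are
+arbitrary), user and group quota enforcement with a default per-user
+block hard limit of 1g can be enabled with::
+
+    mount -t tmpfs -o size=4g,quota,usrquota_block_hardlimit=1g tmpfs /mnt/tmp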
+
+Note that tmpfs quotas do not support user namespaces, so no uid/gid
+translation is done if quotas are enabled inside user namespaces.
+
tmpfs has a mount option to set the NUMA memory allocation policy for
all files in that instance (if CONFIG_NUMA is enabled) - which can be
adjusted on the fly via 'mount -o remount ...'
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index cb2a97e49872..f8fe815ab1f3 100644
--- a/Documentation/filesystems/vfs.rst
+++ b/Documentation/filesystems/vfs.rst
@@ -260,9 +260,11 @@ filesystem. The following members are defined:
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *);
+ int (*freeze_super) (struct super_block *sb,
+ enum freeze_holder who);
int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *);
+ int (*thaw_super) (struct super_block *sb,
+ enum freeze_holder who);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -515,6 +517,7 @@ As of kernel 2.6.22, the following members are defined:
int (*fileattr_set)(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
};
Again, all methods are called without any locks being held, unless
@@ -675,7 +678,10 @@ otherwise noted.
called on ioctl(FS_IOC_SETFLAGS) and ioctl(FS_IOC_FSSETXATTR) to
change miscellaneous file flags and attributes. Callers hold
i_rwsem exclusive. If unset, then fall back to f_op->ioctl().
-
+``get_offset_ctx``
+ called to get the offset context for a directory inode. A
+ filesystem must define this operation to use
+ simple_offset_dir_operations.
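+
+	A purely illustrative sketch of a filesystem that embeds the
+	context in its per-inode structure ("struct myfs_inode" is
+	hypothetical, and dir_offsets is assumed to be initialized, e.g.
+	with simple_offset_init(), when the directory inode is created)::
+
+		struct myfs_inode {
+			struct offset_ctx dir_offsets;
+			struct inode vfs_inode;
+		};
+
+		static struct offset_ctx *myfs_get_offset_ctx(struct inode *inode)
+		{
+			return &container_of(inode, struct myfs_inode,
+					     vfs_inode)->dir_offsets;
+		}
+
+		static const struct inode_operations myfs_dir_inode_operations = {
+			.get_offset_ctx = myfs_get_offset_ctx,
+			/* ... */
+		};
+
+	Directories of such a filesystem can then use
+	simple_offset_dir_operations as their file_operations.
+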
The Address Space Object
========================
diff --git a/Documentation/firmware-guide/acpi/chromeos-acpi-device.rst b/Documentation/firmware-guide/acpi/chromeos-acpi-device.rst
index f37fc90ce340..89419e116413 100644
--- a/Documentation/firmware-guide/acpi/chromeos-acpi-device.rst
+++ b/Documentation/firmware-guide/acpi/chromeos-acpi-device.rst
@@ -5,9 +5,8 @@ Chrome OS ACPI Device
=====================
Hardware functionality specific to Chrome OS is exposed through a Chrome OS ACPI device.
-The plug and play ID of a Chrome OS ACPI device is GGL0001. GGL is a valid PNP ID of Google.
-PNP ID can be used with the ACPI devices according to the guidelines. The following ACPI
-objects are supported:
+The plug and play ID of a Chrome OS ACPI device is GGL0001 and the hardware ID is
+GOOG0016. The following ACPI objects are supported:
.. flat-table:: Supported ACPI Objects
:widths: 1 2
diff --git a/Documentation/hwmon/hs3001.rst b/Documentation/hwmon/hs3001.rst
new file mode 100644
index 000000000000..9f59dfc212d9
--- /dev/null
+++ b/Documentation/hwmon/hs3001.rst
@@ -0,0 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0-or-later
+
+Kernel driver HS3001
+====================
+
+Supported chips:
+
+ * Renesas HS3001, HS3002, HS3003, HS3004
+
+ Prefix: 'hs3001'
+
+ Addresses scanned: -
+
+ Datasheet: https://www.renesas.com/us/en/document/dst/hs300x-datasheet?r=417401
+
+Author:
+
+ - Andre Werner <andre.werner@systec-electronic.com>
+
+Description
+-----------
+
+This driver implements support for the Renesas HS3001 chips, a family of
+humidity and temperature sensors. Temperature is measured in degrees
+Celsius; relative humidity is expressed as a percentage. In the sysfs
+interface, all values are scaled by 1000, i.e. the value for 31.5 degrees
+Celsius is 31500.
+
+The device communicates using the I2C protocol. Sensors have the I2C
+address 0x44 by default.
+
+sysfs-Interface
+---------------
+
+=================== =================
+temp1_input: temperature input
+humidity1_input: humidity input
+=================== =================
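+
+For example, if the sensor was registered as hwmon device hwmon0 (the
+index is system-dependent), a reading of 31500 corresponds to 31.5
+degrees Celsius::
+
+    $ cat /sys/class/hwmon/hwmon0/temp1_input
+    31500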
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index 042e1cf9501b..88dadea85cfc 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -78,6 +78,7 @@ Hardware Monitoring Kernel Drivers
gxp-fan-ctrl
hih6130
hp-wmi-sensors
+ hs3001
ibmaem
ibm-cffps
ibmpowernv
@@ -195,7 +196,6 @@ Hardware Monitoring Kernel Drivers
shtc1
sis5595
sl28cpld
- smm665
smpro-hwmon
smsc47b397
smsc47m192
diff --git a/Documentation/hwmon/nct6775.rst b/Documentation/hwmon/nct6775.rst
index 5ba8276aad4b..9d7a10de61a7 100644
--- a/Documentation/hwmon/nct6775.rst
+++ b/Documentation/hwmon/nct6775.rst
@@ -80,7 +80,13 @@ Supported chips:
Datasheet: Available from Nuvoton upon request
+ * Nuvoton NCT6796D-S/NCT6799D-R
+ Prefix: 'nct6799'
+
+ Addresses scanned: ISA address retrieved from Super I/O registers
+
+ Datasheet: Available from Nuvoton upon request
Authors:
@@ -277,4 +283,7 @@ will not reflect a usable value. It often reports unreasonably high
temperatures, and in some cases the reported temperature declines if the actual
temperature increases (similar to the raw PECI temperature value - see PECI
specification for details). CPUTIN should therefore be ignored on ASUS
-boards. The CPU temperature on ASUS boards is reported from PECI 0.
+boards. The CPU temperature on ASUS boards is reported from PECI 0 or TSI 0.
+
+NCT6796D-S and NCT6799D-R chips are very similar and their chip_id indicates
+they are different versions. This driver treats them the same way.
diff --git a/Documentation/hwmon/pmbus.rst b/Documentation/hwmon/pmbus.rst
index 7ecfec6ca2db..eb1569bfa676 100644
--- a/Documentation/hwmon/pmbus.rst
+++ b/Documentation/hwmon/pmbus.rst
@@ -163,7 +163,7 @@ Emerson DS1200 power modules might look as follows::
.driver = {
.name = "ds1200",
},
- .probe_new = ds1200_probe,
+ .probe = ds1200_probe,
.id_table = ds1200_id,
};
diff --git a/Documentation/hwmon/smm665.rst b/Documentation/hwmon/smm665.rst
deleted file mode 100644
index 481e69d8bf39..000000000000
--- a/Documentation/hwmon/smm665.rst
+++ /dev/null
@@ -1,187 +0,0 @@
-Kernel driver smm665
-====================
-
-Supported chips:
-
- * Summit Microelectronics SMM465
-
- Prefix: 'smm465'
-
- Addresses scanned: -
-
- Datasheet:
-
- http://www.summitmicro.com/prod_select/summary/SMM465/SMM465DS.pdf
-
- * Summit Microelectronics SMM665, SMM665B
-
- Prefix: 'smm665'
-
- Addresses scanned: -
-
- Datasheet:
-
- http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf
-
- * Summit Microelectronics SMM665C
-
- Prefix: 'smm665c'
-
- Addresses scanned: -
-
- Datasheet:
-
- http://www.summitmicro.com/prod_select/summary/SMM665C/SMM665C_2125.pdf
-
- * Summit Microelectronics SMM764
-
- Prefix: 'smm764'
-
- Addresses scanned: -
-
- Datasheet:
-
- http://www.summitmicro.com/prod_select/summary/SMM764/SMM764_2098.pdf
-
- * Summit Microelectronics SMM766, SMM766B
-
- Prefix: 'smm766'
-
- Addresses scanned: -
-
- Datasheets:
-
- http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
-
- http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
-
-Author: Guenter Roeck <linux@roeck-us.net>
-
-
-Module Parameters
------------------
-
-* vref: int
- Default: 1250 (mV)
-
- Reference voltage on VREF_ADC pin in mV. It should not be necessary to set
- this parameter unless a non-default reference voltage is used.
-
-
-Description
------------
-
-[From datasheet] The SMM665 is an Active DC Output power supply Controller
-that monitors, margins and cascade sequences power. The part monitors six
-power supply channels as well as VDD, 12V input, two general-purpose analog
-inputs and an internal temperature sensor using a 10-bit ADC.
-
-Each monitored channel has its own high and low limits, plus a critical
-limit.
-
-Support for SMM465, SMM764, and SMM766 has been implemented but is untested.
-
-
-Usage Notes
------------
-
-This driver does not probe for devices, since there is no register which
-can be safely used to identify the chip. You will have to instantiate
-the devices explicitly. When instantiating the device, you have to specify
-its configuration register address.
-
-Example: the following will load the driver for an SMM665 at address 0x57
-on I2C bus #1::
-
- $ modprobe smm665
- $ echo smm665 0x57 > /sys/bus/i2c/devices/i2c-1/new_device
-
-
-Sysfs entries
--------------
-
-This driver uses the values in the datasheet to convert ADC register values
-into the values specified in the sysfs-interface document. All attributes are
-read only.
-
-Min, max, lcrit, and crit values are used by the chip to trigger external signals
-and/or other activity. Triggered signals can include HEALTHY, RST, Power Off,
-or Fault depending on the chip configuration. The driver reports values as lcrit
-or crit if exceeding the limits triggers RST, Power Off, or Fault, and as min or
-max otherwise. For details please see the SMM665 datasheet.
-
-For SMM465 and SMM764, values for Channel E and F are reported but undefined.
-
-======================= =======================================================
-in1_input 12V input voltage (mV)
-in2_input 3.3V (VDD) input voltage (mV)
-in3_input Channel A voltage (mV)
-in4_input Channel B voltage (mV)
-in5_input Channel C voltage (mV)
-in6_input Channel D voltage (mV)
-in7_input Channel E voltage (mV)
-in8_input Channel F voltage (mV)
-in9_input AIN1 voltage (mV)
-in10_input AIN2 voltage (mV)
-
-in1_min 12v input minimum voltage (mV)
-in2_min 3.3V (VDD) input minimum voltage (mV)
-in3_min Channel A minimum voltage (mV)
-in4_min Channel B minimum voltage (mV)
-in5_min Channel C minimum voltage (mV)
-in6_min Channel D minimum voltage (mV)
-in7_min Channel E minimum voltage (mV)
-in8_min Channel F minimum voltage (mV)
-in9_min AIN1 minimum voltage (mV)
-in10_min AIN2 minimum voltage (mV)
-
-in1_max 12v input maximum voltage (mV)
-in2_max 3.3V (VDD) input maximum voltage (mV)
-in3_max Channel A maximum voltage (mV)
-in4_max Channel B maximum voltage (mV)
-in5_max Channel C maximum voltage (mV)
-in6_max Channel D maximum voltage (mV)
-in7_max Channel E maximum voltage (mV)
-in8_max Channel F maximum voltage (mV)
-in9_max AIN1 maximum voltage (mV)
-in10_max AIN2 maximum voltage (mV)
-
-in1_lcrit 12v input critical minimum voltage (mV)
-in2_lcrit 3.3V (VDD) input critical minimum voltage (mV)
-in3_lcrit Channel A critical minimum voltage (mV)
-in4_lcrit Channel B critical minimum voltage (mV)
-in5_lcrit Channel C critical minimum voltage (mV)
-in6_lcrit Channel D critical minimum voltage (mV)
-in7_lcrit Channel E critical minimum voltage (mV)
-in8_lcrit Channel F critical minimum voltage (mV)
-in9_lcrit AIN1 critical minimum voltage (mV)
-in10_lcrit AIN2 critical minimum voltage (mV)
-
-in1_crit 12v input critical maximum voltage (mV)
-in2_crit 3.3V (VDD) input critical maximum voltage (mV)
-in3_crit Channel A critical maximum voltage (mV)
-in4_crit Channel B critical maximum voltage (mV)
-in5_crit Channel C critical maximum voltage (mV)
-in6_crit Channel D critical maximum voltage (mV)
-in7_crit Channel E critical maximum voltage (mV)
-in8_crit Channel F critical maximum voltage (mV)
-in9_crit AIN1 critical maximum voltage (mV)
-in10_crit AIN2 critical maximum voltage (mV)
-
-in1_crit_alarm 12v input critical alarm
-in2_crit_alarm 3.3V (VDD) input critical alarm
-in3_crit_alarm Channel A critical alarm
-in4_crit_alarm Channel B critical alarm
-in5_crit_alarm Channel C critical alarm
-in6_crit_alarm Channel D critical alarm
-in7_crit_alarm Channel E critical alarm
-in8_crit_alarm Channel F critical alarm
-in9_crit_alarm AIN1 critical alarm
-in10_crit_alarm AIN2 critical alarm
-
-temp1_input Chip temperature
-temp1_min Minimum chip temperature
-temp1_max Maximum chip temperature
-temp1_crit Critical chip temperature
-temp1_crit_alarm Temperature critical alarm
-======================= =======================================================
diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml
index 4c1f8c22627b..9806c44f604c 100644
--- a/Documentation/netlink/genetlink-c.yaml
+++ b/Documentation/netlink/genetlink-c.yaml
@@ -41,7 +41,7 @@ properties:
description: Name of the define for the family name.
type: string
c-version-name:
- description: Name of the define for the verion of the family.
+ description: Name of the define for the version of the family.
type: string
max-by-define:
description: Makes the number of attributes and commands be specified by a define, not an enum value.
diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml
index 196076dfa309..12a0a045605d 100644
--- a/Documentation/netlink/genetlink-legacy.yaml
+++ b/Documentation/netlink/genetlink-legacy.yaml
@@ -41,7 +41,7 @@ properties:
description: Name of the define for the family name.
type: string
c-version-name:
- description: Name of the define for the verion of the family.
+ description: Name of the define for the version of the family.
type: string
max-by-define:
description: Makes the number of attributes and commands be specified by a define, not an enum value.
diff --git a/Documentation/netlink/netlink-raw.yaml b/Documentation/netlink/netlink-raw.yaml
new file mode 100644
index 000000000000..896797876414
--- /dev/null
+++ b/Documentation/netlink/netlink-raw.yaml
@@ -0,0 +1,410 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+%YAML 1.2
+---
+$id: http://kernel.org/schemas/netlink/netlink-raw.yaml#
+$schema: https://json-schema.org/draft-07/schema
+
+# Common defines
+$defs:
+ uint:
+ type: integer
+ minimum: 0
+ len-or-define:
+ type: [ string, integer ]
+ pattern: ^[0-9A-Za-z_]+( - 1)?$
+ minimum: 0
+
+# Schema for specs
+title: Protocol
+description: Specification of a raw netlink protocol
+type: object
+required: [ name, doc, attribute-sets, operations ]
+additionalProperties: False
+properties:
+ name:
+ description: Name of the netlink family.
+ type: string
+ doc:
+ type: string
+ protocol:
+ description: Schema compatibility level.
+ enum: [ netlink-raw ] # Trim
+ # Start netlink-raw
+ protonum:
+ description: Protocol number to use for netlink-raw
+ type: integer
+ # End netlink-raw
+ uapi-header:
+ description: Path to the uAPI header, default is linux/${family-name}.h
+ type: string
+ # Start genetlink-c
+ c-family-name:
+ description: Name of the define for the family name.
+ type: string
+ c-version-name:
+ description: Name of the define for the version of the family.
+ type: string
+ max-by-define:
+ description: Makes the number of attributes and commands be specified by a define, not an enum value.
+ type: boolean
+ # End genetlink-c
+ # Start genetlink-legacy
+ kernel-policy:
+ description: |
+ Defines if the input policy in the kernel is global, per-operation, or split per operation type.
+ Default is split.
+ enum: [ split, per-op, global ]
+ # End genetlink-legacy
+
+ definitions:
+ description: List of type and constant definitions (enums, flags, defines).
+ type: array
+ items:
+ type: object
+ required: [ type, name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ header:
+ description: For C-compatible languages, header which already defines this value.
+ type: string
+ type:
+ enum: [ const, enum, flags, struct ] # Trim
+ doc:
+ type: string
+ # For const
+ value:
+ description: For const - the value.
+ type: [ string, integer ]
+ # For enum and flags
+ value-start:
+ description: For enum or flags the literal initializer for the first value.
+ type: [ string, integer ]
+ entries:
+ description: For enum or flags array of values.
+ type: array
+ items:
+ oneOf:
+ - type: string
+ - type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ value:
+ type: integer
+ doc:
+ type: string
+ render-max:
+ description: Render the max members for this enum.
+ type: boolean
+ # Start genetlink-c
+ enum-name:
+ description: Name for enum, if empty no name will be used.
+ type: [ string, "null" ]
+ name-prefix:
+ description: For enum the prefix of the values, optional.
+ type: string
+ # End genetlink-c
+ # Start genetlink-legacy
+ members:
+ description: List of struct members. Only scalar and string members are allowed.
+ type: array
+ items:
+ type: object
+ required: [ name, type ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ type:
+ description: The netlink attribute type
+ enum: [ u8, u16, u32, u64, s8, s16, s32, s64, string, binary ]
+ len:
+ $ref: '#/$defs/len-or-define'
+ byte-order:
+ enum: [ little-endian, big-endian ]
+ doc:
+ description: Documentation for the struct member attribute.
+ type: string
+ enum:
+ description: Name of the enum type used for the attribute.
+ type: string
+ enum-as-flags:
+ description: |
+ Treat the enum as flags. In most cases enum is either used as flags or as values.
+ Sometimes, however, both forms are necessary, in which case header contains the enum
+ form while specific attributes may request to convert the values into a bitfield.
+ type: boolean
+ display-hint: &display-hint
+ description: |
+ Optional format indicator that is intended only for choosing
+ the right formatting mechanism when displaying values of this
+ type.
+ enum: [ hex, mac, fddi, ipv4, ipv6, uuid ]
+ # End genetlink-legacy
+
+ attribute-sets:
+ description: Definition of attribute spaces for this family.
+ type: array
+ items:
+ description: Definition of a single attribute space.
+ type: object
+ required: [ name, attributes ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ Name used when referring to this space in other definitions, not used outside of the spec.
+ type: string
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the attributes. Default family[name]-set[name]-a-
+ type: string
+ enum-name:
+ description: Name for the enum type of the attribute.
+ type: string
+ doc:
+ description: Documentation of the space.
+ type: string
+ subset-of:
+ description: |
+ Name of another space which this is a logical part of. Sub-spaces can be used to define
+ a limited group of attributes which are used in a nest.
+ type: string
+ # Start genetlink-c
+ attr-cnt-name:
+ description: The explicit name for constant holding the count of attributes (last attr + 1).
+ type: string
+ attr-max-name:
+ description: The explicit name for last member of attribute enum.
+ type: string
+ # End genetlink-c
+ attributes:
+ description: List of attributes in the space.
+ type: array
+ items:
+ type: object
+ required: [ name, type ]
+ additionalProperties: False
+ properties:
+ name:
+ type: string
+ type: &attr-type
+ description: The netlink attribute type
+ enum: [ unused, pad, flag, binary, u8, u16, u32, u64, s32, s64,
+ string, nest, array-nest, nest-type-value ]
+ doc:
+ description: Documentation of the attribute.
+ type: string
+ value:
+ description: Value for the enum item representing this attribute in the uAPI.
+ $ref: '#/$defs/uint'
+ type-value:
+ description: Name of the value extracted from the type of a nest-type-value attribute.
+ type: array
+ items:
+ type: string
+ byte-order:
+ enum: [ little-endian, big-endian ]
+ multi-attr:
+ type: boolean
+ nested-attributes:
+ description: Name of the space (sub-space) used inside the attribute.
+ type: string
+ enum:
+ description: Name of the enum type used for the attribute.
+ type: string
+ enum-as-flags:
+ description: |
+ Treat the enum as flags. In most cases enum is either used as flags or as values.
+ Sometimes, however, both forms are necessary, in which case header contains the enum
+ form while specific attributes may request to convert the values into a bitfield.
+ type: boolean
+ checks:
+ description: Kernel input validation.
+ type: object
+ additionalProperties: False
+ properties:
+ flags-mask:
+ description: Name of the flags constant on which to base mask (unsigned scalar types only).
+ type: string
+ min:
+ description: Min value for an integer attribute.
+ type: integer
+ min-len:
+ description: Min length for a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ max-len:
+ description: Max length for a string or a binary attribute.
+ $ref: '#/$defs/len-or-define'
+ sub-type: *attr-type
+ display-hint: *display-hint
+ # Start genetlink-c
+ name-prefix:
+ type: string
+ # End genetlink-c
+ # Start genetlink-legacy
+ struct:
+ description: Name of the struct type used for the attribute.
+ type: string
+ # End genetlink-legacy
+
+ # Make sure name-prefix does not appear in subsets (subsets inherit naming)
+ dependencies:
+ name-prefix:
+ not:
+ required: [ subset-of ]
+ subset-of:
+ not:
+ required: [ name-prefix ]
+
+ operations:
+ description: Operations supported by the protocol.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ enum-model:
+ description: |
+ The model of assigning values to the operations.
+ "unified" is the recommended model where all message types belong
+ to a single enum.
+ "directional" has the messages sent to the kernel and from the kernel
+ enumerated separately.
+ enum: [ unified, directional ] # Trim
+ name-prefix:
+ description: |
+ Prefix for the C enum name of the command. The name is formed by concatenating
+ the prefix with the upper case name of the command, with dashes replaced by underscores.
+ type: string
+ enum-name:
+ description: Name for the enum type with commands.
+ type: string
+ async-prefix:
+ description: Same as name-prefix but used to render notifications and events to separate enum.
+ type: string
+ async-enum:
+ description: Name for the enum type with notifications/events.
+ type: string
+ # Start genetlink-legacy
+ fixed-header: &fixed-header
+ description: |
+ Name of the structure defining the optional fixed-length protocol
+ header. This header is placed in a message after the netlink and
+ genetlink headers and before any attributes.
+ type: string
+ # End genetlink-legacy
+ list:
+ description: List of commands
+ type: array
+ items:
+ type: object
+ additionalProperties: False
+ required: [ name, doc ]
+ properties:
+ name:
+ description: Name of the operation, also defining its C enum value in uAPI.
+ type: string
+ doc:
+ description: Documentation for the command.
+ type: string
+ value:
+ description: Value for the enum in the uAPI.
+ $ref: '#/$defs/uint'
+ attribute-set:
+ description: |
+ Attribute space from which attributes directly in the requests and replies
+ to this command are defined.
+ type: string
+ flags: &cmd_flags
+ description: Command flags.
+ type: array
+ items:
+ enum: [ admin-perm ]
+ dont-validate:
+ description: Kernel attribute validation flags.
+ type: array
+ items:
+ enum: [ strict, dump ]
+ # Start genetlink-legacy
+ fixed-header: *fixed-header
+ # End genetlink-legacy
+ do: &subop-type
+ description: Main command handler.
+ type: object
+ additionalProperties: False
+ properties:
+ request: &subop-attr-list
+ description: Definition of the request message for a given command.
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: |
+ Names of attributes from the attribute-set (not full attribute
+ definitions, just names).
+ type: array
+ items:
+ type: string
+ # Start genetlink-legacy
+ value:
+ description: |
+ ID of this message if value for request and response differ,
+ i.e. requests and responses have different message enums.
+ $ref: '#/$defs/uint'
+ # End genetlink-legacy
+ reply: *subop-attr-list
+ pre:
+ description: Hook for a function to run before the main callback (pre_doit or start).
+ type: string
+ post:
+ description: Hook for a function to run after the main callback (post_doit or done).
+ type: string
+ dump: *subop-type
+ notify:
+ description: Name of the command sharing the reply type with this notification.
+ type: string
+ event:
+ type: object
+ additionalProperties: False
+ properties:
+ attributes:
+ description: Explicit list of the attributes for the notification.
+ type: array
+ items:
+ type: string
+ mcgrp:
+ description: Name of the multicast group generating given notification.
+ type: string
+ mcast-groups:
+ description: List of multicast groups.
+ type: object
+ required: [ list ]
+ additionalProperties: False
+ properties:
+ list:
+ description: List of groups.
+ type: array
+ items:
+ type: object
+ required: [ name ]
+ additionalProperties: False
+ properties:
+ name:
+ description: |
+ The name for the group, used to form the define and the value of the define.
+ type: string
+ # Start genetlink-c
+ c-define-name:
+ description: Override for the name of the define in C uAPI.
+ type: string
+ # End genetlink-c
+ flags: *cmd_flags
+ # Start netlink-raw
+ value:
+ description: Value of the netlink multicast group in the uAPI.
+ type: integer
+ # End netlink-raw
diff --git a/Documentation/netlink/specs/rt_addr.yaml b/Documentation/netlink/specs/rt_addr.yaml
new file mode 100644
index 000000000000..cbee1cedb177
--- /dev/null
+++ b/Documentation/netlink/specs/rt_addr.yaml
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: rt-addr
+protocol: netlink-raw
+protonum: 0
+
+doc:
+ Address configuration over rtnetlink.
+
+definitions:
+ -
+ name: ifaddrmsg
+ type: struct
+ members:
+ -
+ name: ifa-family
+ type: u8
+ -
+ name: ifa-prefixlen
+ type: u8
+ -
+ name: ifa-flags
+ type: u8
+ enum: ifa-flags
+ enum-as-flags: true
+ -
+ name: ifa-scope
+ type: u8
+ -
+ name: ifa-index
+ type: u32
+ -
+ name: ifa-cacheinfo
+ type: struct
+ members:
+ -
+ name: ifa-prefered
+ type: u32
+ -
+ name: ifa-valid
+ type: u32
+ -
+ name: cstamp
+ type: u32
+ -
+ name: tstamp
+ type: u32
+
+ -
+ name: ifa-flags
+ type: flags
+ entries:
+ -
+ name: secondary
+ -
+ name: nodad
+ -
+ name: optimistic
+ -
+ name: dadfailed
+ -
+ name: homeaddress
+ -
+ name: deprecated
+ -
+ name: tentative
+ -
+ name: permanent
+ -
+ name: managetempaddr
+ -
+ name: noprefixroute
+ -
+ name: mcautojoin
+ -
+ name: stable-privacy
+
+attribute-sets:
+ -
+ name: addr-attrs
+ attributes:
+ -
+ name: ifa-address
+ type: binary
+ display-hint: ipv4
+ -
+ name: ifa-local
+ type: binary
+ display-hint: ipv4
+ -
+ name: ifa-label
+ type: string
+ -
+ name: ifa-broadcast
+ type: binary
+ display-hint: ipv4
+ -
+ name: ifa-anycast
+ type: binary
+ -
+ name: ifa-cacheinfo
+ type: binary
+ struct: ifa-cacheinfo
+ -
+ name: ifa-multicast
+ type: binary
+ -
+ name: ifa-flags
+ type: u32
+ enum: ifa-flags
+ enum-as-flags: true
+ -
+ name: ifa-rt-priority
+ type: u32
+ -
+ name: ifa-target-netnsid
+ type: binary
+ -
+ name: ifa-proto
+ type: u8
+
+
+operations:
+ fixed-header: ifaddrmsg
+ enum-model: directional
+ list:
+ -
+ name: newaddr
+ doc: Add new address
+ attribute-set: addr-attrs
+ do:
+ request:
+ value: 20
+ attributes: &ifaddr-all
+ - ifa-family
+ - ifa-flags
+ - ifa-prefixlen
+ - ifa-scope
+ - ifa-index
+ - ifa-address
+ - ifa-label
+ - ifa-local
+ - ifa-cacheinfo
+ -
+ name: deladdr
+ doc: Remove address
+ attribute-set: addr-attrs
+ do:
+ request:
+ value: 21
+ attributes:
+ - ifa-family
+ - ifa-flags
+ - ifa-prefixlen
+ - ifa-scope
+ - ifa-index
+ - ifa-address
+ - ifa-local
+ -
+ name: getaddr
+ doc: Dump address information.
+ attribute-set: addr-attrs
+ dump:
+ request:
+ value: 22
+ attributes:
+ - ifa-index
+ reply:
+ value: 20
+ attributes: *ifaddr-all
+
+mcast-groups:
+ list:
+ -
+ name: rtnlgrp-ipv4-ifaddr
+ value: 5
+ -
+ name: rtnlgrp-ipv6-ifaddr
+ value: 9
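+
+# Usage sketch (an assumption, not part of the spec itself): with the
+# in-tree YNL CLI and its netlink-raw support, addresses can be dumped
+# with
+#   ./tools/net/ynl/cli.py \
+#       --spec Documentation/netlink/specs/rt_addr.yaml --dump getaddr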
diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml
new file mode 100644
index 000000000000..d86a68f8475c
--- /dev/null
+++ b/Documentation/netlink/specs/rt_link.yaml
@@ -0,0 +1,1432 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: rt-link
+protocol: netlink-raw
+protonum: 0
+
+doc:
+ Link configuration over rtnetlink.
+
+definitions:
+ -
+ name: ifinfo-flags
+ type: flags
+ entries:
+ -
+ name: up
+ -
+ name: broadcast
+ -
+ name: debug
+ -
+ name: loopback
+ -
+ name: point-to-point
+ -
+ name: no-trailers
+ -
+ name: running
+ -
+ name: no-arp
+ -
+ name: promisc
+ -
+ name: all-multi
+ -
+ name: master
+ -
+ name: slave
+ -
+ name: multicast
+ -
+ name: portsel
+ -
+ name: auto-media
+ -
+ name: dynamic
+ -
+ name: lower-up
+ -
+ name: dormant
+ -
+ name: echo
+
+ -
+ name: rtgenmsg
+ type: struct
+ members:
+ -
+ name: family
+ type: u8
+ -
+ name: ifinfomsg
+ type: struct
+ members:
+ -
+ name: ifi-family
+ type: u8
+ -
+ name: padding
+ type: u8
+ -
+ name: ifi-type
+ type: u16
+ -
+ name: ifi-index
+ type: s32
+ -
+ name: ifi-flags
+ type: u32
+ enum: ifinfo-flags
+ enum-as-flags: true
+ -
+ name: ifi-change
+ type: u32
+ -
+ name: ifla-cacheinfo
+ type: struct
+ members:
+ -
+ name: max-reasm-len
+ type: u32
+ -
+ name: tstamp
+ type: u32
+ -
+ name: reachable-time
+ type: s32
+ -
+ name: retrans-time
+ type: u32
+ -
+ name: rtnl-link-stats
+ type: struct
+ members:
+ -
+ name: rx-packets
+ type: u32
+ -
+ name: tx-packets
+ type: u32
+ -
+ name: rx-bytes
+ type: u32
+ -
+ name: tx-bytes
+ type: u32
+ -
+ name: rx-errors
+ type: u32
+ -
+ name: tx-errors
+ type: u32
+ -
+ name: rx-dropped
+ type: u32
+ -
+ name: tx-dropped
+ type: u32
+ -
+ name: multicast
+ type: u32
+ -
+ name: collisions
+ type: u32
+ -
+ name: rx-length-errors
+ type: u32
+ -
+ name: rx-over-errors
+ type: u32
+ -
+ name: rx-crc-errors
+ type: u32
+ -
+ name: rx-frame-errors
+ type: u32
+ -
+ name: rx-fifo-errors
+ type: u32
+ -
+ name: rx-missed-errors
+ type: u32
+ -
+ name: tx-aborted-errors
+ type: u32
+ -
+ name: tx-carrier-errors
+ type: u32
+ -
+ name: tx-fifo-errors
+ type: u32
+ -
+ name: tx-heartbeat-errors
+ type: u32
+ -
+ name: tx-window-errors
+ type: u32
+ -
+ name: rx-compressed
+ type: u32
+ -
+ name: tx-compressed
+ type: u32
+ -
+ name: rx-nohandler
+ type: u32
+ -
+ name: rtnl-link-stats64
+ type: struct
+ members:
+ -
+ name: rx-packets
+ type: u64
+ -
+ name: tx-packets
+ type: u64
+ -
+ name: rx-bytes
+ type: u64
+ -
+ name: tx-bytes
+ type: u64
+ -
+ name: rx-errors
+ type: u64
+ -
+ name: tx-errors
+ type: u64
+ -
+ name: rx-dropped
+ type: u64
+ -
+ name: tx-dropped
+ type: u64
+ -
+ name: multicast
+ type: u64
+ -
+ name: collisions
+ type: u64
+ -
+ name: rx-length-errors
+ type: u64
+ -
+ name: rx-over-errors
+ type: u64
+ -
+ name: rx-crc-errors
+ type: u64
+ -
+ name: rx-frame-errors
+ type: u64
+ -
+ name: rx-fifo-errors
+ type: u64
+ -
+ name: rx-missed-errors
+ type: u64
+ -
+ name: tx-aborted-errors
+ type: u64
+ -
+ name: tx-carrier-errors
+ type: u64
+ -
+ name: tx-fifo-errors
+ type: u64
+ -
+ name: tx-heartbeat-errors
+ type: u64
+ -
+ name: tx-window-errors
+ type: u64
+ -
+ name: rx-compressed
+ type: u64
+ -
+ name: tx-compressed
+ type: u64
+ -
+ name: rx-nohandler
+ type: u64
+ -
+ name: rx-otherhost-dropped
+ type: u64
+ -
+ name: rtnl-link-ifmap
+ type: struct
+ members:
+ -
+ name: mem-start
+ type: u64
+ -
+ name: mem-end
+ type: u64
+ -
+ name: base-addr
+ type: u64
+ -
+ name: irq
+ type: u16
+ -
+ name: dma
+ type: u8
+ -
+ name: port
+ type: u8
+ -
+ name: ipv4-devconf
+ type: struct
+ members:
+ -
+ name: forwarding
+ type: u32
+ -
+ name: mc-forwarding
+ type: u32
+ -
+ name: proxy-arp
+ type: u32
+ -
+ name: accept-redirects
+ type: u32
+ -
+ name: secure-redirects
+ type: u32
+ -
+ name: send-redirects
+ type: u32
+ -
+ name: shared-media
+ type: u32
+ -
+ name: rp-filter
+ type: u32
+ -
+ name: accept-source-route
+ type: u32
+ -
+ name: bootp-relay
+ type: u32
+ -
+ name: log-martians
+ type: u32
+ -
+ name: tag
+ type: u32
+ -
+ name: arpfilter
+ type: u32
+ -
+ name: medium-id
+ type: u32
+ -
+ name: noxfrm
+ type: u32
+ -
+ name: nopolicy
+ type: u32
+ -
+ name: force-igmp-version
+ type: u32
+ -
+ name: arp-announce
+ type: u32
+ -
+ name: arp-ignore
+ type: u32
+ -
+ name: promote-secondaries
+ type: u32
+ -
+ name: arp-accept
+ type: u32
+ -
+ name: arp-notify
+ type: u32
+ -
+ name: accept-local
+ type: u32
+ -
+ name: src-vmark
+ type: u32
+ -
+ name: proxy-arp-pvlan
+ type: u32
+ -
+ name: route-localnet
+ type: u32
+ -
+ name: igmpv2-unsolicited-report-interval
+ type: u32
+ -
+ name: igmpv3-unsolicited-report-interval
+ type: u32
+ -
+ name: ignore-routes-with-linkdown
+ type: u32
+ -
+ name: drop-unicast-in-l2-multicast
+ type: u32
+ -
+ name: drop-gratuitous-arp
+ type: u32
+ -
+ name: bc-forwarding
+ type: u32
+ -
+ name: arp-evict-nocarrier
+ type: u32
+ -
+ name: ipv6-devconf
+ type: struct
+ members:
+ -
+ name: forwarding
+ type: u32
+ -
+ name: hoplimit
+ type: u32
+ -
+ name: mtu6
+ type: u32
+ -
+ name: accept-ra
+ type: u32
+ -
+ name: accept-redirects
+ type: u32
+ -
+ name: autoconf
+ type: u32
+ -
+ name: dad-transmits
+ type: u32
+ -
+ name: rtr-solicits
+ type: u32
+ -
+ name: rtr-solicit-interval
+ type: u32
+ -
+ name: rtr-solicit-delay
+ type: u32
+ -
+ name: use-tempaddr
+ type: u32
+ -
+ name: temp-valid-lft
+ type: u32
+ -
+ name: temp-prefered-lft
+ type: u32
+ -
+ name: regen-max-retry
+ type: u32
+ -
+ name: max-desync-factor
+ type: u32
+ -
+ name: max-addresses
+ type: u32
+ -
+ name: force-mld-version
+ type: u32
+ -
+ name: accept-ra-defrtr
+ type: u32
+ -
+ name: accept-ra-pinfo
+ type: u32
+ -
+ name: accept-ra-rtr-pref
+ type: u32
+ -
+ name: rtr-probe-interval
+ type: u32
+ -
+ name: accept-ra-rt-info-max-plen
+ type: u32
+ -
+ name: proxy-ndp
+ type: u32
+ -
+ name: optimistic-dad
+ type: u32
+ -
+ name: accept-source-route
+ type: u32
+ -
+ name: mc-forwarding
+ type: u32
+ -
+ name: disable-ipv6
+ type: u32
+ -
+ name: accept-dad
+ type: u32
+ -
+ name: force-tllao
+ type: u32
+ -
+ name: ndisc-notify
+ type: u32
+ -
+ name: mldv1-unsolicited-report-interval
+ type: u32
+ -
+ name: mldv2-unsolicited-report-interval
+ type: u32
+ -
+ name: suppress-frag-ndisc
+ type: u32
+ -
+ name: accept-ra-from-local
+ type: u32
+ -
+ name: use-optimistic
+ type: u32
+ -
+ name: accept-ra-mtu
+ type: u32
+ -
+ name: stable-secret
+ type: u32
+ -
+ name: use-oif-addrs-only
+ type: u32
+ -
+ name: accept-ra-min-hop-limit
+ type: u32
+ -
+ name: ignore-routes-with-linkdown
+ type: u32
+ -
+ name: drop-unicast-in-l2-multicast
+ type: u32
+ -
+ name: drop-unsolicited-na
+ type: u32
+ -
+ name: keep-addr-on-down
+ type: u32
+ -
+ name: rtr-solicit-max-interval
+ type: u32
+ -
+ name: seg6-enabled
+ type: u32
+ -
+ name: seg6-require-hmac
+ type: u32
+ -
+ name: enhanced-dad
+ type: u32
+ -
+ name: addr-gen-mode
+ type: u8
+ -
+ name: disable-policy
+ type: u32
+ -
+ name: accept-ra-rt-info-min-plen
+ type: u32
+ -
+ name: ndisc-tclass
+ type: u32
+ -
+ name: rpl-seg-enabled
+ type: u32
+ -
+ name: ra-defrtr-metric
+ type: u32
+ -
+ name: ioam6-enabled
+ type: u32
+ -
+ name: ioam6-id
+ type: u32
+ -
+ name: ioam6-id-wide
+ type: u32
+ -
+ name: ndisc-evict-nocarrier
+ type: u32
+ -
+ name: accept-untracked-na
+ type: u32
+ -
+ name: ifla-icmp6-stats
+ type: struct
+ members:
+ -
+ name: inmsgs
+ type: u64
+ -
+ name: inerrors
+ type: u64
+ -
+ name: outmsgs
+ type: u64
+ -
+ name: outerrors
+ type: u64
+ -
+ name: csumerrors
+ type: u64
+ -
+ name: ratelimithost
+ type: u64
+ -
+ name: ifla-inet6-stats
+ type: struct
+ members:
+ -
+ name: inpkts
+ type: u64
+ -
+ name: inoctets
+ type: u64
+ -
+ name: indelivers
+ type: u64
+ -
+ name: outforwdatagrams
+ type: u64
+ -
+ name: outpkts
+ type: u64
+ -
+ name: outoctets
+ type: u64
+ -
+ name: inhdrerrors
+ type: u64
+ -
+ name: intoobigerrors
+ type: u64
+ -
+ name: innoroutes
+ type: u64
+ -
+ name: inaddrerrors
+ type: u64
+ -
+ name: inunknownprotos
+ type: u64
+ -
+ name: intruncatedpkts
+ type: u64
+ -
+ name: indiscards
+ type: u64
+ -
+ name: outdiscards
+ type: u64
+ -
+ name: outnoroutes
+ type: u64
+ -
+ name: reasmtimeout
+ type: u64
+ -
+ name: reasmreqds
+ type: u64
+ -
+ name: reasmoks
+ type: u64
+ -
+ name: reasmfails
+ type: u64
+ -
+ name: fragoks
+ type: u64
+ -
+ name: fragfails
+ type: u64
+ -
+ name: fragcreates
+ type: u64
+ -
+ name: inmcastpkts
+ type: u64
+ -
+ name: outmcastpkts
+ type: u64
+ -
+ name: inbcastpkts
+ type: u64
+ -
+ name: outbcastpkts
+ type: u64
+ -
+ name: inmcastoctets
+ type: u64
+ -
+ name: outmcastoctets
+ type: u64
+ -
+ name: inbcastoctets
+ type: u64
+ -
+ name: outbcastoctets
+ type: u64
+ -
+ name: csumerrors
+ type: u64
+ -
+ name: noectpkts
+ type: u64
+ -
+ name: ect1-pkts
+ type: u64
+ -
+ name: ect0-pkts
+ type: u64
+ -
+ name: cepkts
+ type: u64
+ -
+ name: reasm-overlaps
+ type: u64
+ - name: br-boolopt-multi
+ type: struct
+ members:
+ -
+ name: optval
+ type: u32
+ -
+ name: optmask
+ type: u32
+ -
+ name: if_stats_msg
+ type: struct
+ members:
+ -
+ name: family
+ type: u8
+ -
+ name: pad1
+ type: u8
+ -
+ name: pad2
+ type: u16
+ -
+ name: ifindex
+ type: u32
+ -
+ name: filter-mask
+ type: u32
+
+
+attribute-sets:
+ -
+ name: link-attrs
+ name-prefix: ifla-
+ attributes:
+ -
+ name: address
+ type: binary
+ display-hint: mac
+ -
+ name: broadcast
+ type: binary
+ display-hint: mac
+ -
+ name: ifname
+ type: string
+ -
+ name: mtu
+ type: u32
+ -
+ name: link
+ type: u32
+ -
+ name: qdisc
+ type: string
+ -
+ name: stats
+ type: binary
+ struct: rtnl-link-stats
+ -
+ name: cost
+ type: string
+ -
+ name: priority
+ type: string
+ -
+ name: master
+ type: u32
+ -
+ name: wireless
+ type: string
+ -
+ name: protinfo
+ type: string
+ -
+ name: txqlen
+ type: u32
+ -
+ name: map
+ type: binary
+ struct: rtnl-link-ifmap
+ -
+ name: weight
+ type: u32
+ -
+ name: operstate
+ type: u8
+ -
+ name: linkmode
+ type: u8
+ -
+ name: linkinfo
+ type: nest
+ nested-attributes: linkinfo-attrs
+ -
+ name: net-ns-pid
+ type: u32
+ -
+ name: ifalias
+ type: string
+ -
+ name: num-vf
+ type: u32
+ -
+ name: vfinfo-list
+ type: nest
+ nested-attributes: vfinfo-attrs
+ -
+ name: stats64
+ type: binary
+ struct: rtnl-link-stats64
+ -
+ name: vf-ports
+ type: nest
+ nested-attributes: vf-ports-attrs
+ -
+ name: port-self
+ type: nest
+ nested-attributes: port-self-attrs
+ -
+ name: af-spec
+ type: nest
+ nested-attributes: af-spec-attrs
+ -
+ name: group
+ type: u32
+ -
+ name: net-ns-fd
+ type: u32
+ -
+ name: ext-mask
+ type: u32
+ -
+ name: promiscuity
+ type: u32
+ -
+ name: num-tx-queues
+ type: u32
+ -
+ name: num-rx-queues
+ type: u32
+ -
+ name: carrier
+ type: u8
+ -
+ name: phys-port-id
+ type: binary
+ -
+ name: carrier-changes
+ type: u32
+ -
+ name: phys-switch-id
+ type: binary
+ -
+ name: link-netnsid
+ type: s32
+ -
+ name: phys-port-name
+ type: string
+ -
+ name: proto-down
+ type: u8
+ -
+ name: gso-max-segs
+ type: u32
+ -
+ name: gso-max-size
+ type: u32
+ -
+ name: pad
+ type: pad
+ -
+ name: xdp
+ type: nest
+ nested-attributes: xdp-attrs
+ -
+ name: event
+ type: u32
+ -
+ name: new-netnsid
+ type: s32
+ -
+ name: target-netnsid
+ type: s32
+ -
+ name: carrier-up-count
+ type: u32
+ -
+ name: carrier-down-count
+ type: u32
+ -
+ name: new-ifindex
+ type: s32
+ -
+ name: min-mtu
+ type: u32
+ -
+ name: max-mtu
+ type: u32
+ -
+ name: prop-list
+ type: nest
+ nested-attributes: link-attrs
+ -
+ name: alt-ifname
+ type: string
+ multi-attr: true
+ -
+ name: perm-address
+ type: binary
+ display-hint: mac
+ -
+ name: proto-down-reason
+ type: string
+ -
+ name: parent-dev-name
+ type: string
+ -
+ name: parent-dev-bus-name
+ type: string
+ -
+ name: gro-max-size
+ type: u32
+ -
+ name: tso-max-size
+ type: u32
+ -
+ name: tso-max-segs
+ type: u32
+ -
+ name: allmulti
+ type: u32
+ -
+ name: devlink-port
+ type: binary
+ -
+ name: gso-ipv4-max-size
+ type: u32
+ -
+ name: gro-ipv4-max-size
+ type: u32
+ -
+ name: af-spec-attrs
+ attributes:
+ -
+ name: "inet"
+ type: nest
+ value: 2
+ nested-attributes: ifla-attrs
+ -
+ name: "inet6"
+ type: nest
+ value: 10
+ nested-attributes: ifla6-attrs
+ -
+ name: "mctp"
+ type: nest
+ value: 45
+ nested-attributes: mctp-attrs
+ -
+ name: vfinfo-attrs
+ attributes: []
+ -
+ name: vf-ports-attrs
+ attributes: []
+ -
+ name: port-self-attrs
+ attributes: []
+ -
+ name: linkinfo-attrs
+ attributes:
+ -
+ name: kind
+ type: string
+ -
+ name: data
+ type: binary
+ # kind specific nest, e.g. linkinfo-bridge-attrs
+ -
+ name: xstats
+ type: binary
+ -
+ name: slave-kind
+ type: string
+ -
+ name: slave-data
+ type: binary
+ # kind specific nest
+ -
+ name: linkinfo-bridge-attrs
+ attributes:
+ -
+ name: forward-delay
+ type: u32
+ -
+ name: hello-time
+ type: u32
+ -
+ name: max-age
+ type: u32
+ -
+ name: ageing-time
+ type: u32
+ -
+ name: stp-state
+ type: u32
+ -
+ name: priority
+ type: u16
+ -
+ name: vlan-filtering
+ type: u8
+ -
+ name: vlan-protocol
+ type: u16
+ -
+ name: group-fwd-mask
+ type: u16
+ -
+ name: root-id
+ type: binary
+ -
+ name: bridge-id
+ type: binary
+ -
+ name: root-port
+ type: u16
+ -
+ name: root-path-cost
+ type: u32
+ -
+ name: topology-change
+ type: u8
+ -
+ name: topology-change-detected
+ type: u8
+ -
+ name: hello-timer
+ type: u64
+ -
+ name: tcn-timer
+ type: u64
+ -
+ name: topology-change-timer
+ type: u64
+ -
+ name: gc-timer
+ type: u64
+ -
+ name: group-addr
+ type: binary
+ -
+ name: fdb-flush
+ type: binary
+ -
+ name: mcast-router
+ type: u8
+ -
+ name: mcast-snooping
+ type: u8
+ -
+ name: mcast-query-use-ifaddr
+ type: u8
+ -
+ name: mcast-querier
+ type: u8
+ -
+ name: mcast-hash-elasticity
+ type: u32
+ -
+ name: mcast-hash-max
+ type: u32
+ -
+ name: mcast-last-member-cnt
+ type: u32
+ -
+ name: mcast-startup-query-cnt
+ type: u32
+ -
+ name: mcast-last-member-intvl
+ type: u64
+ -
+ name: mcast-membership-intvl
+ type: u64
+ -
+ name: mcast-querier-intvl
+ type: u64
+ -
+ name: mcast-query-intvl
+ type: u64
+ -
+ name: mcast-query-response-intvl
+ type: u64
+ -
+ name: mcast-startup-query-intvl
+ type: u64
+ -
+ name: nf-call-iptables
+ type: u8
+ -
+ name: nf-call-ip6-tables
+ type: u8
+ -
+ name: nf-call-arptables
+ type: u8
+ -
+ name: vlan-default-pvid
+ type: u16
+ -
+ name: pad
+ type: pad
+ -
+ name: vlan-stats-enabled
+ type: u8
+ -
+ name: mcast-stats-enabled
+ type: u8
+ -
+ name: mcast-igmp-version
+ type: u8
+ -
+ name: mcast-mld-version
+ type: u8
+ -
+ name: vlan-stats-per-port
+ type: u8
+ -
+ name: multi-boolopt
+ type: binary
+ struct: br-boolopt-multi
+ -
+ name: mcast-querier-state
+ type: binary
+ -
+ name: xdp-attrs
+ attributes:
+ -
+ name: fd
+ type: s32
+ -
+ name: attached
+ type: u8
+ -
+ name: flags
+ type: u32
+ -
+ name: prog-id
+ type: u32
+ -
+ name: drv-prog-id
+ type: u32
+ -
+ name: skb-prog-id
+ type: u32
+ -
+ name: hw-prog-id
+ type: u32
+ -
+ name: expected-fd
+ type: s32
+ -
+ name: ifla-attrs
+ attributes:
+ -
+ name: conf
+ type: binary
+ struct: ipv4-devconf
+ -
+ name: ifla6-attrs
+ attributes:
+ -
+ name: flags
+ type: u32
+ -
+ name: conf
+ type: binary
+ struct: ipv6-devconf
+ -
+ name: stats
+ type: binary
+ struct: ifla-inet6-stats
+ -
+ name: mcast
+ type: binary
+ -
+ name: cacheinfo
+ type: binary
+ struct: ifla-cacheinfo
+ -
+ name: icmp6-stats
+ type: binary
+ struct: ifla-icmp6-stats
+ -
+ name: token
+ type: binary
+ -
+ name: addr-gen-mode
+ type: u8
+ -
+ name: ra-mtu
+ type: u32
+ -
+ name: mctp-attrs
+ attributes:
+ -
+ name: mctp-net
+ type: u32
+ -
+ name: stats-attrs
+ name-prefix: ifla-stats-
+ attributes:
+ -
+ name: link-64
+ type: binary
+ struct: rtnl-link-stats64
+ -
+ name: link-xstats
+ type: binary
+ -
+ name: link-xstats-slave
+ type: binary
+ -
+ name: link-offload-xstats
+ type: nest
+ nested-attributes: link-offload-xstats
+ -
+ name: af-spec
+ type: binary
+ -
+ name: link-offload-xstats
+ attributes:
+ -
+ name: cpu-hit
+ type: binary
+ -
+ name: hw-s-info
+ type: array-nest
+ nested-attributes: hw-s-info-one
+ -
+ name: l3-stats
+ type: binary
+ -
+ name: hw-s-info-one
+ attributes:
+ -
+ name: request
+ type: u8
+ -
+ name: used
+ type: u8
+
+operations:
+ enum-model: directional
+ list:
+ -
+ name: newlink
+ doc: Create a new link.
+ attribute-set: link-attrs
+ fixed-header: ifinfomsg
+ do:
+ request:
+ value: 16
+ attributes: &link-new-attrs
+ - ifi-index
+ - ifname
+ - net-ns-pid
+ - net-ns-fd
+ - target-netnsid
+ - link-netnsid
+ - linkinfo
+ - group
+ - num-tx-queues
+ - num-rx-queues
+ - address
+ - broadcast
+ - mtu
+ - txqlen
+ - operstate
+ - linkmode
+ - group
+ - gso-max-size
+ - gso-max-segs
+ - gro-max-size
+ - gso-ipv4-max-size
+ - gro-ipv4-max-size
+ - af-spec
+ -
+ name: dellink
+ doc: Delete an existing link.
+ attribute-set: link-attrs
+ fixed-header: ifinfomsg
+ do:
+ request:
+ value: 17
+ attributes:
+ - ifi-index
+ - ifname
+ -
+ name: getlink
+ doc: Get / dump information about a link.
+ attribute-set: link-attrs
+ fixed-header: ifinfomsg
+ do:
+ request:
+ value: 18
+ attributes:
+ - ifi-index
+ - ifname
+ - alt-ifname
+ - ext-mask
+ - target-netnsid
+ reply:
+ value: 16
+ attributes: &link-all-attrs
+ - ifi-family
+ - ifi-type
+ - ifi-index
+ - ifi-flags
+ - ifi-change
+ - address
+ - broadcast
+ - ifname
+ - mtu
+ - link
+ - qdisc
+ - stats
+ - cost
+ - priority
+ - master
+ - wireless
+ - protinfo
+ - txqlen
+ - map
+ - weight
+ - operstate
+ - linkmode
+ - linkinfo
+ - net-ns-pid
+ - ifalias
+ - num-vf
+ - vfinfo-list
+ - stats64
+ - vf-ports
+ - port-self
+ - af-spec
+ - group
+ - net-ns-fd
+ - ext-mask
+ - promiscuity
+ - num-tx-queues
+ - num-rx-queues
+ - carrier
+ - phys-port-id
+ - carrier-changes
+ - phys-switch-id
+ - link-netnsid
+ - phys-port-name
+ - proto-down
+ - gso-max-segs
+ - gso-max-size
+ - pad
+ - xdp
+ - event
+ - new-netnsid
+ - if-netnsid
+ - target-netnsid
+ - carrier-up-count
+ - carrier-down-count
+ - new-ifindex
+ - min-mtu
+ - max-mtu
+ - prop-list
+ - alt-ifname
+ - perm-address
+ - proto-down-reason
+ - parent-dev-name
+ - parent-dev-bus-name
+ - gro-max-size
+ - tso-max-size
+ - tso-max-segs
+ - allmulti
+ - devlink-port
+ - gso-ipv4-max-size
+ - gro-ipv4-max-size
+ dump:
+ request:
+ value: 18
+ attributes:
+ - target-netnsid
+ - ext-mask
+ - master
+ - linkinfo
+ reply:
+ value: 16
+ attributes: *link-all-attrs
+ -
+ name: setlink
+ doc: Set information about a link.
+ attribute-set: link-attrs
+ fixed-header: ifinfomsg
+ do:
+ request:
+ value: 19
+ attributes: *link-all-attrs
+ -
+ name: getstats
+ doc: Get / dump link stats.
+ attribute-set: stats-attrs
+ fixed-header: if_stats_msg
+ do:
+ request:
+ value: 94
+ attributes:
+ - ifindex
+ reply:
+ value: 92
+ attributes: &link-stats-attrs
+ - family
+ - ifindex
+ - filter-mask
+ - link-64
+ - link-xstats
+ - link-xstats-slave
+ - link-offload-xstats
+ - af-spec
+ dump:
+ request:
+ value: 94
+ reply:
+ value: 92
+ attributes: *link-stats-attrs
+
+mcast-groups:
+ list:
+ -
+ name: rtnlgrp-link
+ value: 1
+ -
+ name: rtnlgrp-stats
+ value: 36
diff --git a/Documentation/netlink/specs/rt_route.yaml b/Documentation/netlink/specs/rt_route.yaml
new file mode 100644
index 000000000000..f4368be0caed
--- /dev/null
+++ b/Documentation/netlink/specs/rt_route.yaml
@@ -0,0 +1,327 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: rt-route
+protocol: netlink-raw
+protonum: 0
+
+doc:
+ Route configuration over rtnetlink.
+
+definitions:
+ -
+ name: rtm-type
+ name-prefix: rtn-
+ type: enum
+ entries:
+ - unspec
+ - unicast
+ - local
+ - broadcast
+ - anycast
+ - multicast
+ - blackhole
+ - unreachable
+ - prohibit
+ - throw
+ - nat
+ - xresolve
+ -
+ name: rtmsg
+ type: struct
+ members:
+ -
+ name: rtm-family
+ type: u8
+ -
+ name: rtm-dst-len
+ type: u8
+ -
+ name: rtm-src-len
+ type: u8
+ -
+ name: rtm-tos
+ type: u8
+ -
+ name: rtm-table
+ type: u8
+ -
+ name: rtm-protocol
+ type: u8
+ -
+ name: rtm-scope
+ type: u8
+ -
+ name: rtm-type
+ type: u8
+ enum: rtm-type
+ -
+ name: rtm-flags
+ type: u32
+ -
+ name: rta-cacheinfo
+ type: struct
+ members:
+ -
+ name: rta-clntref
+ type: u32
+ -
+ name: rta-lastuse
+ type: u32
+ -
+ name: rta-expires
+ type: u32
+ -
+ name: rta-error
+ type: u32
+ -
+ name: rta-used
+ type: u32
+
+attribute-sets:
+ -
+ name: route-attrs
+ attributes:
+ -
+ name: rta-dst
+ type: binary
+ display-hint: ipv4
+ -
+ name: rta-src
+ type: binary
+ display-hint: ipv4
+ -
+ name: rta-iif
+ type: u32
+ -
+ name: rta-oif
+ type: u32
+ -
+ name: rta-gateway
+ type: binary
+ display-hint: ipv4
+ -
+ name: rta-priority
+ type: u32
+ -
+ name: rta-prefsrc
+ type: binary
+ display-hint: ipv4
+ -
+ name: rta-metrics
+ type: nest
+ nested-attributes: rta-metrics
+ -
+ name: rta-multipath
+ type: binary
+ -
+ name: rta-protoinfo # not used
+ type: binary
+ -
+ name: rta-flow
+ type: u32
+ -
+ name: rta-cacheinfo
+ type: binary
+ struct: rta-cacheinfo
+ -
+ name: rta-session # not used
+ type: binary
+ -
+ name: rta-mp-algo # not used
+ type: binary
+ -
+ name: rta-table
+ type: u32
+ -
+ name: rta-mark
+ type: u32
+ -
+ name: rta-mfc-stats
+ type: binary
+ -
+ name: rta-via
+ type: binary
+ -
+ name: rta-newdst
+ type: binary
+ -
+ name: rta-pref
+ type: u8
+ -
+ name: rta-encap-type
+ type: u16
+ -
+ name: rta-encap
+ type: binary # tunnel specific nest
+ -
+ name: rta-expires
+ type: u32
+ -
+ name: rta-pad
+ type: binary
+ -
+ name: rta-uid
+ type: u32
+ -
+ name: rta-ttl-propagate
+ type: u8
+ -
+ name: rta-ip-proto
+ type: u8
+ -
+ name: rta-sport
+ type: u16
+ -
+ name: rta-dport
+ type: u16
+ -
+ name: rta-nh-id
+ type: u32
+ -
+ name: rta-metrics
+ attributes:
+ -
+ name: rtax-unspec
+ type: unused
+ value: 0
+ -
+ name: rtax-lock
+ type: u32
+ -
+ name: rtax-mtu
+ type: u32
+ -
+ name: rtax-window
+ type: u32
+ -
+ name: rtax-rtt
+ type: u32
+ -
+ name: rtax-rttvar
+ type: u32
+ -
+ name: rtax-ssthresh
+ type: u32
+ -
+ name: rtax-cwnd
+ type: u32
+ -
+ name: rtax-advmss
+ type: u32
+ -
+ name: rtax-reordering
+ type: u32
+ -
+ name: rtax-hoplimit
+ type: u32
+ -
+ name: rtax-initcwnd
+ type: u32
+ -
+ name: rtax-features
+ type: u32
+ -
+ name: rtax-rto-min
+ type: u32
+ -
+ name: rtax-initrwnd
+ type: u32
+ -
+ name: rtax-quickack
+ type: u32
+ -
+ name: rtax-cc-algo
+ type: string
+ -
+ name: rtax-fastopen-no-cookie
+ type: u32
+
+operations:
+ enum-model: directional
+ list:
+ -
+ name: getroute
+ doc: Dump route information.
+ attribute-set: route-attrs
+ fixed-header: rtmsg
+ do:
+ request:
+ value: 26
+ attributes:
+ - rtm-family
+ - rta-src
+ - rtm-src-len
+ - rta-dst
+ - rtm-dst-len
+ - rta-iif
+ - rta-oif
+ - rta-ip-proto
+ - rta-sport
+ - rta-dport
+ - rta-mark
+ - rta-uid
+ reply:
+ value: 24
+ attributes: &all-route-attrs
+ - rtm-family
+ - rtm-dst-len
+ - rtm-src-len
+ - rtm-tos
+ - rtm-table
+ - rtm-protocol
+ - rtm-scope
+ - rtm-type
+ - rtm-flags
+ - rta-dst
+ - rta-src
+ - rta-iif
+ - rta-oif
+ - rta-gateway
+ - rta-priority
+ - rta-prefsrc
+ - rta-metrics
+ - rta-multipath
+ - rta-flow
+ - rta-cacheinfo
+ - rta-table
+ - rta-mark
+ - rta-mfc-stats
+ - rta-via
+ - rta-newdst
+ - rta-pref
+ - rta-encap-type
+ - rta-encap
+ - rta-expires
+ - rta-pad
+ - rta-uid
+ - rta-ttl-propagate
+ - rta-ip-proto
+ - rta-sport
+ - rta-dport
+ - rta-nh-id
+ dump:
+ request:
+ value: 26
+ attributes:
+ - rtm-family
+ reply:
+ value: 24
+ attributes: *all-route-attrs
+ -
+ name: newroute
+ doc: Create a new route
+ attribute-set: route-attrs
+ fixed-header: rtmsg
+ do:
+ request:
+ value: 24
+ attributes: *all-route-attrs
+ -
+ name: delroute
+ doc: Delete an existing route
+ attribute-set: route-attrs
+ fixed-header: rtmsg
+ do:
+ request:
+ value: 25
+ attributes: *all-route-attrs
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst
index 6e3f5ee8b0d0..b617e93d7c2c 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst
@@ -190,6 +190,26 @@ explicitly enable the VF migratable capability.
mlx5 driver support devlink port function attr mechanism to setup migratable
capability. (refer to Documentation/networking/devlink/devlink-port.rst)
+IPsec crypto capability setup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Users who want mlx5 PCI VFs to be able to perform IPsec crypto offloading need
+to explicitly enable the VF ipsec_crypto capability. Enabling the IPsec
+capability for VFs is supported starting with ConnectX6dx devices. When a VF
+has the IPsec capability enabled, any IPsec offloading is blocked on the PF.
+
+The mlx5 driver supports the devlink port function attribute mechanism to set
+up the ipsec_crypto capability (refer to
+Documentation/networking/devlink/devlink-port.rst).
+
+IPsec packet capability setup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Users who want mlx5 PCI VFs to be able to perform IPsec packet offloading need
+to explicitly enable the VF ipsec_packet capability. Enabling the IPsec
+capability for VFs is supported starting with ConnectX6dx devices. When a VF
+has the IPsec capability enabled, any IPsec offloading is blocked on the PF.
+
+The mlx5 driver supports the devlink port function attribute mechanism to set
+up the ipsec_packet capability (refer to
+Documentation/networking/devlink/devlink-port.rst).
+
SF state setup
--------------
diff --git a/Documentation/networking/devlink/devlink-port.rst b/Documentation/networking/devlink/devlink-port.rst
index 3da590953ce8..f5adb910427a 100644
--- a/Documentation/networking/devlink/devlink-port.rst
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -128,6 +128,12 @@ Users may also set the RoCE capability of the function using
Users may also set the function as migratable using
'devlink port function set migratable' command.
+Users may also set the IPsec crypto capability of the function using
+`devlink port function set ipsec_crypto` command.
+
+Users may also set the IPsec packet capability of the function using
+`devlink port function set ipsec_packet` command.
+
Function attributes
===================
@@ -240,6 +246,55 @@ Attach VF to the VM.
Start the VM.
Perform live migration.
+IPsec crypto capability setup
+-----------------------------
+When a user enables the IPsec crypto capability for a VF, user applications can
+offload XFRM state crypto operations (encrypt/decrypt) to this VF.
+
+When the IPsec crypto capability is disabled (the default) for a VF, the XFRM
+state is processed in software by the kernel.
+
+- Get IPsec crypto capability of the VF device::
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00 ipsec_crypto disabled
+
+- Set IPsec crypto capability of the VF device::
+
+ $ devlink port function set pci/0000:06:00.0/2 ipsec_crypto enable
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00 ipsec_crypto enabled
+
+IPsec packet capability setup
+-----------------------------
+When a user enables the IPsec packet capability for a VF, user applications can
+offload XFRM state and policy crypto operations (encrypt/decrypt) to this VF,
+as well as IPsec encapsulation.
+
+When the IPsec packet capability is disabled (the default) for a VF, the XFRM
+state and policy are processed in software by the kernel.
+
+- Get IPsec packet capability of the VF device::
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00 ipsec_packet disabled
+
+- Set IPsec packet capability of the VF device::
+
+ $ devlink port function set pci/0000:06:00.0/2 ipsec_packet enable
+
+ $ devlink port show pci/0000:06:00.0/2
+ pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1
+ function:
+ hw_addr 00:00:00:00:00:00 ipsec_packet enabled
+
Subfunction
============
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 5561dae94f85..0bbd040f6a55 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -31,8 +31,8 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
GNU C 5.1 gcc --version
Clang/LLVM (optional) 11.0.0 clang --version
-Rust (optional) 1.68.2 rustc --version
-bindgen (optional) 0.56.0 bindgen --version
+Rust (optional) 1.71.1 rustc --version
+bindgen (optional) 0.65.1 bindgen --version
GNU make 3.82 make --version
bash 4.2 bash --version
binutils 2.25 ld -v
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
index a8931512ed98..f382914f4191 100644
--- a/Documentation/rust/quick-start.rst
+++ b/Documentation/rust/quick-start.rst
@@ -38,7 +38,9 @@ and run::
rustup override set $(scripts/min-tool-version.sh rustc)
-Otherwise, fetch a standalone installer from:
+This will configure your working directory to use the correct version of
+``rustc`` without affecting your default toolchain. If you are not using
+``rustup``, fetch a standalone installer from:
https://forge.rust-lang.org/infra/other-installation-methods.html#standalone
@@ -56,16 +58,17 @@ If ``rustup`` is being used, run::
The components are installed per toolchain, thus upgrading the Rust compiler
version later on requires re-adding the component.
-Otherwise, if a standalone installer is used, the Rust repository may be cloned
-into the installation folder of the toolchain::
+Otherwise, if a standalone installer is used, the Rust source tree may be
+downloaded into the toolchain's installation folder::
- git clone --recurse-submodules \
- --branch $(scripts/min-tool-version.sh rustc) \
- https://github.com/rust-lang/rust \
- $(rustc --print sysroot)/lib/rustlib/src/rust
+ curl -L "https://static.rust-lang.org/dist/rust-src-$(scripts/min-tool-version.sh rustc).tar.gz" |
+ tar -xzf - -C "$(rustc --print sysroot)/lib" \
+ "rust-src-$(scripts/min-tool-version.sh rustc)/rust-src/lib/" \
+ --strip-components=3
In this case, upgrading the Rust compiler version later on requires manually
-updating this clone.
+updating the source tree (this can be done by removing ``$(rustc --print
+sysroot)/lib/rustlib/src/rust`` then rerunning the above command).
libclang
@@ -98,7 +101,24 @@ the ``bindgen`` tool. A particular version is required.
Install it via (note that this will download and build the tool from source)::
- cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen
+ cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen-cli
+
+``bindgen`` needs to find a suitable ``libclang`` in order to work. If it is
+not found (or a different ``libclang`` than the one found should be used),
+the process can be tweaked using the environment variables understood by
+``clang-sys`` (the Rust bindings crate that ``bindgen`` uses to access
+``libclang``):
+
+* ``LLVM_CONFIG_PATH`` can be pointed to an ``llvm-config`` executable.
+
+* Or ``LIBCLANG_PATH`` can be pointed to a ``libclang`` shared library
+ or to the directory containing it.
+
+* Or ``CLANG_PATH`` can be pointed to a ``clang`` executable.
+
+For details, please see ``clang-sys``'s documentation at:
+
+ https://github.com/KyleMayes/clang-sys#environment-variables
Requirements: Developing
@@ -179,7 +199,9 @@ be used with many editors to enable syntax highlighting, completion, go to
definition, and other features.
``rust-analyzer`` needs a configuration file, ``rust-project.json``, which
-can be generated by the ``rust-analyzer`` Make target.
+can be generated by the ``rust-analyzer`` Make target::
+
+ make LLVM=1 rust-analyzer
Configuration
diff --git a/Documentation/scheduler/sched-design-CFS.rst b/Documentation/scheduler/sched-design-CFS.rst
index 03db55504515..f68919800f05 100644
--- a/Documentation/scheduler/sched-design-CFS.rst
+++ b/Documentation/scheduler/sched-design-CFS.rst
@@ -94,7 +94,7 @@ other HZ detail. Thus the CFS scheduler has no notion of "timeslices" in the
way the previous scheduler had, and has no heuristics whatsoever. There is
only one central tunable (you have to switch on CONFIG_SCHED_DEBUG):
- /sys/kernel/debug/sched/min_granularity_ns
+ /sys/kernel/debug/sched/base_slice_ns
which can be used to tune the scheduler from "desktop" (i.e., low latencies) to
"server" (i.e., good batching) workloads. It defaults to a setting suitable
diff --git a/Documentation/userspace-api/netlink/genetlink-legacy.rst b/Documentation/userspace-api/netlink/genetlink-legacy.rst
index 802875a37a27..40b82ad5d54a 100644
--- a/Documentation/userspace-api/netlink/genetlink-legacy.rst
+++ b/Documentation/userspace-api/netlink/genetlink-legacy.rst
@@ -8,11 +8,8 @@ This document describes the many additional quirks and properties
required to describe older Generic Netlink families which form
the ``genetlink-legacy`` protocol level.
-The spec is a work in progress, some of the quirks are just documented
-for future reference.
-
-Specification (defined)
-=======================
+Specification
+=============
Attribute type nests
--------------------
@@ -156,16 +153,27 @@ it will be allocated 3 for the request (``a`` is the previous operation
with a request section and the value of 2) and 8 for response (``c`` is
the previous operation in the "from-kernel" direction).
-Other quirks (todo)
-===================
+Other quirks
+============
Structures
----------
Legacy families can define C structures both to be used as the contents of
an attribute and as a fixed message header. Structures are defined in
-``definitions`` and referenced in operations or attributes. Note that
-structures defined in YAML are implicitly packed according to C
+``definitions`` and referenced in operations or attributes.
+
+members
+~~~~~~~
+
+ - ``name`` - The attribute name of the struct member
+ - ``type`` - One of the scalar types ``u8``, ``u16``, ``u32``, ``u64``, ``s8``,
+ ``s16``, ``s32``, ``s64``, ``string`` or ``binary``.
+ - ``byte-order`` - ``big-endian`` or ``little-endian``
+ - ``doc``, ``enum``, ``enum-as-flags``, ``display-hint`` - Same as for
+ :ref:`attribute definitions <attribute_properties>`
+
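+For example, a fixed message header and its members might be declared as
+follows (a minimal, illustrative sketch based on the ``rtmsg`` struct from the
+``rt-route`` spec; only a subset of its members is shown):
+
+.. code-block:: yaml
+
+  definitions:
+    -
+      name: rtmsg
+      type: struct
+      members:
+        -
+          name: rtm-family
+          type: u8
+        -
+          name: rtm-flags
+          type: u32
+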
+Note that structures defined in YAML are implicitly packed according to C
conventions. For example, the following struct is 4 bytes, not 6 bytes:
.. code-block:: c
diff --git a/Documentation/userspace-api/netlink/index.rst b/Documentation/userspace-api/netlink/index.rst
index 26f3720cb3be..62725dafbbdb 100644
--- a/Documentation/userspace-api/netlink/index.rst
+++ b/Documentation/userspace-api/netlink/index.rst
@@ -14,5 +14,6 @@ Netlink documentation for users.
specs
c-code-gen
genetlink-legacy
+ netlink-raw
See also :ref:`Documentation/core-api/netlink.rst <kernel_netlink>`.
diff --git a/Documentation/userspace-api/netlink/netlink-raw.rst b/Documentation/userspace-api/netlink/netlink-raw.rst
new file mode 100644
index 000000000000..f07fb9b9c101
--- /dev/null
+++ b/Documentation/userspace-api/netlink/netlink-raw.rst
@@ -0,0 +1,58 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+======================================================
+Netlink specification support for raw Netlink families
+======================================================
+
+This document describes the additional properties required by raw Netlink
+families such as ``NETLINK_ROUTE`` which use the ``netlink-raw`` protocol
+specification.
+
+Specification
+=============
+
+The netlink-raw schema extends the :doc:`genetlink-legacy <genetlink-legacy>`
+schema with properties that are needed to specify the protocol numbers and
+multicast IDs used by raw netlink families. See :ref:`classic_netlink` for more
+information.
+
+Globals
+-------
+
+protonum
+~~~~~~~~
+
+The ``protonum`` property is used to specify the protocol number to use when
+opening a netlink socket.
+
+.. code-block:: yaml
+
+ # SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+ name: rt-addr
+ protocol: netlink-raw
+ protonum: 0 # part of the NETLINK_ROUTE protocol
+
+
+Multicast group properties
+--------------------------
+
+value
+~~~~~
+
+The ``value`` property is used to specify the group ID to use for multicast
+group registration.
+
+.. code-block:: yaml
+
+ mcast-groups:
+ list:
+ -
+ name: rtnlgrp-ipv4-ifaddr
+ value: 5
+ -
+ name: rtnlgrp-ipv6-ifaddr
+ value: 9
+ -
+ name: rtnlgrp-mctp-ifaddr
+ value: 34
diff --git a/Documentation/userspace-api/netlink/specs.rst b/Documentation/userspace-api/netlink/specs.rst
index 2e4acde890b7..cc4e2430997e 100644
--- a/Documentation/userspace-api/netlink/specs.rst
+++ b/Documentation/userspace-api/netlink/specs.rst
@@ -68,6 +68,10 @@ The following sections describe the properties of the most modern ``genetlink``
schema. See the documentation of :doc:`genetlink-c <c-code-gen>`
for information on how C names are derived from name properties.
+See also :ref:`Documentation/core-api/netlink.rst <kernel_netlink>` for
+information on the Netlink specification properties that are only relevant to
+the kernel space and not part of the user space API.
+
genetlink
=========
@@ -180,6 +184,8 @@ attributes
List of attributes in the set.
+.. _attribute_properties:
+
Attribute properties
--------------------
@@ -264,6 +270,13 @@ a C array of u32 values can be specified with ``type: binary`` and
``sub-type: u32``. Binary types and legacy array formats are described in
more detail in :doc:`genetlink-legacy`.
+display-hint
+~~~~~~~~~~~~
+
+Optional format indicator that is intended only for choosing the right
+formatting mechanism when displaying values of this type. Currently supported
+hints are ``hex``, ``mac``, ``fddi``, ``ipv4``, ``ipv6`` and ``uuid``.
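+
+For example, the ``rt-route`` spec marks route address attributes so that user
+space tooling can render them as IPv4 addresses (a minimal, illustrative
+sketch):
+
+.. code-block:: yaml
+
+  attribute-sets:
+    -
+      name: route-attrs
+      attributes:
+        -
+          name: rta-dst
+          type: binary
+          display-hint: ipv4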
+
operations
----------
diff --git a/MAINTAINERS b/MAINTAINERS
index 5f527a9ae4b2..d8927aadd094 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -470,7 +470,6 @@ F: drivers/hwmon/adm1029.c
ADM8211 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/admtek/adm8211.*
ADP1653 FLASH CONTROLLER DRIVER
@@ -915,6 +914,18 @@ S: Supported
F: drivers/crypto/ccp/sev*
F: include/uapi/linux/psp-sev.h
+AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - DBC SUPPORT
+M: Mario Limonciello <mario.limonciello@amd.com>
+L: linux-crypto@vger.kernel.org
+S: Supported
+F: drivers/crypto/ccp/dbc.c
+F: drivers/crypto/ccp/dbc.h
+F: drivers/crypto/ccp/platform-access.c
+F: drivers/crypto/ccp/platform-access.h
+F: include/uapi/linux/psp-dbc.h
+F: tools/crypto/ccp/*.c
+F: tools/crypto/ccp/*.py
+
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>
@@ -4138,7 +4149,7 @@ BROADCOM BCM6348/BCM6358 SPI controller DRIVER
M: Jonas Gorski <jonas.gorski@gmail.com>
L: linux-spi@vger.kernel.org
S: Odd Fixes
-F: Documentation/devicetree/bindings/spi/spi-bcm63xx.txt
+F: Documentation/devicetree/bindings/spi/brcm,bcm63xx-spi.yaml
F: drivers/spi/spi-bcm63xx.c
BROADCOM ETHERNET PHY DRIVERS
@@ -4207,7 +4218,7 @@ BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
S: Supported
-F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
+F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.yaml
F: drivers/gpio/gpio-bcm-kona.c
BROADCOM MPI3 STORAGE CONTROLLER DRIVER
@@ -4831,6 +4842,7 @@ F: drivers/input/touchscreen/chipone_icn8505.c
CHROME HARDWARE PLATFORM SUPPORT
M: Benson Leung <bleung@chromium.org>
+M: Tzung-Bi Shih <tzungbi@kernel.org>
L: chrome-platform@lists.linux.dev
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
@@ -4898,7 +4910,11 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: patches@opensource.cirrus.com
S: Maintained
F: Documentation/devicetree/bindings/sound/cirrus,cs*
+F: drivers/mfd/cs42l43*
+F: drivers/pinctrl/cirrus/pinctrl-cs42l43*
+F: drivers/spi/spi-cs42l43*
F: include/dt-bindings/sound/cs*
+F: include/linux/mfd/cs42l43*
F: include/sound/cs*
F: sound/pci/hda/cs*
F: sound/pci/hda/hda_cs_dsp_ctl.*
@@ -6021,7 +6037,7 @@ F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/mfd/dlg,da90*.yaml
F: Documentation/devicetree/bindings/regulator/da92*.txt
F: Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
-F: Documentation/devicetree/bindings/regulator/slg51000.txt
+F: Documentation/devicetree/bindings/regulator/dlg,slg51000.yaml
F: Documentation/devicetree/bindings/sound/da[79]*.txt
F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
F: Documentation/devicetree/bindings/watchdog/da90??-wdt.txt
@@ -8788,6 +8804,15 @@ S: Supported
F: Documentation/networking/device_drivers/ethernet/google/gve.rst
F: drivers/net/ethernet/google
+GOOGLE FIRMWARE DRIVERS
+M: Tzung-Bi Shih <tzungbi@kernel.org>
+R: Brian Norris <briannorris@chromium.org>
+R: Julius Werner <jwerner@chromium.org>
+L: chrome-platform@lists.linux.dev
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git
+F: drivers/firmware/google/
+
GPD POCKET FAN DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: platform-driver-x86@vger.kernel.org
@@ -9325,7 +9350,7 @@ F: drivers/crypto/hisilicon/hpre/hpre_crypto.c
F: drivers/crypto/hisilicon/hpre/hpre_main.c
HISILICON HNS3 PMU DRIVER
-M: Guangbin Huang <huangguangbin2@huawei.com>
+M: Jijie Shao <shaojijie@huawei.com>
S: Supported
F: Documentation/admin-guide/perf/hns3-pmu.rst
F: drivers/perf/hisilicon/hns3_pmu.c
@@ -9363,7 +9388,7 @@ F: Documentation/devicetree/bindings/net/hisilicon*.txt
F: drivers/net/ethernet/hisilicon/
HISILICON PMU DRIVER
-M: Shaokun Zhang <zhangshaokun@hisilicon.com>
+M: Yicong Yang <yangyicong@hisilicon.com>
M: Jonathan Cameron <jonathan.cameron@huawei.com>
S: Supported
W: http://www.hisilicon.com
@@ -9477,10 +9502,8 @@ F: Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
F: drivers/iio/pressure/mprls0025pa.c
HOST AP DRIVER
-M: Jouni Malinen <j@w1.fi>
L: linux-wireless@vger.kernel.org
S: Obsolete
-W: http://w1.fi/hostap-driver.html
F: drivers/net/wireless/intersil/hostap/
HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
@@ -9514,6 +9537,12 @@ S: Maintained
W: http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
F: fs/hpfs/
+HS3001 Hardware Temperature and Humidity Sensor
+M: Andre Werner <andre.werner@systec-electronic.com>
+L: linux-hwmon@vger.kernel.org
+S: Maintained
+F: drivers/hwmon/hs3001.c
+
HSI SUBSYSTEM
M: Sebastian Reichel <sre@kernel.org>
S: Maintained
@@ -11403,6 +11432,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git k
F: Documentation/dev-tools/kunit/
F: include/kunit/
F: lib/kunit/
+F: rust/kernel/kunit.rs
+F: scripts/rustdoc_test_*
F: tools/testing/kunit/
KERNEL USERMODE HELPER
@@ -12289,6 +12320,16 @@ F: Documentation/devicetree/bindings/clock/loongson,ls2k-clk.yaml
F: drivers/clk/clk-loongson2.c
F: include/dt-bindings/clock/loongson,ls2k-clk.h
+LOONGSON SPI DRIVER
+M: Yinbo Zhu <zhuyinbo@loongson.cn>
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/loongson,ls2k-spi.yaml
+F: drivers/spi/spi-loongson-core.c
+F: drivers/spi/spi-loongson-pci.c
+F: drivers/spi/spi-loongson-plat.c
+F: drivers/spi/spi-loongson.h
+
LOONGSON-2 SOC SERIES GUTS DRIVER
M: Yinbo Zhu <zhuyinbo@loongson.cn>
L: loongarch@lists.linux.dev
@@ -15017,6 +15058,7 @@ F: include/linux/power/bq27xxx_battery.h
NOLIBC HEADER FILE
M: Willy Tarreau <w@1wt.eu>
+M: Thomas Weißschuh <linux@weissschuh.net>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
F: tools/include/nolibc/
@@ -17076,6 +17118,7 @@ F: drivers/net/ppp/pptp.c
PRESSURE STALL INFORMATION (PSI)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Suren Baghdasaryan <surenb@google.com>
+R: Peter Zijlstra <peterz@infradead.org>
S: Maintained
F: include/linux/psi*
F: kernel/sched/psi.c
@@ -17485,6 +17528,7 @@ M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <quic_jjohnson@quicinc.com>
L: ath12k@lists.infradead.org
S: Supported
+W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath12k/
@@ -18016,8 +18060,6 @@ REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
@@ -18545,7 +18587,6 @@ F: drivers/media/dvb-frontends/rtl2832_sdr*
RTL8180 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/realtek/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
@@ -18553,14 +18594,12 @@ M: Hin-Tak Leung <hintak.leung@gmail.com>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
-W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
F: drivers/net/wireless/realtek/rtl8xxxu/
RTRS TRANSPORT DRIVERS
@@ -18589,6 +18628,8 @@ R: Boqun Feng <boqun.feng@gmail.com>
R: Gary Guo <gary@garyguo.net>
R: Björn Roy Baron <bjorn3_gh@protonmail.com>
R: Benno Lossin <benno.lossin@proton.me>
+R: Andreas Hindborg <a.hindborg@samsung.com>
+R: Alice Ryhl <aliceryhl@google.com>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://github.com/Rust-for-Linux/linux
@@ -18630,7 +18671,7 @@ L: linux-s390@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
F: Documentation/driver-api/s390-drivers.rst
-F: Documentation/s390/
+F: Documentation/arch/s390/
F: arch/s390/
F: drivers/s390/
F: drivers/watchdog/diag288_wdt.c
@@ -18691,7 +18732,7 @@ M: Niklas Schnelle <schnelle@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
-F: Documentation/s390/pci.rst
+F: Documentation/arch/s390/pci.rst
F: arch/s390/pci/
F: drivers/pci/hotplug/s390_pci_hpc.c
@@ -18708,7 +18749,7 @@ M: Halil Pasic <pasic@linux.ibm.com>
M: Jason Herne <jjherne@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
-F: Documentation/s390/vfio-ap*
+F: Documentation/arch/s390/vfio-ap*
F: drivers/s390/crypto/vfio_ap*
S390 VFIO-CCW DRIVER
@@ -18718,7 +18759,7 @@ R: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
S: Supported
-F: Documentation/s390/vfio-ccw.rst
+F: Documentation/arch/s390/vfio-ccw.rst
F: drivers/s390/cio/vfio_ccw*
F: include/uapi/linux/vfio_ccw.h
@@ -19311,7 +19352,6 @@ F: drivers/misc/sgi-gru/
SGI XP/XPC/XPNET DRIVER
M: Robin Holt <robinmholt@gmail.com>
M: Steve Wahl <steve.wahl@hpe.com>
-R: Mike Travis <mike.travis@hpe.com>
S: Maintained
F: drivers/misc/sgi-xp/
@@ -19622,13 +19662,6 @@ M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
F: drivers/net/ethernet/smsc/smc91x.*
-SMM665 HARDWARE MONITOR DRIVER
-M: Guenter Roeck <linux@roeck-us.net>
-L: linux-hwmon@vger.kernel.org
-S: Maintained
-F: Documentation/hwmon/smm665.rst
-F: drivers/hwmon/smm665.c
-
SMSC EMC2103 HARDWARE MONITOR DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
L: linux-hwmon@vger.kernel.org
@@ -21088,6 +21121,39 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
F: sound/soc/ti/
+TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS
+M: Shenghao Ding <shenghao-ding@ti.com>
+M: Kevin Lu <kevin-lu@ti.com>
+M: Baojun Xu <x1077012@ti.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/tas2552.txt
+F: Documentation/devicetree/bindings/sound/tas2562.yaml
+F: Documentation/devicetree/bindings/sound/tas2770.yaml
+F: Documentation/devicetree/bindings/sound/tas27xx.yaml
+F: Documentation/devicetree/bindings/sound/ti,pcm1681.txt
+F: Documentation/devicetree/bindings/sound/ti,pcm3168a.yaml
+F: Documentation/devicetree/bindings/sound/ti,tlv320*.yaml
+F: Documentation/devicetree/bindings/sound/tlv320adcx140.yaml
+F: Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+F: Documentation/devicetree/bindings/sound/tpa6130a2.txt
+F: include/sound/tas2*.h
+F: include/sound/tlv320*.h
+F: include/sound/tpa6130a2-plat.h
+F: sound/pci/hda/tas2781_hda_i2c.c
+F: sound/soc/codecs/pcm1681.c
+F: sound/soc/codecs/pcm1789*.*
+F: sound/soc/codecs/pcm179x*.*
+F: sound/soc/codecs/pcm186x*.*
+F: sound/soc/codecs/pcm3008.*
+F: sound/soc/codecs/pcm3060*.*
+F: sound/soc/codecs/pcm3168a*.*
+F: sound/soc/codecs/pcm5102a.c
+F: sound/soc/codecs/pcm512x*.*
+F: sound/soc/codecs/tas2*.*
+F: sound/soc/codecs/tlv320*.*
+F: sound/soc/codecs/tpa6130a2.*
+
TEXAS INSTRUMENTS DMA DRIVERS
M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
L: dmaengine@vger.kernel.org
@@ -21445,7 +21511,6 @@ L: linux-wireless@vger.kernel.org
S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/wl12xx
W: https://wireless.wiki.kernel.org/en/users/Drivers/wl1251
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
F: drivers/net/wireless/ti/
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
@@ -23176,7 +23241,8 @@ F: arch/x86/platform
X86 PLATFORM UV HPE SUPERDOME FLEX
M: Steve Wahl <steve.wahl@hpe.com>
-R: Mike Travis <mike.travis@hpe.com>
+R: Justin Ernst <justin.ernst@hpe.com>
+R: Kyle Meyer <kyle.meyer@hpe.com>
R: Dimitri Sivanich <dimitri.sivanich@hpe.com>
R: Russ Anderson <russ.anderson@hpe.com>
S: Supported
diff --git a/Makefile b/Makefile
index 4739c21a63e2..4f283d915e54 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -467,6 +467,7 @@ export rust_common_flags := --edition=2021 \
-Dclippy::let_unit_value -Dclippy::mut_mut \
-Dclippy::needless_bitwise_bool \
-Dclippy::needless_continue \
+ -Dclippy::no_mangle_with_rust_abi \
-Wclippy::dbg_macro
KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
@@ -1289,7 +1290,7 @@ prepare0: archprepare
# All the preparing..
prepare: prepare0
ifdef CONFIG_RUST
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh -v
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh
$(Q)$(MAKE) $(build)=rust
endif
@@ -1825,7 +1826,7 @@ $(DOC_TARGETS):
# "Is Rust available?" target
PHONY += rustavailable
rustavailable:
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh -v && echo "Rust is available!"
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust_is_available.sh && echo "Rust is available!"
# Documentation target
#
@@ -1859,11 +1860,6 @@ rustfmt:
rustfmtcheck: rustfmt_flags = --check
rustfmtcheck: rustfmt
-# IDE support targets
-PHONY += rust-analyzer
-rust-analyzer:
- $(Q)$(MAKE) $(build)=rust $@
-
# Misc
# ---------------------------------------------------------------------------
@@ -1924,6 +1920,7 @@ help:
@echo ' modules - default target, build the module(s)'
@echo ' modules_install - install the module'
@echo ' clean - remove generated files in module directory only'
+ @echo ' rust-analyzer - generate rust-project.json rust-analyzer support file'
@echo ''
__external_modules_error:
@@ -2065,6 +2062,11 @@ quiet_cmd_tags = GEN $@
tags TAGS cscope gtags: FORCE
$(call cmd,tags)
+# IDE support targets
+PHONY += rust-analyzer
+rust-analyzer:
+ $(Q)$(MAKE) $(build)=rust $@
+
# Script to generate missing namespace dependencies
# ---------------------------------------------------------------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index aff2746c8af2..63c5d6a2022b 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS
config HOTPLUG_SMT
bool
+config SMT_NUM_THREADS_DYNAMIC
+ bool
+
# Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL
config HOTPLUG_CORE_SYNC
bool
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index d98701ee36c6..5db88b627439 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -97,7 +97,7 @@ struct osf_dirent {
unsigned int d_ino;
unsigned short d_reclen;
unsigned short d_namlen;
- char d_name[1];
+ char d_name[];
};
struct osf_dirent_callback {
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 1f13995d00d7..ad37569d0507 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -491,3 +491,4 @@
559 common futex_waitv sys_futex_waitv
560 common set_mempolicy_home_node sys_ni_syscall
561 common cachestat sys_cachestat
+562 common fchmodat2 sys_fchmodat2
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index dfeed440254a..fe4326d938c1 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -25,6 +25,9 @@ static inline int syscall_get_nr(struct task_struct *task,
if (IS_ENABLED(CONFIG_AEABI) && !IS_ENABLED(CONFIG_OABI_COMPAT))
return task_thread_info(task)->abi_syscall;
+ if (task_thread_info(task)->abi_syscall == -1)
+ return -1;
+
return task_thread_info(task)->abi_syscall & __NR_SYSCALL_MASK;
}
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bcc4c9ec3aa4..5c31e9de7a60 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -90,6 +90,7 @@ slow_work_pending:
cmp r0, #0
beq no_work_pending
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
+ str scno, [tsk, #TI_ABI_SYSCALL] @ make sure tracers see update
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
ENDPROC(ret_fast_syscall)
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 054e9199f30d..dc0fb7a81371 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -626,7 +626,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
hw->address &= ~alignment_mask;
hw->ctrl.len <<= offset;
- if (is_default_overflow_handler(bp)) {
+ if (uses_default_overflow_handler(bp)) {
/*
* Mismatch breakpoints are required for single-stepping
* breakpoints.
@@ -798,7 +798,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
* Otherwise, insert a temporary mismatch breakpoint so that
* we can single-step over the watchpoint trigger.
*/
- if (!is_default_overflow_handler(wp))
+ if (!uses_default_overflow_handler(wp))
continue;
step:
enable_single_step(wp, instruction_pointer(regs));
@@ -811,7 +811,7 @@ step:
info->trigger = addr;
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
perf_bp_event(wp, regs);
- if (is_default_overflow_handler(wp))
+ if (uses_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
}
@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (is_default_overflow_handler(bp))
+ if (uses_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 2d8e2516906b..fef32d73f912 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -783,8 +783,9 @@ long arch_ptrace(struct task_struct *child, long request,
break;
case PTRACE_SET_SYSCALL:
- task_thread_info(child)->abi_syscall = data &
- __NR_SYSCALL_MASK;
+ if (data != -1)
+ data &= __NR_SYSCALL_MASK;
+ task_thread_info(child)->abi_syscall = data;
ret = 0;
break;
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 8ebed8a13874..c572d6c3dee0 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -465,3 +465,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a2511b30d0f6..29db061db9bb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1793,9 +1793,6 @@ config ARM64_PAN
The feature is detected at runtime, and will remain as a 'nop'
instruction if the cpu does not implement the feature.
-config AS_HAS_LDAPR
- def_bool $(as-instr,.arch_extension rcpc)
-
config AS_HAS_LSE_ATOMICS
def_bool $(as-instr,.arch_extension lse)
@@ -1933,6 +1930,9 @@ config AS_HAS_ARMV8_3
config AS_HAS_CFI_NEGATE_RA_STATE
def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
+config AS_HAS_LDAPR
+ def_bool $(as-instr,.arch_extension rcpc)
+
endmenu # "ARMv8.3 architectural features"
menu "ARMv8.4 architectural features"
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 4818e204c2ac..fbe64dce66e0 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -81,11 +81,6 @@ aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
-CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
-
-$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
- $(call if_changed_rule,cc_o_c)
-
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
diff --git a/arch/arm64/crypto/aes-glue-ce.c b/arch/arm64/crypto/aes-glue-ce.c
new file mode 100644
index 000000000000..7d309ceeddf3
--- /dev/null
+++ b/arch/arm64/crypto/aes-glue-ce.c
@@ -0,0 +1,2 @@
+#define USE_V8_CRYPTO_EXTENSIONS
+#include "aes-glue.c"
diff --git a/arch/arm64/crypto/aes-glue-neon.c b/arch/arm64/crypto/aes-glue-neon.c
new file mode 100644
index 000000000000..8ba046321064
--- /dev/null
+++ b/arch/arm64/crypto/aes-glue-neon.c
@@ -0,0 +1 @@
+#include "aes-glue.c"
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index bd68e1b7f29f..4d537d56eb84 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -42,6 +42,9 @@
#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
spe_interrupt) + sizeof(u16))
+#define ACPI_MADT_GICC_TRBE (offsetof(struct acpi_madt_generic_interrupt, \
+ trbe_interrupt) + sizeof(u16))
+
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 4cf2cb053bc8..f482b994c608 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -30,28 +30,16 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md,
bool has_bti);
-#define arch_efi_call_virt_setup() \
-({ \
- efi_virtmap_load(); \
- __efi_fpsimd_begin(); \
- raw_spin_lock(&efi_rt_lock); \
-})
-
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) \
__efi_rt_asm_wrapper((p)->f, #f, args)
-#define arch_efi_call_virt_teardown() \
-({ \
- raw_spin_unlock(&efi_rt_lock); \
- __efi_fpsimd_end(); \
- efi_virtmap_unload(); \
-})
-
-extern raw_spinlock_t efi_rt_lock;
extern u64 *efi_rt_stack_top;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
+
/*
* efi_rt_stack_top[-1] contains the value the stack pointer had before
* switching to the EFI runtime stack.
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 692b1ec663b2..521267478d18 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -138,6 +138,7 @@
#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
#define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS)
+#define KERNEL_HWCAP_HBC __khwcap2_feature(HBC)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 577773870b66..85d26143faa5 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -118,31 +118,4 @@
#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
#endif
-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define ARM64_MEMSTART_SHIFT PUD_SHIFT
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
-#else
-#define ARM64_MEMSTART_SHIFT PMD_SHIFT
-#endif
-
-/*
- * sparsemem vmemmap imposes an additional requirement on the alignment of
- * memstart_addr, due to the fact that the base of the vmemmap region
- * has a direct correspondence, and needs to appear sufficiently aligned
- * in the virtual address space.
- */
-#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
-#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
-#else
-#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
-#endif
-
#endif /* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 4384eaa0aeb7..94b68850cb9f 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -64,7 +64,6 @@ extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
-extern void init_mem_pgprot(void);
extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0bd18de9fd97..72c2e8431360 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -103,6 +103,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
+#define pte_rdonly(pte) (!!(pte_val(pte) & PTE_RDONLY))
#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
@@ -120,7 +121,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
(__boundary - 1 < (end) - 1) ? __boundary : (end); \
})
-#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+#define pte_hw_dirty(pte) (pte_write(pte) && !pte_rdonly(pte))
#define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
@@ -212,7 +213,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
* clear), set the PTE_DIRTY bit.
*/
if (pte_hw_dirty(pte))
- pte = pte_mkdirty(pte);
+ pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -823,7 +824,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
PTE_ATTRINDX_MASK;
/* preserve the hardware dirty information */
if (pte_hw_dirty(pte))
- pte = pte_mkdirty(pte);
+ pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 4292d9bafb9d..484cb6972e99 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -17,6 +17,9 @@
#include <asm/virt.h>
+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
+
extern unsigned long sdei_exit_mode;
/* Software Delegated Exception entry point from firmware*/
@@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
unsigned long pc,
unsigned long pstate);
+/* Abort a running handler. Context is discarded. */
+void __sdei_handler_abort(void);
+
/*
* The above entry point does the minimum to call C code. This function does
* anything else, before calling the driver.
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b481935e9314..16464bf9a8aa 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -803,15 +803,21 @@
/*
* For registers without architectural names, or simply unsupported by
* GAS.
+ *
+ * __check_r forces warnings to be generated by the compiler when
+ * evaluating r which wouldn't normally happen due to being passed to
+ * the assembler via __stringify(r).
*/
#define read_sysreg_s(r) ({ \
u64 __val; \
+ u32 __maybe_unused __check_r = (u32)(r); \
asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
__val; \
})
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)(v); \
+ u32 __maybe_unused __check_r = (u32)(r); \
asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
} while (0)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 64a514f90131..bd77253b62e0 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -39,7 +39,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 452
+#define __NR_compat_syscalls 453
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index d952a28463e0..78b68311ec81 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -909,6 +909,8 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv)
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index a2cac4305b1e..53026f45a509 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -103,5 +103,6 @@
#define HWCAP2_SME_B16B16 (1UL << 41)
#define HWCAP2_SME_F16F16 (1UL << 42)
#define HWCAP2_MOPS (1UL << 43)
+#define HWCAP2_HBC (1UL << 44)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f9d456fe132d..a5f533f63b60 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -222,7 +222,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
@@ -2708,12 +2708,8 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Enhanced Virtualization Traps",
.capability = ARM64_HAS_EVT,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .sys_reg = SYS_ID_AA64MMFR2_EL1,
- .sign = FTR_UNSIGNED,
- .field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
- .field_width = 4,
- .min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
.matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP)
},
{},
};
@@ -2844,6 +2840,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR2_EL1, RPRES, IMP, CAP_HWCAP, KERNEL_HWCAP_RPRES),
HWCAP_CAP(ID_AA64ISAR2_EL1, WFxT, IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
HWCAP_CAP(ID_AA64ISAR2_EL1, MOPS, IMP, CAP_HWCAP, KERNEL_HWCAP_MOPS),
+ HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index d1f68599c29f..f372295207fb 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -9,8 +9,6 @@
#include <linux/acpi.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/psci.h>
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 58622dc85917..98fda8500535 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -126,6 +126,7 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_B16B16] = "smeb16b16",
[KERNEL_HWCAP_SME_F16F16] = "smef16f16",
[KERNEL_HWCAP_MOPS] = "mops",
+ [KERNEL_HWCAP_HBC] = "hbc",
};
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index baab8dd3ead3..49efbdbd6f7a 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -158,7 +158,21 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
return s;
}
-DEFINE_RAW_SPINLOCK(efi_rt_lock);
+static DEFINE_RAW_SPINLOCK(efi_rt_lock);
+
+void arch_efi_call_virt_setup(void)
+{
+ efi_virtmap_load();
+ __efi_fpsimd_begin();
+ raw_spin_lock(&efi_rt_lock);
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+ raw_spin_unlock(&efi_rt_lock);
+ __efi_fpsimd_end();
+ efi_virtmap_unload();
+}
asmlinkage u64 *efi_rt_stack_top __ro_after_init;
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 6b2e0c367702..0fc94207e69a 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -355,6 +355,35 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+/*
+ * As per the ABI exit SME streaming mode and clear the SVE state not
+ * shared with FPSIMD on syscall entry.
+ */
+static inline void fp_user_discard(void)
+{
+ /*
+ * If SME is active then exit streaming mode. If ZA is active
+ * then flush the SVE registers but leave userspace access to
+ * both SVE and SME enabled, otherwise disable SME for the
+ * task and fall through to disabling SVE too. This means
+ * that after a syscall we never have any streaming mode
+ * register state to track, if this changes the KVM code will
+ * need updating.
+ */
+ if (system_supports_sme())
+ sme_smstop_sm();
+
+ if (!system_supports_sve())
+ return;
+
+ if (test_thread_flag(TIF_SVE)) {
+ unsigned int sve_vq_minus_one;
+
+ sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
+ sve_flush_live(true, sve_vq_minus_one);
+ }
+}
+
UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
@@ -644,6 +673,8 @@ static void noinstr el0_svc(struct pt_regs *regs)
{
enter_from_user_mode(regs);
cortex_a76_erratum_1463225_svc_handler();
+ fp_user_discard();
+ local_daif_restore(DAIF_PROCCTX);
do_el0_svc(regs);
exit_to_user_mode(regs);
}
@@ -783,6 +814,7 @@ static void noinstr el0_svc_compat(struct pt_regs *regs)
{
enter_from_user_mode(regs);
cortex_a76_erratum_1463225_svc_handler();
+ local_daif_restore(DAIF_PROCCTX);
do_el0_svc_compat(regs);
exit_to_user_mode(regs);
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a40e5e50fa55..6ad61de03d0a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -986,9 +986,13 @@ SYM_CODE_START(__sdei_asm_handler)
mov x19, x1
-#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
+ /* Store the registered-event for crash_smp_send_stop() */
ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
-#endif
+ cbnz w4, 1f
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+ b 2f
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2: str x19, [x5]
#ifdef CONFIG_VMAP_STACK
/*
@@ -1055,6 +1059,14 @@ SYM_CODE_START(__sdei_asm_handler)
ldr_l x2, sdei_exit_mode
+ /* Clear the registered-event seen by crash_smp_send_stop() */
+ ldrb w3, [x4, #SDEI_EVENT_PRIORITY]
+ cbnz w3, 1f
+ adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
+ b 2f
+1: adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
+2: str xzr, [x5]
+
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
sdei_handler_exit exit_mode=x2
alternative_else_nop_endif
@@ -1065,4 +1077,15 @@ alternative_else_nop_endif
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
+
+SYM_CODE_START(__sdei_handler_abort)
+ mov_q x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ adr x1, 1f
+ ldr_l x2, sdei_exit_mode
+ sdei_handler_exit exit_mode=x2
+ // exit the handler and jump to the next instruction.
+ // Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
+1: ret
+SYM_CODE_END(__sdei_handler_abort)
+NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 087c05aa960e..91e44ac7150f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1179,9 +1179,6 @@ void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
*/
u64 read_zcr_features(void)
{
- u64 zcr;
- unsigned int vq_max;
-
/*
* Set the maximum possible VL, and write zeroes to all other
* bits to see if they stick.
@@ -1189,12 +1186,8 @@ u64 read_zcr_features(void)
sve_kernel_enable(NULL);
write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
- zcr = read_sysreg_s(SYS_ZCR_EL1);
- zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
- vq_max = sve_vq_from_vl(sve_get_vl());
- zcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
- return zcr;
+ /* Return LEN value that would be written to get the maximum VL */
+ return sve_vq_from_vl(sve_get_vl()) - 1;
}
void __init sve_setup(void)
@@ -1349,9 +1342,6 @@ void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
*/
u64 read_smcr_features(void)
{
- u64 smcr;
- unsigned int vq_max;
-
sme_kernel_enable(NULL);
/*
@@ -1360,12 +1350,8 @@ u64 read_smcr_features(void)
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
SYS_SMCR_EL1);
- smcr = read_sysreg_s(SYS_SMCR_EL1);
- smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
- vq_max = sve_vq_from_vl(sme_get_vl());
- smcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
- return smcr;
+ /* Return LEN value that would be written to get the maximum VL */
+ return sve_vq_from_vl(sme_get_vl()) - 1;
}
void __init sme_setup(void)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 757a0de07f91..7b236994f0e1 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -113,7 +113,7 @@ SYM_CODE_START(primary_entry)
*/
#if VA_BITS > 48
mrs_s x0, SYS_ID_AA64MMFR2_EL1
- tst x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
+ tst x0, ID_AA64MMFR2_EL1_VARange_MASK
mov x0, #VA_BITS
mov x25, #VA_BITS_MIN
csel x25, x25, x0, eq
@@ -756,7 +756,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
b.ne 2f
mrs_s x0, SYS_ID_AA64MMFR2_EL1
- and x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
+ and x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
cbnz x0, 2f
update_early_cpu_boot_status \
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index db2a1861bb97..35225632d70a 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -654,7 +654,7 @@ static int breakpoint_handler(unsigned long unused, unsigned long esr,
perf_bp_event(bp, regs);
/* Do we need to handle the stepping? */
- if (is_default_overflow_handler(bp))
+ if (uses_default_overflow_handler(bp))
step = 1;
unlock:
rcu_read_unlock();
@@ -733,7 +733,7 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
static int watchpoint_report(struct perf_event *wp, unsigned long addr,
struct pt_regs *regs)
{
- int step = is_default_overflow_handler(wp);
+ int step = uses_default_overflow_handler(wp);
struct arch_hw_breakpoint *info = counter_arch_bp(wp);
info->trigger = addr;
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 2fe2491b692c..aee12c75b738 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -262,9 +262,9 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
if (!len)
return;
- len = min(len, ARRAY_SIZE(buf) - 1);
- strncpy(buf, cmdline, len);
- buf[len] = 0;
+ len = strscpy(buf, cmdline, ARRAY_SIZE(buf));
+ if (len == -E2BIG)
+ len = ARRAY_SIZE(buf) - 1;
if (strcmp(buf, "--") == 0)
return;
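The strncpy()-to-strscpy() conversion above works because strscpy() always NUL-terminates and reports truncation as -E2BIG instead of silently copying a partial, unterminated string. A hedged sketch of the same pattern:

#include <linux/string.h>
#include <linux/errno.h>

/* Sketch: copy an option token into a fixed-size buffer, clamping on overflow. */
static ssize_t copy_token(char *buf, size_t bufsz, const char *src)
{
	ssize_t len = strscpy(buf, src, bufsz);

	if (len == -E2BIG)		/* truncated: buf holds bufsz - 1 chars plus NUL */
		len = bufsz - 1;
	/* buf is guaranteed NUL-terminated here, so no manual buf[len] = 0 is needed */
	return len;
}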
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 2276689b5411..f872c57e9909 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -11,8 +11,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/of_pci.h>
-#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 187aa2b175b4..20d7ef82de90 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -891,7 +891,8 @@ static int sve_set_common(struct task_struct *target,
break;
default:
WARN_ON_ONCE(1);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 830be01af32d..255d12f881c2 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -47,6 +47,9 @@ DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif
+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
+DEFINE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
+
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
unsigned long *p;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index edd63894d61e..960b98b43506 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1044,10 +1044,8 @@ void crash_smp_send_stop(void)
* If this cpu is the only one alive at this point in time, online or
* not, there are no stop messages to be sent around, so just back out.
*/
- if (num_other_online_cpus() == 0) {
- sdei_mask_local_cpu();
- return;
- }
+ if (num_other_online_cpus() == 0)
+ goto skip_ipi;
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
@@ -1066,7 +1064,9 @@ void crash_smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
+skip_ipi:
sdei_mask_local_cpu();
+ sdei_handler_abort();
}
bool smp_crash_stop_failed(void)
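Taken together, the smp.c hunks above make the single-CPU case share the SDEI teardown with the normal path, and additionally abort any SDEI handler that was live on the crashing CPU. Roughly, the resulting control flow looks like the sketch below (early-exit and IPI bookkeeping elided):

void crash_smp_send_stop(void)
{
	cpumask_t mask;

	/* "already stopped" bookkeeping elided */

	if (num_other_online_cpus() == 0)
		goto skip_ipi;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	/* ... send the crash-stop IPI and wait for the secondary CPUs ... */

skip_ipi:
	sdei_mask_local_cpu();
	sdei_handler_abort();	/* new: complete-and-resume any live SDEI handler */
}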
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index b1ae2f2eaf77..9a70d9746b66 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,7 +8,6 @@
#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
-#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
@@ -101,8 +100,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
* (Similarly for HVC and SMC elsewhere.)
*/
- local_daif_restore(DAIF_PROCCTX);
-
if (flags & _TIF_MTE_ASYNC_FAULT) {
/*
* Process the asynchronous tag check fault before the actual
@@ -153,38 +150,8 @@ trace_exit:
syscall_trace_exit(regs);
}
-/*
- * As per the ABI exit SME streaming mode and clear the SVE state not
- * shared with FPSIMD on syscall entry.
- */
-static inline void fp_user_discard(void)
-{
- /*
- * If SME is active then exit streaming mode. If ZA is active
- * then flush the SVE registers but leave userspace access to
- * both SVE and SME enabled, otherwise disable SME for the
- * task and fall through to disabling SVE too. This means
- * that after a syscall we never have any streaming mode
- * register state to track, if this changes the KVM code will
- * need updating.
- */
- if (system_supports_sme())
- sme_smstop_sm();
-
- if (!system_supports_sve())
- return;
-
- if (test_thread_flag(TIF_SVE)) {
- unsigned int sve_vq_minus_one;
-
- sve_vq_minus_one = sve_vq_from_vl(task_get_sve_vl(current)) - 1;
- sve_flush_live(true, sve_vq_minus_one);
- }
-}
-
void do_el0_svc(struct pt_regs *regs)
{
- fp_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 6028f1fe2d1c..45354f2ddf70 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -50,9 +50,7 @@ SECTIONS
. = ALIGN(4);
.altinstructions : {
- __alt_instructions = .;
*(.altinstructions)
- __alt_instructions_end = .;
}
.dynamic : { *(.dynamic) } :text :dynamic
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 9ddc025e4b86..2250253a6429 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -25,7 +25,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
-hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
hyp-obj-y += $(lib-objs)
##
diff --git a/arch/arm64/kvm/hyp/nvhe/list_debug.c b/arch/arm64/kvm/hyp/nvhe/list_debug.c
index d68abd7ea124..46a2d4f2b3c6 100644
--- a/arch/arm64/kvm/hyp/nvhe/list_debug.c
+++ b/arch/arm64/kvm/hyp/nvhe/list_debug.c
@@ -26,8 +26,9 @@ static inline __must_check bool nvhe_check_data_corruption(bool v)
/* The predicates checked here are taken from lib/list_debug.c. */
-bool __list_add_valid(struct list_head *new, struct list_head *prev,
- struct list_head *next)
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
{
if (NVHE_CHECK_DATA_CORRUPTION(next->prev != prev) ||
NVHE_CHECK_DATA_CORRUPTION(prev->next != next) ||
@@ -37,7 +38,8 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
return true;
}
-bool __list_del_entry_valid(struct list_head *entry)
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
{
struct list_head *prev, *next;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d31c3a9290c5..4fcb88a445ef 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -73,6 +73,33 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ARM64_MEMSTART_SHIFT PUD_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
+#else
+#define ARM64_MEMSTART_SHIFT PMD_SHIFT
+#endif
+
+/*
+ * sparsemem vmemmap imposes an additional requirement on the alignment of
+ * memstart_addr, due to the fact that the base of the vmemmap region
+ * has a direct correspondence, and needs to appear sufficiently aligned
+ * in the virtual address space.
+ */
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
+#else
+#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
+#endif
+
static int __init reserve_crashkernel_low(unsigned long long low_size)
{
unsigned long long low_base;
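As a concrete reading of the new macros (illustrative numbers for a 4K-granule kernel, where PUD_SHIFT is 30):

/* Illustrative only: 4K pages, 4-level tables. */
#define PUD_SHIFT		30
#define ARM64_MEMSTART_SHIFT	PUD_SHIFT		/* per the 4K branch above */
/*
 * Provided SECTION_SIZE_BITS <= 30, ARM64_MEMSTART_ALIGN becomes 1UL << 30,
 * i.e. the base of RAM is rounded down to a 1 GiB boundary so the linear
 * map can start on a PUD-sized block mapping.
 */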
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2baeec419f62..14fdf645edc8 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -447,7 +447,7 @@ SYM_FUNC_START(__cpu_setup)
* via capabilities.
*/
mrs x9, ID_AA64MMFR1_EL1
- and x9, x9, #0xf
+ and x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK
cbz x9, 1f
orr tcr, tcr, #TCR_HA // hardware Access flag update
1:
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 87927eb824cc..58500a964238 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -11,7 +11,7 @@
#ifdef __KERNEL__
-#include <acpi/pdc_intel.h>
+#include <acpi/proc_cap_intel.h>
#include <linux/init.h>
#include <linux/numa.h>
@@ -69,9 +69,9 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
static inline bool arch_has_acpi_pdc(void) { return true; }
-static inline void arch_acpi_set_pdc_bits(u32 *buf)
+static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
{
- buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+ *cap |= ACPI_PROC_CAP_EST_CAPABILITY_SMP;
}
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index f8c74ffeeefb..83d8609aec03 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -372,3 +372,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index e71d5bf2cee0..465759f6b0ed 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -662,5 +662,3 @@ source "kernel/power/Kconfig"
source "drivers/acpi/Kconfig"
endmenu
-
-source "drivers/firmware/Kconfig"
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index b1e5db51b61c..ef87bab46754 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -83,8 +83,8 @@ KBUILD_CFLAGS_KERNEL += -fPIE
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
endif
-cflags-y += -ffreestanding
cflags-y += $(call cc-option, -mno-check-zero-division)
+cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 6b222f227342..93783fa24f6e 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += dma-contiguous.h
-generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h
index b541f6248837..c2d8962fda00 100644
--- a/arch/loongarch/include/asm/fpu.h
+++ b/arch/loongarch/include/asm/fpu.h
@@ -173,16 +173,30 @@ static inline void restore_fp(struct task_struct *tsk)
_restore_fp(&tsk->thread.fpu);
}
-static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
+static inline void save_fpu_regs(struct task_struct *tsk)
{
+ unsigned int euen;
+
if (tsk == current) {
preempt_disable();
- if (is_fpu_owner())
+
+ euen = csr_read32(LOONGARCH_CSR_EUEN);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (euen & CSR_EUEN_LASXEN)
+ _save_lasx(&current->thread.fpu);
+ else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (euen & CSR_EUEN_LSXEN)
+ _save_lsx(&current->thread.fpu);
+ else
+#endif
+ if (euen & CSR_EUEN_FPEN)
_save_fp(&current->thread.fpu);
+
preempt_enable();
}
-
- return tsk->thread.fpu.fpr;
}
static inline int is_simd_owner(void)
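save_fpu_regs() replaces get_fpu_regs(): instead of handing back a pointer, it flushes any live FP/LSX/LASX state into the task's thread struct so that callers can read tsk->thread.fpu directly. A hedged caller sketch (the ptrace hunks further down follow the same pattern):

/* Sketch: snapshot a task's FP registers after making sure they are in memory. */
static void snapshot_fpu(struct task_struct *tsk, union fpureg *dst)
{
	save_fpu_regs(tsk);	/* only does work when tsk is current and owns the FPU */
	memcpy(dst, tsk->thread.fpu.fpr, sizeof(tsk->thread.fpu.fpr));
}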
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
index 83e995b30e47..c49675852bdc 100644
--- a/arch/loongarch/include/asm/local.h
+++ b/arch/loongarch/include/asm/local.h
@@ -63,8 +63,8 @@ static inline long local_cmpxchg(local_t *l, long old, long new)
static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
{
- typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
- return try_cmpxchg_local(&l->a.counter, __old, new);
+ return try_cmpxchg_local(&l->a.counter,
+ (typeof(l->a.counter) *) old, new);
}
#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h
index 35f0958163ac..f3ddaed9ef7f 100644
--- a/arch/loongarch/include/asm/ptrace.h
+++ b/arch/loongarch/include/asm/ptrace.h
@@ -162,7 +162,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long val
#define instruction_pointer(regs) ((regs)->csr_era)
#define profile_pc(regs) instruction_pointer(regs)
-extern void die(const char *, struct pt_regs *) __noreturn;
+extern void die(const char *str, struct pt_regs *regs);
static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 416b653bccb4..66ecb480c894 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -98,8 +98,6 @@ static inline void __cpu_die(unsigned int cpu)
{
loongson_cpu_die(cpu);
}
-
-extern void __noreturn play_dead(void);
#endif
#endif /* __ASM_SMP_H */
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index f3df5f0a4509..501094a09f5d 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -6,12 +6,12 @@
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
-#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index 021b59c248fa..fc55c4de2a11 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -207,8 +207,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
} else {
ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE |
- 1 << MWPnCFG3_LoadEn | 1 << MWPnCFG3_StoreEn);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
diff --git a/arch/loongarch/kernel/mcount.S b/arch/loongarch/kernel/mcount.S
index cb8e5803de4b..3015896016a0 100644
--- a/arch/loongarch/kernel/mcount.S
+++ b/arch/loongarch/kernel/mcount.S
@@ -5,7 +5,7 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
-#include <asm/export.h>
+#include <linux/export.h>
#include <asm/ftrace.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
index e16ab0b98e5a..482aa553aa2d 100644
--- a/arch/loongarch/kernel/mcount_dyn.S
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -3,7 +3,6 @@
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
-#include <asm/export.h>
#include <asm/ftrace.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 2e04eb07abb6..4ee1e9d6a65f 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -61,13 +61,6 @@ EXPORT_SYMBOL(__stack_chk_guard);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-#ifdef CONFIG_HOTPLUG_CPU
-void __noreturn arch_cpu_idle_dead(void)
-{
- play_dead();
-}
-#endif
-
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index a0767c3a0f0a..f72adbf530c6 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -147,6 +147,8 @@ static int fpr_get(struct task_struct *target,
{
int r;
+ save_fpu_regs(target);
+
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
r = gfpr_get(target, &to);
else
@@ -278,6 +280,8 @@ static int simd_get(struct task_struct *target,
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ save_fpu_regs(target);
+
if (!tsk_used_math(target)) {
/* The task hasn't used FP or LSX, fill with 0xff */
copy_pad_fprs(target, regset, &to, 0);
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 8ea1bbcf13a7..6667b0a90f81 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -317,7 +317,7 @@ void loongson_cpu_die(unsigned int cpu)
mb();
}
-void play_dead(void)
+void __noreturn arch_cpu_idle_dead(void)
{
register uint64_t addr;
register void (*init_fn)(void);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 8fb5e7a77145..89699db45cec 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -383,16 +383,15 @@ void show_registers(struct pt_regs *regs)
static DEFINE_RAW_SPINLOCK(die_lock);
-void __noreturn die(const char *str, struct pt_regs *regs)
+void die(const char *str, struct pt_regs *regs)
{
+ int ret;
static int die_counter;
- int sig = SIGSEGV;
oops_enter();
- if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
- SIGSEGV) == NOTIFY_STOP)
- sig = 0;
+ ret = notify_die(DIE_OOPS, str, regs, 0,
+ current->thread.trap_nr, SIGSEGV);
console_verbose();
raw_spin_lock_irq(&die_lock);
@@ -405,6 +404,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
oops_exit();
+ if (ret == NOTIFY_STOP)
+ return;
+
if (regs && kexec_should_crash(current))
crash_kexec(regs);
@@ -414,7 +416,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
if (panic_on_oops)
panic("Fatal exception");
- make_task_dead(sig);
+ make_task_dead(SIGSEGV);
}
static inline void setup_vint_size(unsigned int size)
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index 9dcf71719387..0790eadce166 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -3,12 +3,12 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/cpu.h>
-#include <asm/export.h>
#include <asm/regdef.h>
.irp to, 0, 1, 2, 3, 4, 5, 6, 7
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index fecd08cad702..bfe3d2793d00 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -3,12 +3,12 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/cpu.h>
-#include <asm/export.h>
#include <asm/regdef.h>
.irp to, 0, 1, 2, 3, 4, 5, 6, 7
diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S
index 39ce6621c704..cc30b3b6252f 100644
--- a/arch/loongarch/lib/memcpy.S
+++ b/arch/loongarch/lib/memcpy.S
@@ -3,11 +3,11 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
-#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START(memcpy)
diff --git a/arch/loongarch/lib/memmove.S b/arch/loongarch/lib/memmove.S
index 45b725ba7867..7dc76d1484b6 100644
--- a/arch/loongarch/lib/memmove.S
+++ b/arch/loongarch/lib/memmove.S
@@ -3,11 +3,11 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
-#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START(memmove)
diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S
index b39c6194e3ae..3f20f7996e8e 100644
--- a/arch/loongarch/lib/memset.S
+++ b/arch/loongarch/lib/memset.S
@@ -3,11 +3,11 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
-#include <asm/export.h>
#include <asm/regdef.h>
.macro fill_to_64 r0
diff --git a/arch/loongarch/lib/unaligned.S b/arch/loongarch/lib/unaligned.S
index 9177fd638f07..185f82d85810 100644
--- a/arch/loongarch/lib/unaligned.S
+++ b/arch/loongarch/lib/unaligned.S
@@ -9,7 +9,6 @@
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/errno.h>
-#include <asm/export.h>
#include <asm/regdef.h>
.L_fixup_handle_unaligned:
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
index 4c874a7af0ad..7ad76551d313 100644
--- a/arch/loongarch/mm/page.S
+++ b/arch/loongarch/mm/page.S
@@ -2,9 +2,9 @@
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/page.h>
#include <asm/regdef.h>
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index 4ad78703de6f..ca17dd3a1915 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -3,7 +3,6 @@
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 4383ed851063..6deb8faa564b 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -591,7 +591,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -635,6 +634,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index ec0f9c9f9562..802c161827f4 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -548,7 +548,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -591,6 +590,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 8656ae1f239e..2cb3d755873b 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -568,7 +568,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -612,6 +611,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 496fb6a415ea..b13552caa6b3 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -540,7 +540,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -583,6 +582,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 4add7ab9973b..f88356c45440 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -550,7 +550,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -593,6 +592,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 5845f1f71fd1..7c2ebb616fba 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -614,6 +613,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index bbb251bab81a..d3b272910b38 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -656,7 +656,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -700,6 +699,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 4f9cfc70c66d..4529bc4b843c 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -539,7 +539,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -582,6 +581,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 67c42b4822f0..30824032e4d5 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -540,7 +540,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -583,6 +582,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 85f19515200b..3911211410ed 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -557,7 +557,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -601,6 +600,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index b1b15acb5d5f..991730c50957 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -538,7 +538,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -580,6 +579,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 91d66c0f5ab6..e80d7509ab1d 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -538,7 +538,6 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_WP512=m
@@ -581,6 +580,7 @@ CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
+CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m
CONFIG_TEST_BITOPS=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1b720299deb1..0dbf9c5c6fae 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
-generic-y += export.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 365f39f5e256..df1f6b450cc5 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -31,6 +31,9 @@
__rem; \
})
+/* defining this stops the unused helper function from being built */
+#define __div64_32 __div64_32
+
#endif /* CONFIG_CPU_HAS_NO_MULDIV64 */
#endif /* _M68K_DIV64_H */
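The self-define works because the generic fallback in lib/math/div64.c is wrapped in an #ifndef __div64_32 guard, so an architecture that supplies its own do_div() can compile the helper out. A paraphrased sketch of that guard (placeholder body, not the actual kernel implementation):

#include <stdint.h>

#ifndef __div64_32
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);	/* placeholder: the real code avoids 64/32 hw division */

	*n /= base;
	return rem;
}
#endif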
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index f0f5021d6327..760cc13acdf4 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -41,6 +41,7 @@ static inline char *strncpy(char *dest, const char *src, size_t n)
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *, const void *, __kernel_size_t);
+extern int memcmp(const void *, const void *, __kernel_size_t);
#define memcmp(d, s, n) __builtin_memcmp(d, s, n)
#define __HAVE_ARCH_MEMSET
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 4f504783371f..259ceb125367 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -451,3 +451,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/m68k/lib/divsi3.S b/arch/m68k/lib/divsi3.S
index 3a2143f51631..62787b4333e7 100644
--- a/arch/m68k/lib/divsi3.S
+++ b/arch/m68k/lib/divsi3.S
@@ -33,7 +33,7 @@ General Public License for more details. */
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* These are predefined by new versions of GNU cpp. */
diff --git a/arch/m68k/lib/modsi3.S b/arch/m68k/lib/modsi3.S
index 1c967649a4e0..1bcb742d0b76 100644
--- a/arch/m68k/lib/modsi3.S
+++ b/arch/m68k/lib/modsi3.S
@@ -33,7 +33,7 @@ General Public License for more details. */
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* These are predefined by new versions of GNU cpp. */
diff --git a/arch/m68k/lib/mulsi3.S b/arch/m68k/lib/mulsi3.S
index 855675e69a8a..c2853248249e 100644
--- a/arch/m68k/lib/mulsi3.S
+++ b/arch/m68k/lib/mulsi3.S
@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
diff --git a/arch/m68k/lib/udivsi3.S b/arch/m68k/lib/udivsi3.S
index 78440ae513bf..39ad70596293 100644
--- a/arch/m68k/lib/udivsi3.S
+++ b/arch/m68k/lib/udivsi3.S
@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
diff --git a/arch/m68k/lib/umodsi3.S b/arch/m68k/lib/umodsi3.S
index b6fd11f58948..6640eaa9eb03 100644
--- a/arch/m68k/lib/umodsi3.S
+++ b/arch/m68k/lib/umodsi3.S
@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 858d22bf275c..a3798c2637fd 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -457,3 +457,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 5daf6fe8e3e9..e6ae3df0349d 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -101,8 +101,8 @@ static __inline__ long local_cmpxchg(local_t *l, long old, long new)
static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
{
- typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
- return try_cmpxchg_local(&l->a.counter, __old, new);
+ return try_cmpxchg_local(&l->a.counter,
+ (typeof(l->a.counter) *) old, new);
}
#define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 1976317d4e8b..152034b8e0a0 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -390,3 +390,4 @@
449 n32 futex_waitv sys_futex_waitv
450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node
451 n32 cachestat sys_cachestat
+452 n32 fchmodat2 sys_fchmodat2
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index cfda2511badf..cb5e757f6621 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -366,3 +366,4 @@
449 n64 futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 n64 cachestat sys_cachestat
+452 n64 fchmodat2 sys_fchmodat2
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 7692234c3768..1a646813afdc 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -439,3 +439,4 @@
449 o32 futex_waitv sys_futex_waitv
450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node
451 o32 cachestat sys_cachestat
+452 o32 fchmodat2 sys_fchmodat2
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index a0a9145b6dd4..e97c175b56f9 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -450,3 +450,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index ad1872518992..f25024afdda5 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -111,4 +111,30 @@ config CRYPTO_AES_GCM_P10
Support for cryptographic acceleration instructions on Power10 or
later CPU. This module supports stitched acceleration for AES/GCM.
+config CRYPTO_CHACHA20_P10
+	tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (P10 or later)"
+ depends on PPC64 && CPU_LITTLE_ENDIAN
+ select CRYPTO_SKCIPHER
+ select CRYPTO_LIB_CHACHA_GENERIC
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+ help
+ Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+ stream cipher algorithms
+
+ Architecture: PowerPC64
+ - Power10 or later
+ - Little-endian
+
+config CRYPTO_POLY1305_P10
+ tristate "Hash functions: Poly1305 (P10 or later)"
+ depends on PPC64 && CPU_LITTLE_ENDIAN
+ select CRYPTO_HASH
+ select CRYPTO_LIB_POLY1305_GENERIC
+ help
+ Poly1305 authenticator algorithm (RFC7539)
+
+ Architecture: PowerPC64
+ - Power10 or later
+ - Little-endian
+
endmenu
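Both new options hang off the PPC64 little-endian crypto menu and can be built as modules like the existing P10 AES/GCM entry; an illustrative config fragment:

CONFIG_CRYPTO_CHACHA20_P10=m
CONFIG_CRYPTO_POLY1305_P10=m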
diff --git a/arch/powerpc/crypto/Makefile b/arch/powerpc/crypto/Makefile
index 7b4f516abec1..ebdac1b9eb9a 100644
--- a/arch/powerpc/crypto/Makefile
+++ b/arch/powerpc/crypto/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
+obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
+obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@@ -23,6 +25,8 @@ sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
+chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
+poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
new file mode 100644
index 000000000000..74fb86b0d209
--- /dev/null
+++ b/arch/powerpc/crypto/chacha-p10-glue.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright 2023- IBM Corp. All rights reserved.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/sizes.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+
+asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
+
+static void vsx_begin(void)
+{
+ preempt_disable();
+ enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+ disable_kernel_vsx();
+ preempt_enable();
+}
+
+static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ unsigned int l = bytes & ~0x0FF;
+
+ if (l > 0) {
+ chacha_p10le_8x(state, dst, src, l, nrounds);
+ bytes -= l;
+ src += l;
+ dst += l;
+ state[12] += l / CHACHA_BLOCK_SIZE;
+ }
+
+ if (bytes > 0)
+ chacha_crypt_generic(state, dst, src, bytes, nrounds);
+}
+
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+ hchacha_block_generic(state, stream, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+ int nrounds)
+{
+ if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE ||
+ !crypto_simd_usable())
+ return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+ do {
+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+ vsx_begin();
+ chacha_p10_do_8x(state, dst, src, todo, nrounds);
+ vsx_end();
+
+ bytes -= todo;
+ src += todo;
+ dst += todo;
+ } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+static int chacha_p10_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx, const u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
+
+ chacha_init_generic(state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = rounddown(nbytes, walk.stride);
+
+ if (!crypto_simd_usable()) {
+ chacha_crypt_generic(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes,
+ ctx->nrounds);
+ } else {
+ vsx_begin();
+ chacha_p10_do_8x(state, walk.dst.virt.addr,
+ walk.src.virt.addr, nbytes, ctx->nrounds);
+ vsx_end();
+ }
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int chacha_p10(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_p10_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_p10(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ chacha_init_generic(state, ctx->key, req->iv);
+ hchacha_block_arch(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_p10_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-p10",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = chacha_p10,
+ .decrypt = chacha_p10,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-p10",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = xchacha_p10,
+ .decrypt = xchacha_p10,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-p10",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = xchacha_p10,
+ .decrypt = xchacha_p10,
+ }
+};
+
+static int __init chacha_p10_init(void)
+{
+ static_branch_enable(&have_p10);
+
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_p10_exit(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
+module_exit(chacha_p10_exit);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-p10");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-p10");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-p10");
diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/crypto/chacha-p10le-8x.S
new file mode 100644
index 000000000000..17bedb66b822
--- /dev/null
+++ b/arch/powerpc/crypto/chacha-p10le-8x.S
@@ -0,0 +1,842 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#
+# Accelerated chacha20 implementation for ppc64le.
+#
+# Copyright 2023- IBM Corp. All rights reserved
+#
+#===================================================================================
+# Written by Danny Tsen <dtsen@us.ibm.com>
+#
+# chacha_p10le_8x(u32 *state, byte *dst, const byte *src,
+# size_t len, int nrounds);
+#
+# do rounds, 8 quarter rounds
+# 1. a += b; d ^= a; d <<<= 16;
+# 2. c += d; b ^= c; b <<<= 12;
+# 3. a += b; d ^= a; d <<<= 8;
+# 4. c += d; b ^= c; b <<<= 7
+#
+# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 16
+# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 12
+# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 8
+# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 7
+#
+# 4 blocks (a b c d)
+#
+# a0 b0 c0 d0
+# a1 b1 c1 d1
+# ...
+# a4 b4 c4 d4
+# ...
+# a8 b8 c8 d8
+# ...
+# a12 b12 c12 d12
+# a13 ...
+# a14 ...
+# a15 b15 c15 d15
+#
+# Column round (v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
+# Diagonal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
+#
+
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
+#include <linux/linkage.h>
+
+.machine "any"
+.text
+
+.macro SAVE_GPR GPR OFFSET FRAME
+ std \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro SAVE_VRS VRS OFFSET FRAME
+ li 16, \OFFSET
+ stvx \VRS, 16, \FRAME
+.endm
+
+.macro SAVE_VSX VSX OFFSET FRAME
+ li 16, \OFFSET
+ stxvx \VSX, 16, \FRAME
+.endm
+
+.macro RESTORE_GPR GPR OFFSET FRAME
+ ld \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro RESTORE_VRS VRS OFFSET FRAME
+ li 16, \OFFSET
+ lvx \VRS, 16, \FRAME
+.endm
+
+.macro RESTORE_VSX VSX OFFSET FRAME
+ li 16, \OFFSET
+ lxvx \VSX, 16, \FRAME
+.endm
+
+.macro SAVE_REGS
+ mflr 0
+ std 0, 16(1)
+ stdu 1,-752(1)
+
+ SAVE_GPR 14, 112, 1
+ SAVE_GPR 15, 120, 1
+ SAVE_GPR 16, 128, 1
+ SAVE_GPR 17, 136, 1
+ SAVE_GPR 18, 144, 1
+ SAVE_GPR 19, 152, 1
+ SAVE_GPR 20, 160, 1
+ SAVE_GPR 21, 168, 1
+ SAVE_GPR 22, 176, 1
+ SAVE_GPR 23, 184, 1
+ SAVE_GPR 24, 192, 1
+ SAVE_GPR 25, 200, 1
+ SAVE_GPR 26, 208, 1
+ SAVE_GPR 27, 216, 1
+ SAVE_GPR 28, 224, 1
+ SAVE_GPR 29, 232, 1
+ SAVE_GPR 30, 240, 1
+ SAVE_GPR 31, 248, 1
+
+ addi 9, 1, 256
+ SAVE_VRS 20, 0, 9
+ SAVE_VRS 21, 16, 9
+ SAVE_VRS 22, 32, 9
+ SAVE_VRS 23, 48, 9
+ SAVE_VRS 24, 64, 9
+ SAVE_VRS 25, 80, 9
+ SAVE_VRS 26, 96, 9
+ SAVE_VRS 27, 112, 9
+ SAVE_VRS 28, 128, 9
+ SAVE_VRS 29, 144, 9
+ SAVE_VRS 30, 160, 9
+ SAVE_VRS 31, 176, 9
+
+ SAVE_VSX 14, 192, 9
+ SAVE_VSX 15, 208, 9
+ SAVE_VSX 16, 224, 9
+ SAVE_VSX 17, 240, 9
+ SAVE_VSX 18, 256, 9
+ SAVE_VSX 19, 272, 9
+ SAVE_VSX 20, 288, 9
+ SAVE_VSX 21, 304, 9
+ SAVE_VSX 22, 320, 9
+ SAVE_VSX 23, 336, 9
+ SAVE_VSX 24, 352, 9
+ SAVE_VSX 25, 368, 9
+ SAVE_VSX 26, 384, 9
+ SAVE_VSX 27, 400, 9
+ SAVE_VSX 28, 416, 9
+ SAVE_VSX 29, 432, 9
+ SAVE_VSX 30, 448, 9
+ SAVE_VSX 31, 464, 9
+.endm # SAVE_REGS
+
+.macro RESTORE_REGS
+ addi 9, 1, 256
+ RESTORE_VRS 20, 0, 9
+ RESTORE_VRS 21, 16, 9
+ RESTORE_VRS 22, 32, 9
+ RESTORE_VRS 23, 48, 9
+ RESTORE_VRS 24, 64, 9
+ RESTORE_VRS 25, 80, 9
+ RESTORE_VRS 26, 96, 9
+ RESTORE_VRS 27, 112, 9
+ RESTORE_VRS 28, 128, 9
+ RESTORE_VRS 29, 144, 9
+ RESTORE_VRS 30, 160, 9
+ RESTORE_VRS 31, 176, 9
+
+ RESTORE_VSX 14, 192, 9
+ RESTORE_VSX 15, 208, 9
+ RESTORE_VSX 16, 224, 9
+ RESTORE_VSX 17, 240, 9
+ RESTORE_VSX 18, 256, 9
+ RESTORE_VSX 19, 272, 9
+ RESTORE_VSX 20, 288, 9
+ RESTORE_VSX 21, 304, 9
+ RESTORE_VSX 22, 320, 9
+ RESTORE_VSX 23, 336, 9
+ RESTORE_VSX 24, 352, 9
+ RESTORE_VSX 25, 368, 9
+ RESTORE_VSX 26, 384, 9
+ RESTORE_VSX 27, 400, 9
+ RESTORE_VSX 28, 416, 9
+ RESTORE_VSX 29, 432, 9
+ RESTORE_VSX 30, 448, 9
+ RESTORE_VSX 31, 464, 9
+
+ RESTORE_GPR 14, 112, 1
+ RESTORE_GPR 15, 120, 1
+ RESTORE_GPR 16, 128, 1
+ RESTORE_GPR 17, 136, 1
+ RESTORE_GPR 18, 144, 1
+ RESTORE_GPR 19, 152, 1
+ RESTORE_GPR 20, 160, 1
+ RESTORE_GPR 21, 168, 1
+ RESTORE_GPR 22, 176, 1
+ RESTORE_GPR 23, 184, 1
+ RESTORE_GPR 24, 192, 1
+ RESTORE_GPR 25, 200, 1
+ RESTORE_GPR 26, 208, 1
+ RESTORE_GPR 27, 216, 1
+ RESTORE_GPR 28, 224, 1
+ RESTORE_GPR 29, 232, 1
+ RESTORE_GPR 30, 240, 1
+ RESTORE_GPR 31, 248, 1
+
+ addi 1, 1, 752
+ ld 0, 16(1)
+ mtlr 0
+.endm # RESTORE_REGS
+
+.macro QT_loop_8x
+ # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
+ xxlor 0, 32+25, 32+25
+ xxlor 32+25, 20, 20
+ vadduwm 0, 0, 4
+ vadduwm 1, 1, 5
+ vadduwm 2, 2, 6
+ vadduwm 3, 3, 7
+ vadduwm 16, 16, 20
+ vadduwm 17, 17, 21
+ vadduwm 18, 18, 22
+ vadduwm 19, 19, 23
+
+ vpermxor 12, 12, 0, 25
+ vpermxor 13, 13, 1, 25
+ vpermxor 14, 14, 2, 25
+ vpermxor 15, 15, 3, 25
+ vpermxor 28, 28, 16, 25
+ vpermxor 29, 29, 17, 25
+ vpermxor 30, 30, 18, 25
+ vpermxor 31, 31, 19, 25
+ xxlor 32+25, 0, 0
+ vadduwm 8, 8, 12
+ vadduwm 9, 9, 13
+ vadduwm 10, 10, 14
+ vadduwm 11, 11, 15
+ vadduwm 24, 24, 28
+ vadduwm 25, 25, 29
+ vadduwm 26, 26, 30
+ vadduwm 27, 27, 31
+ vxor 4, 4, 8
+ vxor 5, 5, 9
+ vxor 6, 6, 10
+ vxor 7, 7, 11
+ vxor 20, 20, 24
+ vxor 21, 21, 25
+ vxor 22, 22, 26
+ vxor 23, 23, 27
+
+ xxlor 0, 32+25, 32+25
+ xxlor 32+25, 21, 21
+ vrlw 4, 4, 25 #
+ vrlw 5, 5, 25
+ vrlw 6, 6, 25
+ vrlw 7, 7, 25
+ vrlw 20, 20, 25 #
+ vrlw 21, 21, 25
+ vrlw 22, 22, 25
+ vrlw 23, 23, 25
+ xxlor 32+25, 0, 0
+ vadduwm 0, 0, 4
+ vadduwm 1, 1, 5
+ vadduwm 2, 2, 6
+ vadduwm 3, 3, 7
+ vadduwm 16, 16, 20
+ vadduwm 17, 17, 21
+ vadduwm 18, 18, 22
+ vadduwm 19, 19, 23
+
+ xxlor 0, 32+25, 32+25
+ xxlor 32+25, 22, 22
+ vpermxor 12, 12, 0, 25
+ vpermxor 13, 13, 1, 25
+ vpermxor 14, 14, 2, 25
+ vpermxor 15, 15, 3, 25
+ vpermxor 28, 28, 16, 25
+ vpermxor 29, 29, 17, 25
+ vpermxor 30, 30, 18, 25
+ vpermxor 31, 31, 19, 25
+ xxlor 32+25, 0, 0
+ vadduwm 8, 8, 12
+ vadduwm 9, 9, 13
+ vadduwm 10, 10, 14
+ vadduwm 11, 11, 15
+ vadduwm 24, 24, 28
+ vadduwm 25, 25, 29
+ vadduwm 26, 26, 30
+ vadduwm 27, 27, 31
+ xxlor 0, 32+28, 32+28
+ xxlor 32+28, 23, 23
+ vxor 4, 4, 8
+ vxor 5, 5, 9
+ vxor 6, 6, 10
+ vxor 7, 7, 11
+ vxor 20, 20, 24
+ vxor 21, 21, 25
+ vxor 22, 22, 26
+ vxor 23, 23, 27
+ vrlw 4, 4, 28 #
+ vrlw 5, 5, 28
+ vrlw 6, 6, 28
+ vrlw 7, 7, 28
+ vrlw 20, 20, 28 #
+ vrlw 21, 21, 28
+ vrlw 22, 22, 28
+ vrlw 23, 23, 28
+ xxlor 32+28, 0, 0
+
+ # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
+ xxlor 0, 32+25, 32+25
+ xxlor 32+25, 20, 20
+ vadduwm 0, 0, 5
+ vadduwm 1, 1, 6
+ vadduwm 2, 2, 7
+ vadduwm 3, 3, 4
+ vadduwm 16, 16, 21
+ vadduwm 17, 17, 22
+ vadduwm 18, 18, 23
+ vadduwm 19, 19, 20
+
+ vpermxor 15, 15, 0, 25
+ vpermxor 12, 12, 1, 25
+ vpermxor 13, 13, 2, 25
+ vpermxor 14, 14, 3, 25
+ vpermxor 31, 31, 16, 25
+ vpermxor 28, 28, 17, 25
+ vpermxor 29, 29, 18, 25
+ vpermxor 30, 30, 19, 25
+
+ xxlor 32+25, 0, 0
+ vadduwm 10, 10, 15
+ vadduwm 11, 11, 12
+ vadduwm 8, 8, 13
+ vadduwm 9, 9, 14
+ vadduwm 26, 26, 31
+ vadduwm 27, 27, 28
+ vadduwm 24, 24, 29
+ vadduwm 25, 25, 30
+ vxor 5, 5, 10
+ vxor 6, 6, 11
+ vxor 7, 7, 8
+ vxor 4, 4, 9
+ vxor 21, 21, 26
+ vxor 22, 22, 27
+ vxor 23, 23, 24
+ vxor 20, 20, 25
+
+ xxlor 0, 32+25, 32+25
+ xxlor 32+25, 21, 21
+ vrlw 5, 5, 25
+ vrlw 6, 6, 25
+ vrlw 7, 7, 25
+ vrlw 4, 4, 25
+ vrlw 21, 21, 25
+ vrlw 22, 22, 25
+ vrlw 23, 23, 25
+ vrlw 20, 20, 25
+ xxlor 32+25, 0, 0
+
+ vadduwm 0, 0, 5
+ vadduwm 1, 1, 6
+ vadduwm 2, 2, 7
+ vadduwm 3, 3, 4
+ vadduwm 16, 16, 21
+ vadduwm 17, 17, 22
+ vadduwm 18, 18, 23
+ vadduwm 19, 19, 20
+
+ xxlor 0, 32+25, 32+25
+ xxlor 32+25, 22, 22
+ vpermxor 15, 15, 0, 25
+ vpermxor 12, 12, 1, 25
+ vpermxor 13, 13, 2, 25
+ vpermxor 14, 14, 3, 25
+ vpermxor 31, 31, 16, 25
+ vpermxor 28, 28, 17, 25
+ vpermxor 29, 29, 18, 25
+ vpermxor 30, 30, 19, 25
+ xxlor 32+25, 0, 0
+
+ vadduwm 10, 10, 15
+ vadduwm 11, 11, 12
+ vadduwm 8, 8, 13
+ vadduwm 9, 9, 14
+ vadduwm 26, 26, 31
+ vadduwm 27, 27, 28
+ vadduwm 24, 24, 29
+ vadduwm 25, 25, 30
+
+ xxlor 0, 32+28, 32+28
+ xxlor 32+28, 23, 23
+ vxor 5, 5, 10
+ vxor 6, 6, 11
+ vxor 7, 7, 8
+ vxor 4, 4, 9
+ vxor 21, 21, 26
+ vxor 22, 22, 27
+ vxor 23, 23, 24
+ vxor 20, 20, 25
+ vrlw 5, 5, 28
+ vrlw 6, 6, 28
+ vrlw 7, 7, 28
+ vrlw 4, 4, 28
+ vrlw 21, 21, 28
+ vrlw 22, 22, 28
+ vrlw 23, 23, 28
+ vrlw 20, 20, 28
+ xxlor 32+28, 0, 0
+.endm
+
+.macro QT_loop_4x
+ # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
+ vadduwm 0, 0, 4
+ vadduwm 1, 1, 5
+ vadduwm 2, 2, 6
+ vadduwm 3, 3, 7
+ vpermxor 12, 12, 0, 20
+ vpermxor 13, 13, 1, 20
+ vpermxor 14, 14, 2, 20
+ vpermxor 15, 15, 3, 20
+ vadduwm 8, 8, 12
+ vadduwm 9, 9, 13
+ vadduwm 10, 10, 14
+ vadduwm 11, 11, 15
+ vxor 4, 4, 8
+ vxor 5, 5, 9
+ vxor 6, 6, 10
+ vxor 7, 7, 11
+ vrlw 4, 4, 21
+ vrlw 5, 5, 21
+ vrlw 6, 6, 21
+ vrlw 7, 7, 21
+ vadduwm 0, 0, 4
+ vadduwm 1, 1, 5
+ vadduwm 2, 2, 6
+ vadduwm 3, 3, 7
+ vpermxor 12, 12, 0, 22
+ vpermxor 13, 13, 1, 22
+ vpermxor 14, 14, 2, 22
+ vpermxor 15, 15, 3, 22
+ vadduwm 8, 8, 12
+ vadduwm 9, 9, 13
+ vadduwm 10, 10, 14
+ vadduwm 11, 11, 15
+ vxor 4, 4, 8
+ vxor 5, 5, 9
+ vxor 6, 6, 10
+ vxor 7, 7, 11
+ vrlw 4, 4, 23
+ vrlw 5, 5, 23
+ vrlw 6, 6, 23
+ vrlw 7, 7, 23
+
+ # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
+ vadduwm 0, 0, 5
+ vadduwm 1, 1, 6
+ vadduwm 2, 2, 7
+ vadduwm 3, 3, 4
+ vpermxor 15, 15, 0, 20
+ vpermxor 12, 12, 1, 20
+ vpermxor 13, 13, 2, 20
+ vpermxor 14, 14, 3, 20
+ vadduwm 10, 10, 15
+ vadduwm 11, 11, 12
+ vadduwm 8, 8, 13
+ vadduwm 9, 9, 14
+ vxor 5, 5, 10
+ vxor 6, 6, 11
+ vxor 7, 7, 8
+ vxor 4, 4, 9
+ vrlw 5, 5, 21
+ vrlw 6, 6, 21
+ vrlw 7, 7, 21
+ vrlw 4, 4, 21
+ vadduwm 0, 0, 5
+ vadduwm 1, 1, 6
+ vadduwm 2, 2, 7
+ vadduwm 3, 3, 4
+ vpermxor 15, 15, 0, 22
+ vpermxor 12, 12, 1, 22
+ vpermxor 13, 13, 2, 22
+ vpermxor 14, 14, 3, 22
+ vadduwm 10, 10, 15
+ vadduwm 11, 11, 12
+ vadduwm 8, 8, 13
+ vadduwm 9, 9, 14
+ vxor 5, 5, 10
+ vxor 6, 6, 11
+ vxor 7, 7, 8
+ vxor 4, 4, 9
+ vrlw 5, 5, 23
+ vrlw 6, 6, 23
+ vrlw 7, 7, 23
+ vrlw 4, 4, 23
+.endm
+
+# Transpose
+.macro TP_4x a0 a1 a2 a3
+ xxmrghw 10, 32+\a0, 32+\a1 # a0, a1, b0, b1
+ xxmrghw 11, 32+\a2, 32+\a3 # a2, a3, b2, b3
+ xxmrglw 12, 32+\a0, 32+\a1 # c0, c1, d0, d1
+ xxmrglw 13, 32+\a2, 32+\a3 # c2, c3, d2, d3
+ xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3
+ xxpermdi 32+\a1, 10, 11, 3 # b0, b1, b2, b3
+ xxpermdi 32+\a2, 12, 13, 0 # c0, c1, c2, c3
+ xxpermdi 32+\a3, 12, 13, 3 # d0, d1, d2, d3
+.endm
+
+# key stream = working state + state
+.macro Add_state S
+ vadduwm \S+0, \S+0, 16-\S
+ vadduwm \S+4, \S+4, 17-\S
+ vadduwm \S+8, \S+8, 18-\S
+ vadduwm \S+12, \S+12, 19-\S
+
+ vadduwm \S+1, \S+1, 16-\S
+ vadduwm \S+5, \S+5, 17-\S
+ vadduwm \S+9, \S+9, 18-\S
+ vadduwm \S+13, \S+13, 19-\S
+
+ vadduwm \S+2, \S+2, 16-\S
+ vadduwm \S+6, \S+6, 17-\S
+ vadduwm \S+10, \S+10, 18-\S
+ vadduwm \S+14, \S+14, 19-\S
+
+ vadduwm \S+3, \S+3, 16-\S
+ vadduwm \S+7, \S+7, 17-\S
+ vadduwm \S+11, \S+11, 18-\S
+ vadduwm \S+15, \S+15, 19-\S
+.endm
+
+#
+# write 256 bytes
+#
+.macro Write_256 S
+ add 9, 14, 5
+ add 16, 14, 4
+ lxvw4x 0, 0, 9
+ lxvw4x 1, 17, 9
+ lxvw4x 2, 18, 9
+ lxvw4x 3, 19, 9
+ lxvw4x 4, 20, 9
+ lxvw4x 5, 21, 9
+ lxvw4x 6, 22, 9
+ lxvw4x 7, 23, 9
+ lxvw4x 8, 24, 9
+ lxvw4x 9, 25, 9
+ lxvw4x 10, 26, 9
+ lxvw4x 11, 27, 9
+ lxvw4x 12, 28, 9
+ lxvw4x 13, 29, 9
+ lxvw4x 14, 30, 9
+ lxvw4x 15, 31, 9
+
+ xxlxor \S+32, \S+32, 0
+ xxlxor \S+36, \S+36, 1
+ xxlxor \S+40, \S+40, 2
+ xxlxor \S+44, \S+44, 3
+ xxlxor \S+33, \S+33, 4
+ xxlxor \S+37, \S+37, 5
+ xxlxor \S+41, \S+41, 6
+ xxlxor \S+45, \S+45, 7
+ xxlxor \S+34, \S+34, 8
+ xxlxor \S+38, \S+38, 9
+ xxlxor \S+42, \S+42, 10
+ xxlxor \S+46, \S+46, 11
+ xxlxor \S+35, \S+35, 12
+ xxlxor \S+39, \S+39, 13
+ xxlxor \S+43, \S+43, 14
+ xxlxor \S+47, \S+47, 15
+
+ stxvw4x \S+32, 0, 16
+ stxvw4x \S+36, 17, 16
+ stxvw4x \S+40, 18, 16
+ stxvw4x \S+44, 19, 16
+
+ stxvw4x \S+33, 20, 16
+ stxvw4x \S+37, 21, 16
+ stxvw4x \S+41, 22, 16
+ stxvw4x \S+45, 23, 16
+
+ stxvw4x \S+34, 24, 16
+ stxvw4x \S+38, 25, 16
+ stxvw4x \S+42, 26, 16
+ stxvw4x \S+46, 27, 16
+
+ stxvw4x \S+35, 28, 16
+ stxvw4x \S+39, 29, 16
+ stxvw4x \S+43, 30, 16
+ stxvw4x \S+47, 31, 16
+
+.endm
+
+#
+# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds);
+#
+SYM_FUNC_START(chacha_p10le_8x)
+.align 5
+ cmpdi 6, 0
+ ble Out_no_chacha
+
+ SAVE_REGS
+
+ # r17 - r31 mainly for Write_256 macro.
+ li 17, 16
+ li 18, 32
+ li 19, 48
+ li 20, 64
+ li 21, 80
+ li 22, 96
+ li 23, 112
+ li 24, 128
+ li 25, 144
+ li 26, 160
+ li 27, 176
+ li 28, 192
+ li 29, 208
+ li 30, 224
+ li 31, 240
+
+ mr 15, 6 # len
+ li 14, 0 # offset to inp and outp
+
+ lxvw4x 48, 0, 3 # vr16, constants
+ lxvw4x 49, 17, 3 # vr17, key 1
+ lxvw4x 50, 18, 3 # vr18, key 2
+ lxvw4x 51, 19, 3 # vr19, counter, nonce
+
+ # create (0, 1, 2, 3) counters
+ vspltisw 0, 0
+ vspltisw 1, 1
+ vspltisw 2, 2
+ vspltisw 3, 3
+ vmrghw 4, 0, 1
+ vmrglw 5, 2, 3
+ vsldoi 30, 4, 5, 8 # vr30 counter, 4 (0, 1, 2, 3)
+
+ vspltisw 21, 12
+ vspltisw 23, 7
+
+ addis 11, 2, permx@toc@ha
+ addi 11, 11, permx@toc@l
+ lxvw4x 32+20, 0, 11
+ lxvw4x 32+22, 17, 11
+
+ sradi 8, 7, 1
+
+ mtctr 8
+
+ # save constants to vsx
+ xxlor 16, 48, 48
+ xxlor 17, 49, 49
+ xxlor 18, 50, 50
+ xxlor 19, 51, 51
+
+ vspltisw 25, 4
+ vspltisw 26, 8
+
+ xxlor 25, 32+26, 32+26
+ xxlor 24, 32+25, 32+25
+
+ vadduwm 31, 30, 25 # counter = (0, 1, 2, 3) + (4, 4, 4, 4)
+ xxlor 30, 32+30, 32+30
+ xxlor 31, 32+31, 32+31
+
+ xxlor 20, 32+20, 32+20
+ xxlor 21, 32+21, 32+21
+ xxlor 22, 32+22, 32+22
+ xxlor 23, 32+23, 32+23
+
+ cmpdi 6, 512
+ blt Loop_last
+
+Loop_8x:
+ xxspltw 32+0, 16, 0
+ xxspltw 32+1, 16, 1
+ xxspltw 32+2, 16, 2
+ xxspltw 32+3, 16, 3
+
+ xxspltw 32+4, 17, 0
+ xxspltw 32+5, 17, 1
+ xxspltw 32+6, 17, 2
+ xxspltw 32+7, 17, 3
+ xxspltw 32+8, 18, 0
+ xxspltw 32+9, 18, 1
+ xxspltw 32+10, 18, 2
+ xxspltw 32+11, 18, 3
+ xxspltw 32+12, 19, 0
+ xxspltw 32+13, 19, 1
+ xxspltw 32+14, 19, 2
+ xxspltw 32+15, 19, 3
+ vadduwm 12, 12, 30 # increase counter
+
+ xxspltw 32+16, 16, 0
+ xxspltw 32+17, 16, 1
+ xxspltw 32+18, 16, 2
+ xxspltw 32+19, 16, 3
+
+ xxspltw 32+20, 17, 0
+ xxspltw 32+21, 17, 1
+ xxspltw 32+22, 17, 2
+ xxspltw 32+23, 17, 3
+ xxspltw 32+24, 18, 0
+ xxspltw 32+25, 18, 1
+ xxspltw 32+26, 18, 2
+ xxspltw 32+27, 18, 3
+ xxspltw 32+28, 19, 0
+ xxspltw 32+29, 19, 1
+ vadduwm 28, 28, 31 # increase counter
+ xxspltw 32+30, 19, 2
+ xxspltw 32+31, 19, 3
+
+.align 5
+quarter_loop_8x:
+ QT_loop_8x
+
+ bdnz quarter_loop_8x
+
+ xxlor 0, 32+30, 32+30
+ xxlor 32+30, 30, 30
+ vadduwm 12, 12, 30
+ xxlor 32+30, 0, 0
+ TP_4x 0, 1, 2, 3
+ TP_4x 4, 5, 6, 7
+ TP_4x 8, 9, 10, 11
+ TP_4x 12, 13, 14, 15
+
+ xxlor 0, 48, 48
+ xxlor 1, 49, 49
+ xxlor 2, 50, 50
+ xxlor 3, 51, 51
+ xxlor 48, 16, 16
+ xxlor 49, 17, 17
+ xxlor 50, 18, 18
+ xxlor 51, 19, 19
+ Add_state 0
+ xxlor 48, 0, 0
+ xxlor 49, 1, 1
+ xxlor 50, 2, 2
+ xxlor 51, 3, 3
+ Write_256 0
+ addi 14, 14, 256 # offset +=256
+ addi 15, 15, -256 # len -=256
+
+ xxlor 5, 32+31, 32+31
+ xxlor 32+31, 31, 31
+ vadduwm 28, 28, 31
+ xxlor 32+31, 5, 5
+ TP_4x 16+0, 16+1, 16+2, 16+3
+ TP_4x 16+4, 16+5, 16+6, 16+7
+ TP_4x 16+8, 16+9, 16+10, 16+11
+ TP_4x 16+12, 16+13, 16+14, 16+15
+
+ xxlor 32, 16, 16
+ xxlor 33, 17, 17
+ xxlor 34, 18, 18
+ xxlor 35, 19, 19
+ Add_state 16
+ Write_256 16
+ addi 14, 14, 256 # offset +=256
+ addi 15, 15, -256 # len -= 256
+
+ xxlor 32+24, 24, 24
+ xxlor 32+25, 25, 25
+ xxlor 32+30, 30, 30
+ vadduwm 30, 30, 25
+ vadduwm 31, 30, 24
+ xxlor 30, 32+30, 32+30
+ xxlor 31, 32+31, 32+31
+
+ cmpdi 15, 0
+ beq Out_loop
+
+ cmpdi 15, 512
+ blt Loop_last
+
+ mtctr 8
+ b Loop_8x
+
+Loop_last:
+ lxvw4x 48, 0, 3 # vr16, constants
+ lxvw4x 49, 17, 3 # vr17, key 1
+ lxvw4x 50, 18, 3 # vr18, key 2
+ lxvw4x 51, 19, 3 # vr19, counter, nonce
+
+ vspltisw 21, 12
+ vspltisw 23, 7
+ addis 11, 2, permx@toc@ha
+ addi 11, 11, permx@toc@l
+ lxvw4x 32+20, 0, 11
+ lxvw4x 32+22, 17, 11
+
+ sradi 8, 7, 1
+ mtctr 8
+
+Loop_4x:
+ vspltw 0, 16, 0
+ vspltw 1, 16, 1
+ vspltw 2, 16, 2
+ vspltw 3, 16, 3
+
+ vspltw 4, 17, 0
+ vspltw 5, 17, 1
+ vspltw 6, 17, 2
+ vspltw 7, 17, 3
+ vspltw 8, 18, 0
+ vspltw 9, 18, 1
+ vspltw 10, 18, 2
+ vspltw 11, 18, 3
+ vspltw 12, 19, 0
+ vadduwm 12, 12, 30 # increase counter
+ vspltw 13, 19, 1
+ vspltw 14, 19, 2
+ vspltw 15, 19, 3
+
+.align 5
+quarter_loop:
+ QT_loop_4x
+
+ bdnz quarter_loop
+
+ vadduwm 12, 12, 30
+ TP_4x 0, 1, 2, 3
+ TP_4x 4, 5, 6, 7
+ TP_4x 8, 9, 10, 11
+ TP_4x 12, 13, 14, 15
+
+ Add_state 0
+ Write_256 0
+ addi 14, 14, 256 # offset += 256
+ addi 15, 15, -256 # len -= 256
+
+ # Update state counter
+ vspltisw 25, 4
+ vadduwm 30, 30, 25
+
+ cmpdi 15, 0
+ beq Out_loop
+ cmpdi 15, 256
+ blt Out_loop
+
+ mtctr 8
+ b Loop_4x
+
+Out_loop:
+ RESTORE_REGS
+ blr
+
+Out_no_chacha:
+ li 3, 0
+ blr
+SYM_FUNC_END(chacha_p10le_8x)
+
+SYM_DATA_START_LOCAL(PERMX)
+.align 5
+permx:
+.long 0x22330011, 0x66774455, 0xaabb8899, 0xeeffccdd
+.long 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc
+SYM_DATA_END(PERMX)
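For reference, a minimal scalar C sketch (not part of the patch; names are illustrative) of the quarter-round that the QT_loop_8x/QT_loop_4x macros above vectorize. The 16/12/8/7 rotation amounts correspond to the vrlw shifts and the permx byte-rotate tables, and the 8x path runs this arithmetic on eight blocks at once by splatting each state word across two sets of vector registers; the keystream is the 20-round working state added back to the initial state (Add_state) and XORed into the source data (Write_256).

#include <stdint.h>

#define ROTL32(v, n)	(((v) << (n)) | ((v) >> (32 - (n))))

/* One ChaCha quarter-round (RFC 8439) on four 32-bit state words. */
static inline void chacha_quarter_round(uint32_t *a, uint32_t *b,
					uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = ROTL32(*d, 16);
	*c += *d; *b ^= *c; *b = ROTL32(*b, 12);
	*a += *b; *d ^= *a; *d = ROTL32(*d, 8);
	*c += *d; *b ^= *c; *b = ROTL32(*b, 7);
}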
diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c
new file mode 100644
index 000000000000..95dd708573ee
--- /dev/null
+++ b/arch/powerpc/crypto/poly1305-p10-glue.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Poly1305 authenticator algorithm, RFC7539.
+ *
+ * Copyright 2023- IBM Corp. All rights reserved.
+ */
+
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jump_label.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
+#include <crypto/internal/simd.h>
+#include <linux/cpufeature.h>
+#include <asm/unaligned.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+
+asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen);
+asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit);
+asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst);
+
+static void vsx_begin(void)
+{
+ preempt_disable();
+ enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+ disable_kernel_vsx();
+ preempt_enable();
+}
+
+static int crypto_poly1305_p10_init(struct shash_desc *desc)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ poly1305_core_init(&dctx->h);
+ dctx->buflen = 0;
+ dctx->rset = 0;
+ dctx->sset = false;
+
+ return 0;
+}
+
+static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
+ const u8 *inp, unsigned int len)
+{
+ unsigned int acc = 0;
+
+ if (unlikely(!dctx->sset)) {
+ if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
+ struct poly1305_core_key *key = &dctx->core_r;
+
+ key->key.r64[0] = get_unaligned_le64(&inp[0]);
+ key->key.r64[1] = get_unaligned_le64(&inp[8]);
+ inp += POLY1305_BLOCK_SIZE;
+ len -= POLY1305_BLOCK_SIZE;
+ acc += POLY1305_BLOCK_SIZE;
+ dctx->rset = 1;
+ }
+ if (len >= POLY1305_BLOCK_SIZE) {
+ dctx->s[0] = get_unaligned_le32(&inp[0]);
+ dctx->s[1] = get_unaligned_le32(&inp[4]);
+ dctx->s[2] = get_unaligned_le32(&inp[8]);
+ dctx->s[3] = get_unaligned_le32(&inp[12]);
+ acc += POLY1305_BLOCK_SIZE;
+ dctx->sset = true;
+ }
+ }
+ return acc;
+}
+
+static int crypto_poly1305_p10_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int bytes, used;
+
+ if (unlikely(dctx->buflen)) {
+ bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
+ memcpy(dctx->buf + dctx->buflen, src, bytes);
+ src += bytes;
+ srclen -= bytes;
+ dctx->buflen += bytes;
+
+ if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+ if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf,
+ POLY1305_BLOCK_SIZE))) {
+ vsx_begin();
+ poly1305_64s(&dctx->h, dctx->buf,
+ POLY1305_BLOCK_SIZE, 1);
+ vsx_end();
+ }
+ dctx->buflen = 0;
+ }
+ }
+
+ if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+ bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
+ used = crypto_poly1305_setdctxkey(dctx, src, bytes);
+ if (likely(used)) {
+ srclen -= used;
+ src += used;
+ }
+ if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) {
+ vsx_begin();
+ poly1305_p10le_4blocks(&dctx->h, src, srclen);
+ vsx_end();
+ src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4));
+ srclen %= POLY1305_BLOCK_SIZE * 4;
+ }
+ while (srclen >= POLY1305_BLOCK_SIZE) {
+ vsx_begin();
+ poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1);
+ vsx_end();
+ srclen -= POLY1305_BLOCK_SIZE;
+ src += POLY1305_BLOCK_SIZE;
+ }
+ }
+
+ if (unlikely(srclen)) {
+ dctx->buflen = srclen;
+ memcpy(dctx->buf, src, srclen);
+ }
+
+ return 0;
+}
+
+static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ if ((dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ vsx_begin();
+ poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ vsx_end();
+ dctx->buflen = 0;
+ }
+
+ poly1305_emit_64(&dctx->h, &dctx->s, dst);
+ return 0;
+}
+
+static struct shash_alg poly1305_alg = {
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = crypto_poly1305_p10_init,
+ .update = crypto_poly1305_p10_update,
+ .final = crypto_poly1305_p10_final,
+ .descsize = sizeof(struct poly1305_desc_ctx),
+ .base = {
+ .cra_name = "poly1305",
+ .cra_driver_name = "poly1305-p10",
+ .cra_priority = 300,
+ .cra_blocksize = POLY1305_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ },
+};
+
+static int __init poly1305_p10_init(void)
+{
+ return crypto_register_shash(&poly1305_alg);
+}
+
+static void __exit poly1305_p10_exit(void)
+{
+ crypto_unregister_shash(&poly1305_alg);
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init);
+module_exit(poly1305_p10_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
+MODULE_DESCRIPTION("Optimized Poly1305 for P10");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-p10");
diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/crypto/poly1305-p10le_64.S
new file mode 100644
index 000000000000..a3c1987f1ecd
--- /dev/null
+++ b/arch/powerpc/crypto/poly1305-p10le_64.S
@@ -0,0 +1,1075 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#
+# Accelerated poly1305 implementation for ppc64le.
+#
+# Copyright 2023- IBM Corp. All rights reserved
+#
+#===================================================================================
+# Written by Danny Tsen <dtsen@us.ibm.com>
+#
+# Poly1305 - this version mainly uses vector/VSX and scalar instructions
+# - 26-bit limbs
+# - handles multiple 64-byte blocks.
+#
+# Block size 16 bytes
+# key = (r, s)
+# clamp r &= 0x0FFFFFFC0FFFFFFC 0x0FFFFFFC0FFFFFFF
+# p = 2^130 - 5
+# a += m
+# a = (r + a) % p
+# a += s
+#
+# Improve performance by breaking the polynomial down into the sum of products with
+# h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r
+#
+# 07/22/21 - this revision is based on the above sum of products. Set up r^4, r^3, r^2, r and s3, s2, s1, s0
+# to 9 vectors for multiplications.
+#
+# setup r^4, r^3, r^2, r vectors
+# vs [r^1, r^3, r^2, r^4]
+# vs0 = [r0,.....]
+# vs1 = [r1,.....]
+# vs2 = [r2,.....]
+# vs3 = [r3,.....]
+# vs4 = [r4,.....]
+# vs5 = [r1*5,...]
+# vs6 = [r2*5,...]
+# vs7 = [r3*5,...]
+# vs8 = [r4*5,...]
+#
+# Each word in a vector consists of one "r/s" member of [a * r/s].
+#
+# r0, r4*5, r3*5, r2*5, r1*5;
+# r1, r0, r4*5, r3*5, r2*5;
+# r2, r1, r0, r4*5, r3*5;
+# r3, r2, r1, r0, r4*5;
+# r4, r3, r2, r1, r0 ;
+#
+#
+# poly1305_p10le_4blocks(uint8_t *state, const uint8_t *m, uint32_t mlen)
+# state = accumulator h and 32-byte key (r, s)
+# r3 = state
+# r4 = m
+# r5 = mlen
+#
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
+#include <linux/linkage.h>
+
+.machine "any"
+
+.text
+
+.macro SAVE_GPR GPR OFFSET FRAME
+ std \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro SAVE_VRS VRS OFFSET FRAME
+ li 16, \OFFSET
+ stvx \VRS, 16, \FRAME
+.endm
+
+.macro SAVE_VSX VSX OFFSET FRAME
+ li 16, \OFFSET
+ stxvx \VSX, 16, \FRAME
+.endm
+
+.macro RESTORE_GPR GPR OFFSET FRAME
+ ld \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro RESTORE_VRS VRS OFFSET FRAME
+ li 16, \OFFSET
+ lvx \VRS, 16, \FRAME
+.endm
+
+.macro RESTORE_VSX VSX OFFSET FRAME
+ li 16, \OFFSET
+ lxvx \VSX, 16, \FRAME
+.endm
+
+.macro SAVE_REGS
+ mflr 0
+ std 0, 16(1)
+ stdu 1,-752(1)
+
+ SAVE_GPR 14, 112, 1
+ SAVE_GPR 15, 120, 1
+ SAVE_GPR 16, 128, 1
+ SAVE_GPR 17, 136, 1
+ SAVE_GPR 18, 144, 1
+ SAVE_GPR 19, 152, 1
+ SAVE_GPR 20, 160, 1
+ SAVE_GPR 21, 168, 1
+ SAVE_GPR 22, 176, 1
+ SAVE_GPR 23, 184, 1
+ SAVE_GPR 24, 192, 1
+ SAVE_GPR 25, 200, 1
+ SAVE_GPR 26, 208, 1
+ SAVE_GPR 27, 216, 1
+ SAVE_GPR 28, 224, 1
+ SAVE_GPR 29, 232, 1
+ SAVE_GPR 30, 240, 1
+ SAVE_GPR 31, 248, 1
+
+ addi 9, 1, 256
+ SAVE_VRS 20, 0, 9
+ SAVE_VRS 21, 16, 9
+ SAVE_VRS 22, 32, 9
+ SAVE_VRS 23, 48, 9
+ SAVE_VRS 24, 64, 9
+ SAVE_VRS 25, 80, 9
+ SAVE_VRS 26, 96, 9
+ SAVE_VRS 27, 112, 9
+ SAVE_VRS 28, 128, 9
+ SAVE_VRS 29, 144, 9
+ SAVE_VRS 30, 160, 9
+ SAVE_VRS 31, 176, 9
+
+ SAVE_VSX 14, 192, 9
+ SAVE_VSX 15, 208, 9
+ SAVE_VSX 16, 224, 9
+ SAVE_VSX 17, 240, 9
+ SAVE_VSX 18, 256, 9
+ SAVE_VSX 19, 272, 9
+ SAVE_VSX 20, 288, 9
+ SAVE_VSX 21, 304, 9
+ SAVE_VSX 22, 320, 9
+ SAVE_VSX 23, 336, 9
+ SAVE_VSX 24, 352, 9
+ SAVE_VSX 25, 368, 9
+ SAVE_VSX 26, 384, 9
+ SAVE_VSX 27, 400, 9
+ SAVE_VSX 28, 416, 9
+ SAVE_VSX 29, 432, 9
+ SAVE_VSX 30, 448, 9
+ SAVE_VSX 31, 464, 9
+.endm # SAVE_REGS
+
+.macro RESTORE_REGS
+ addi 9, 1, 256
+ RESTORE_VRS 20, 0, 9
+ RESTORE_VRS 21, 16, 9
+ RESTORE_VRS 22, 32, 9
+ RESTORE_VRS 23, 48, 9
+ RESTORE_VRS 24, 64, 9
+ RESTORE_VRS 25, 80, 9
+ RESTORE_VRS 26, 96, 9
+ RESTORE_VRS 27, 112, 9
+ RESTORE_VRS 28, 128, 9
+ RESTORE_VRS 29, 144, 9
+ RESTORE_VRS 30, 160, 9
+ RESTORE_VRS 31, 176, 9
+
+ RESTORE_VSX 14, 192, 9
+ RESTORE_VSX 15, 208, 9
+ RESTORE_VSX 16, 224, 9
+ RESTORE_VSX 17, 240, 9
+ RESTORE_VSX 18, 256, 9
+ RESTORE_VSX 19, 272, 9
+ RESTORE_VSX 20, 288, 9
+ RESTORE_VSX 21, 304, 9
+ RESTORE_VSX 22, 320, 9
+ RESTORE_VSX 23, 336, 9
+ RESTORE_VSX 24, 352, 9
+ RESTORE_VSX 25, 368, 9
+ RESTORE_VSX 26, 384, 9
+ RESTORE_VSX 27, 400, 9
+ RESTORE_VSX 28, 416, 9
+ RESTORE_VSX 29, 432, 9
+ RESTORE_VSX 30, 448, 9
+ RESTORE_VSX 31, 464, 9
+
+ RESTORE_GPR 14, 112, 1
+ RESTORE_GPR 15, 120, 1
+ RESTORE_GPR 16, 128, 1
+ RESTORE_GPR 17, 136, 1
+ RESTORE_GPR 18, 144, 1
+ RESTORE_GPR 19, 152, 1
+ RESTORE_GPR 20, 160, 1
+ RESTORE_GPR 21, 168, 1
+ RESTORE_GPR 22, 176, 1
+ RESTORE_GPR 23, 184, 1
+ RESTORE_GPR 24, 192, 1
+ RESTORE_GPR 25, 200, 1
+ RESTORE_GPR 26, 208, 1
+ RESTORE_GPR 27, 216, 1
+ RESTORE_GPR 28, 224, 1
+ RESTORE_GPR 29, 232, 1
+ RESTORE_GPR 30, 240, 1
+ RESTORE_GPR 31, 248, 1
+
+ addi 1, 1, 752
+ ld 0, 16(1)
+ mtlr 0
+.endm # RESTORE_REGS
+
+#
+# p[0] = a0*r0 + a1*r4*5 + a2*r3*5 + a3*r2*5 + a4*r1*5;
+# p[1] = a0*r1 + a1*r0 + a2*r4*5 + a3*r3*5 + a4*r2*5;
+# p[2] = a0*r2 + a1*r1 + a2*r0 + a3*r4*5 + a4*r3*5;
+# p[3] = a0*r3 + a1*r2 + a2*r1 + a3*r0 + a4*r4*5;
+# p[4] = a0*r4 + a1*r3 + a2*r2 + a3*r1 + a4*r0 ;
+#
+# [r^2, r^3, r^1, r^4]
+# [m3, m2, m4, m1]
+#
+# multiply odd and even words
+.macro mul_odd
+ vmulouw 14, 4, 26
+ vmulouw 10, 5, 3
+ vmulouw 11, 6, 2
+ vmulouw 12, 7, 1
+ vmulouw 13, 8, 0
+ vmulouw 15, 4, 27
+ vaddudm 14, 14, 10
+ vaddudm 14, 14, 11
+ vmulouw 10, 5, 26
+ vmulouw 11, 6, 3
+ vaddudm 14, 14, 12
+ vaddudm 14, 14, 13 # x0
+ vaddudm 15, 15, 10
+ vaddudm 15, 15, 11
+ vmulouw 12, 7, 2
+ vmulouw 13, 8, 1
+ vaddudm 15, 15, 12
+ vaddudm 15, 15, 13 # x1
+ vmulouw 16, 4, 28
+ vmulouw 10, 5, 27
+ vmulouw 11, 6, 26
+ vaddudm 16, 16, 10
+ vaddudm 16, 16, 11
+ vmulouw 12, 7, 3
+ vmulouw 13, 8, 2
+ vaddudm 16, 16, 12
+ vaddudm 16, 16, 13 # x2
+ vmulouw 17, 4, 29
+ vmulouw 10, 5, 28
+ vmulouw 11, 6, 27
+ vaddudm 17, 17, 10
+ vaddudm 17, 17, 11
+ vmulouw 12, 7, 26
+ vmulouw 13, 8, 3
+ vaddudm 17, 17, 12
+ vaddudm 17, 17, 13 # x3
+ vmulouw 18, 4, 30
+ vmulouw 10, 5, 29
+ vmulouw 11, 6, 28
+ vaddudm 18, 18, 10
+ vaddudm 18, 18, 11
+ vmulouw 12, 7, 27
+ vmulouw 13, 8, 26
+ vaddudm 18, 18, 12
+ vaddudm 18, 18, 13 # x4
+.endm
+
+.macro mul_even
+ vmuleuw 9, 4, 26
+ vmuleuw 10, 5, 3
+ vmuleuw 11, 6, 2
+ vmuleuw 12, 7, 1
+ vmuleuw 13, 8, 0
+ vaddudm 14, 14, 9
+ vaddudm 14, 14, 10
+ vaddudm 14, 14, 11
+ vaddudm 14, 14, 12
+ vaddudm 14, 14, 13 # x0
+
+ vmuleuw 9, 4, 27
+ vmuleuw 10, 5, 26
+ vmuleuw 11, 6, 3
+ vmuleuw 12, 7, 2
+ vmuleuw 13, 8, 1
+ vaddudm 15, 15, 9
+ vaddudm 15, 15, 10
+ vaddudm 15, 15, 11
+ vaddudm 15, 15, 12
+ vaddudm 15, 15, 13 # x1
+
+ vmuleuw 9, 4, 28
+ vmuleuw 10, 5, 27
+ vmuleuw 11, 6, 26
+ vmuleuw 12, 7, 3
+ vmuleuw 13, 8, 2
+ vaddudm 16, 16, 9
+ vaddudm 16, 16, 10
+ vaddudm 16, 16, 11
+ vaddudm 16, 16, 12
+ vaddudm 16, 16, 13 # x2
+
+ vmuleuw 9, 4, 29
+ vmuleuw 10, 5, 28
+ vmuleuw 11, 6, 27
+ vmuleuw 12, 7, 26
+ vmuleuw 13, 8, 3
+ vaddudm 17, 17, 9
+ vaddudm 17, 17, 10
+ vaddudm 17, 17, 11
+ vaddudm 17, 17, 12
+ vaddudm 17, 17, 13 # x3
+
+ vmuleuw 9, 4, 30
+ vmuleuw 10, 5, 29
+ vmuleuw 11, 6, 28
+ vmuleuw 12, 7, 27
+ vmuleuw 13, 8, 26
+ vaddudm 18, 18, 9
+ vaddudm 18, 18, 10
+ vaddudm 18, 18, 11
+ vaddudm 18, 18, 12
+ vaddudm 18, 18, 13 # x4
+.endm
+
+#
+# poly1305_setup_r
+#
+# setup r^4, r^3, r^2, r vectors
+# [r, r^3, r^2, r^4]
+# vs0 = [r0,...]
+# vs1 = [r1,...]
+# vs2 = [r2,...]
+# vs3 = [r3,...]
+# vs4 = [r4,...]
+# vs5 = [r4*5,...]
+# vs6 = [r3*5,...]
+# vs7 = [r2*5,...]
+# vs8 = [r1*5,...]
+#
+# r0, r4*5, r3*5, r2*5, r1*5;
+# r1, r0, r4*5, r3*5, r2*5;
+# r2, r1, r0, r4*5, r3*5;
+# r3, r2, r1, r0, r4*5;
+# r4, r3, r2, r1, r0 ;
+#
+.macro poly1305_setup_r
+
+ # save r
+ xxlor 26, 58, 58
+ xxlor 27, 59, 59
+ xxlor 28, 60, 60
+ xxlor 29, 61, 61
+ xxlor 30, 62, 62
+
+ xxlxor 31, 31, 31
+
+# [r, r^3, r^2, r^4]
+ # compute r^2
+ vmr 4, 26
+ vmr 5, 27
+ vmr 6, 28
+ vmr 7, 29
+ vmr 8, 30
+ bl do_mul # r^2 r^1
+ xxpermdi 58, 58, 36, 0x3 # r0
+ xxpermdi 59, 59, 37, 0x3 # r1
+ xxpermdi 60, 60, 38, 0x3 # r2
+ xxpermdi 61, 61, 39, 0x3 # r3
+ xxpermdi 62, 62, 40, 0x3 # r4
+ xxpermdi 36, 36, 36, 0x3
+ xxpermdi 37, 37, 37, 0x3
+ xxpermdi 38, 38, 38, 0x3
+ xxpermdi 39, 39, 39, 0x3
+ xxpermdi 40, 40, 40, 0x3
+ vspltisb 13, 2
+ vsld 9, 27, 13
+ vsld 10, 28, 13
+ vsld 11, 29, 13
+ vsld 12, 30, 13
+ vaddudm 0, 9, 27
+ vaddudm 1, 10, 28
+ vaddudm 2, 11, 29
+ vaddudm 3, 12, 30
+
+ bl do_mul # r^4 r^3
+ vmrgow 26, 26, 4
+ vmrgow 27, 27, 5
+ vmrgow 28, 28, 6
+ vmrgow 29, 29, 7
+ vmrgow 30, 30, 8
+ vspltisb 13, 2
+ vsld 9, 27, 13
+ vsld 10, 28, 13
+ vsld 11, 29, 13
+ vsld 12, 30, 13
+ vaddudm 0, 9, 27
+ vaddudm 1, 10, 28
+ vaddudm 2, 11, 29
+ vaddudm 3, 12, 30
+
+ # r^2 r^4
+ xxlor 0, 58, 58
+ xxlor 1, 59, 59
+ xxlor 2, 60, 60
+ xxlor 3, 61, 61
+ xxlor 4, 62, 62
+ xxlor 5, 32, 32
+ xxlor 6, 33, 33
+ xxlor 7, 34, 34
+ xxlor 8, 35, 35
+
+ vspltw 9, 26, 3
+ vspltw 10, 26, 2
+ vmrgow 26, 10, 9
+ vspltw 9, 27, 3
+ vspltw 10, 27, 2
+ vmrgow 27, 10, 9
+ vspltw 9, 28, 3
+ vspltw 10, 28, 2
+ vmrgow 28, 10, 9
+ vspltw 9, 29, 3
+ vspltw 10, 29, 2
+ vmrgow 29, 10, 9
+ vspltw 9, 30, 3
+ vspltw 10, 30, 2
+ vmrgow 30, 10, 9
+
+ vsld 9, 27, 13
+ vsld 10, 28, 13
+ vsld 11, 29, 13
+ vsld 12, 30, 13
+ vaddudm 0, 9, 27
+ vaddudm 1, 10, 28
+ vaddudm 2, 11, 29
+ vaddudm 3, 12, 30
+.endm
+
+SYM_FUNC_START_LOCAL(do_mul)
+ mul_odd
+
+ # do reduction ( h %= p )
+ # carry reduction
+ vspltisb 9, 2
+ vsrd 10, 14, 31
+ vsrd 11, 17, 31
+ vand 7, 17, 25
+ vand 4, 14, 25
+ vaddudm 18, 18, 11
+ vsrd 12, 18, 31
+ vaddudm 15, 15, 10
+
+ vsrd 11, 15, 31
+ vand 8, 18, 25
+ vand 5, 15, 25
+ vaddudm 4, 4, 12
+ vsld 10, 12, 9
+ vaddudm 6, 16, 11
+
+ vsrd 13, 6, 31
+ vand 6, 6, 25
+ vaddudm 4, 4, 10
+ vsrd 10, 4, 31
+ vaddudm 7, 7, 13
+
+ vsrd 11, 7, 31
+ vand 7, 7, 25
+ vand 4, 4, 25
+ vaddudm 5, 5, 10
+ vaddudm 8, 8, 11
+ blr
+SYM_FUNC_END(do_mul)
+
+#
+# init key
+#
+.macro do_poly1305_init
+ addis 10, 2, rmask@toc@ha
+ addi 10, 10, rmask@toc@l
+
+ ld 11, 0(10)
+ ld 12, 8(10)
+
+ li 14, 16
+ li 15, 32
+ addis 10, 2, cnum@toc@ha
+ addi 10, 10, cnum@toc@l
+ lvx 25, 0, 10 # v25 - mask
+ lvx 31, 14, 10 # v31 = 1a
+ lvx 19, 15, 10 # v19 = 1 << 24
+ lxv 24, 48(10) # vs24
+ lxv 25, 64(10) # vs25
+
+ # initialize
+ # load key from r3 to vectors
+ ld 9, 24(3)
+ ld 10, 32(3)
+ and. 9, 9, 11
+ and. 10, 10, 12
+
+ # break 26 bits
+ extrdi 14, 9, 26, 38
+ extrdi 15, 9, 26, 12
+ extrdi 16, 9, 12, 0
+ mtvsrdd 58, 0, 14
+ insrdi 16, 10, 14, 38
+ mtvsrdd 59, 0, 15
+ extrdi 17, 10, 26, 24
+ mtvsrdd 60, 0, 16
+ extrdi 18, 10, 24, 0
+ mtvsrdd 61, 0, 17
+ mtvsrdd 62, 0, 18
+
+ # r1 = r1 * 5, r2 = r2 * 5, r3 = r3 * 5, r4 = r4 * 5
+ li 9, 5
+ mtvsrdd 36, 0, 9
+ vmulouw 0, 27, 4 # v0 = rr0
+ vmulouw 1, 28, 4 # v1 = rr1
+ vmulouw 2, 29, 4 # v2 = rr2
+ vmulouw 3, 30, 4 # v3 = rr3
+.endm
+
+#
+# poly1305_p10le_4blocks(uint8_t *state, const uint8_t *m, uint32_t mlen)
+# state = accumulator h and 32-byte key (r, s)
+# r3 = state
+# r4 = m
+# r5 = mlen
+#
+SYM_FUNC_START(poly1305_p10le_4blocks)
+.align 5
+ cmpdi 5, 64
+ blt Out_no_poly1305
+
+ SAVE_REGS
+
+ do_poly1305_init
+
+ li 21, 0 # counter to message
+
+ poly1305_setup_r
+
+ # load previous H state from r3
+ # break/convert it to 26-bit limbs
+ ld 9, 0(3)
+ ld 10, 8(3)
+ ld 19, 16(3)
+ sldi 19, 19, 24
+ mtvsrdd 41, 0, 19
+ extrdi 14, 9, 26, 38
+ extrdi 15, 9, 26, 12
+ extrdi 16, 9, 12, 0
+ mtvsrdd 36, 0, 14
+ insrdi 16, 10, 14, 38
+ mtvsrdd 37, 0, 15
+ extrdi 17, 10, 26, 24
+ mtvsrdd 38, 0, 16
+ extrdi 18, 10, 24, 0
+ mtvsrdd 39, 0, 17
+ mtvsrdd 40, 0, 18
+ vor 8, 8, 9
+
+ # input m1 m2
+ add 20, 4, 21
+ xxlor 49, 24, 24
+ xxlor 50, 25, 25
+ lxvw4x 43, 0, 20
+ addi 17, 20, 16
+ lxvw4x 44, 0, 17
+ vperm 14, 11, 12, 17
+ vperm 15, 11, 12, 18
+ vand 9, 14, 25 # a0
+ vsrd 10, 14, 31 # >> 26
+ vsrd 11, 10, 31 # 12 bits left
+ vand 10, 10, 25 # a1
+ vspltisb 13, 12
+ vand 16, 15, 25
+ vsld 12, 16, 13
+ vor 11, 11, 12
+ vand 11, 11, 25 # a2
+ vspltisb 13, 14
+ vsrd 12, 15, 13 # >> 14
+ vsrd 13, 12, 31 # >> 26, a4
+ vand 12, 12, 25 # a3
+
+ vaddudm 20, 4, 9
+ vaddudm 21, 5, 10
+ vaddudm 22, 6, 11
+ vaddudm 23, 7, 12
+ vaddudm 24, 8, 13
+
+ # m3 m4
+ addi 17, 17, 16
+ lxvw4x 43, 0, 17
+ addi 17, 17, 16
+ lxvw4x 44, 0, 17
+ vperm 14, 11, 12, 17
+ vperm 15, 11, 12, 18
+ vand 9, 14, 25 # a0
+ vsrd 10, 14, 31 # >> 26
+ vsrd 11, 10, 31 # 12 bits left
+ vand 10, 10, 25 # a1
+ vspltisb 13, 12
+ vand 16, 15, 25
+ vsld 12, 16, 13
+ vspltisb 13, 14
+ vor 11, 11, 12
+ vand 11, 11, 25 # a2
+ vsrd 12, 15, 13 # >> 14
+ vsrd 13, 12, 31 # >> 26, a4
+ vand 12, 12, 25 # a3
+
+ # Smash 4 message blocks into 5 vectors of [m4, m2, m3, m1]
+ vmrgow 4, 9, 20
+ vmrgow 5, 10, 21
+ vmrgow 6, 11, 22
+ vmrgow 7, 12, 23
+ vmrgow 8, 13, 24
+ vaddudm 8, 8, 19
+
+ addi 5, 5, -64 # len -= 64
+ addi 21, 21, 64 # offset += 64
+
+ li 9, 64
+ divdu 31, 5, 9
+
+ cmpdi 31, 0
+ ble Skip_block_loop
+
+ mtctr 31
+
+# h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r
+# Rewrite the polynomial sum of products as follows,
+# h1 = (h0 + m1) * r^2, h2 = (h0 + m2) * r^2
+# h3 = (h1 + m3) * r^2, h4 = (h2 + m4) * r^2 --> (h0 + m1) r^4 + (h3 + m3) r^2, (h0 + m2) r^4 + (h0 + m4) r^2
+# .... Repeat
+# h5 = (h3 + m5) * r^2, h6 = (h4 + m6) * r^2 -->
+# h7 = (h5 + m7) * r^2, h8 = (h6 + m8) * r^1 --> m5 * r^4 + m6 * r^3 + m7 * r^2 + m8 * r
+#
+loop_4blocks:
+
+ # Multiply odd words and even words
+ mul_odd
+ mul_even
+ # carry reduction
+ vspltisb 9, 2
+ vsrd 10, 14, 31
+ vsrd 11, 17, 31
+ vand 7, 17, 25
+ vand 4, 14, 25
+ vaddudm 18, 18, 11
+ vsrd 12, 18, 31
+ vaddudm 15, 15, 10
+
+ vsrd 11, 15, 31
+ vand 8, 18, 25
+ vand 5, 15, 25
+ vaddudm 4, 4, 12
+ vsld 10, 12, 9
+ vaddudm 6, 16, 11
+
+ vsrd 13, 6, 31
+ vand 6, 6, 25
+ vaddudm 4, 4, 10
+ vsrd 10, 4, 31
+ vaddudm 7, 7, 13
+
+ vsrd 11, 7, 31
+ vand 7, 7, 25
+ vand 4, 4, 25
+ vaddudm 5, 5, 10
+ vaddudm 8, 8, 11
+
+ # input m1 m2 m3 m4
+ add 20, 4, 21
+ xxlor 49, 24, 24
+ xxlor 50, 25, 25
+ lxvw4x 43, 0, 20
+ addi 17, 20, 16
+ lxvw4x 44, 0, 17
+ vperm 14, 11, 12, 17
+ vperm 15, 11, 12, 18
+ addi 17, 17, 16
+ lxvw4x 43, 0, 17
+ addi 17, 17, 16
+ lxvw4x 44, 0, 17
+ vperm 17, 11, 12, 17
+ vperm 18, 11, 12, 18
+
+ vand 20, 14, 25 # a0
+ vand 9, 17, 25 # a0
+ vsrd 21, 14, 31 # >> 26
+ vsrd 22, 21, 31 # 12 bits left
+ vsrd 10, 17, 31 # >> 26
+ vsrd 11, 10, 31 # 12 bits left
+
+ vand 21, 21, 25 # a1
+ vand 10, 10, 25 # a1
+
+ vspltisb 13, 12
+ vand 16, 15, 25
+ vsld 23, 16, 13
+ vor 22, 22, 23
+ vand 22, 22, 25 # a2
+ vand 16, 18, 25
+ vsld 12, 16, 13
+ vor 11, 11, 12
+ vand 11, 11, 25 # a2
+ vspltisb 13, 14
+ vsrd 23, 15, 13 # >> 14
+ vsrd 24, 23, 31 # >> 26, a4
+ vand 23, 23, 25 # a3
+ vsrd 12, 18, 13 # >> 14
+ vsrd 13, 12, 31 # >> 26, a4
+ vand 12, 12, 25 # a3
+
+ vaddudm 4, 4, 20
+ vaddudm 5, 5, 21
+ vaddudm 6, 6, 22
+ vaddudm 7, 7, 23
+ vaddudm 8, 8, 24
+
+ # Smash 4 message blocks into 5 vectors of [m4, m2, m3, m1]
+ vmrgow 4, 9, 4
+ vmrgow 5, 10, 5
+ vmrgow 6, 11, 6
+ vmrgow 7, 12, 7
+ vmrgow 8, 13, 8
+ vaddudm 8, 8, 19
+
+ addi 5, 5, -64 # len -= 64
+ addi 21, 21, 64 # offset += 64
+
+ bdnz loop_4blocks
+
+Skip_block_loop:
+ xxlor 58, 0, 0
+ xxlor 59, 1, 1
+ xxlor 60, 2, 2
+ xxlor 61, 3, 3
+ xxlor 62, 4, 4
+ xxlor 32, 5, 5
+ xxlor 33, 6, 6
+ xxlor 34, 7, 7
+ xxlor 35, 8, 8
+
+ # Multiply odd words and even words
+ mul_odd
+ mul_even
+
+ # Sum the products.
+ xxpermdi 41, 31, 46, 0
+ xxpermdi 42, 31, 47, 0
+ vaddudm 4, 14, 9
+ xxpermdi 36, 31, 36, 3
+ vaddudm 5, 15, 10
+ xxpermdi 37, 31, 37, 3
+ xxpermdi 43, 31, 48, 0
+ vaddudm 6, 16, 11
+ xxpermdi 38, 31, 38, 3
+ xxpermdi 44, 31, 49, 0
+ vaddudm 7, 17, 12
+ xxpermdi 39, 31, 39, 3
+ xxpermdi 45, 31, 50, 0
+ vaddudm 8, 18, 13
+ xxpermdi 40, 31, 40, 3
+
+ # carry reduction
+ vspltisb 9, 2
+ vsrd 10, 4, 31
+ vsrd 11, 7, 31
+ vand 7, 7, 25
+ vand 4, 4, 25
+ vaddudm 8, 8, 11
+ vsrd 12, 8, 31
+ vaddudm 5, 5, 10
+
+ vsrd 11, 5, 31
+ vand 8, 8, 25
+ vand 5, 5, 25
+ vaddudm 4, 4, 12
+ vsld 10, 12, 9
+ vaddudm 6, 6, 11
+
+ vsrd 13, 6, 31
+ vand 6, 6, 25
+ vaddudm 4, 4, 10
+ vsrd 10, 4, 31
+ vaddudm 7, 7, 13
+
+ vsrd 11, 7, 31
+ vand 7, 7, 25
+ vand 4, 4, 25
+ vaddudm 5, 5, 10
+ vsrd 10, 5, 31
+ vand 5, 5, 25
+ vaddudm 6, 6, 10
+ vaddudm 8, 8, 11
+
+ b do_final_update
+
+do_final_update:
+ # combine 26 bit limbs
+ # v4, v5, v6, v7 and v8 are 26 bit vectors
+ vsld 5, 5, 31
+ vor 20, 4, 5
+ vspltisb 11, 12
+ vsrd 12, 6, 11
+ vsld 6, 6, 31
+ vsld 6, 6, 31
+ vor 20, 20, 6
+ vspltisb 11, 14
+ vsld 7, 7, 11
+ vor 21, 7, 12
+ mfvsrld 16, 40 # save last 2 bytes
+ vsld 8, 8, 11
+ vsld 8, 8, 31
+ vor 21, 21, 8
+ mfvsrld 17, 52
+ mfvsrld 19, 53
+ srdi 16, 16, 24
+
+ std 17, 0(3)
+ std 19, 8(3)
+ stw 16, 16(3)
+
+Out_loop:
+ li 3, 0
+
+ RESTORE_REGS
+
+ blr
+
+Out_no_poly1305:
+ li 3, 0
+ blr
+SYM_FUNC_END(poly1305_p10le_4blocks)
+
+#
+# =======================================================================
+# The following functions implement Poly1305 using 64 x 64 bit multiplications.
+#
+SYM_FUNC_START_LOCAL(Poly1305_init_64)
+ # mask 0x0FFFFFFC0FFFFFFC
+ # mask 0x0FFFFFFC0FFFFFFF
+ addis 10, 2, rmask@toc@ha
+ addi 10, 10, rmask@toc@l
+ ld 11, 0(10)
+ ld 12, 8(10)
+
+ # initialize
+ # load key from r3
+ ld 9, 24(3)
+ ld 10, 32(3)
+ and. 9, 9, 11 # clamp mask r0
+ and. 10, 10, 12 # clamp mask r1
+
+ srdi 21, 10, 2
+ add 19, 21, 10 # s1: r19 = r1 + (r1 >> 2) = (r1 >> 2) * 5
+
+ # setup r and s
+ li 25, 0
+ mtvsrdd 32+0, 9, 19 # r0, s1
+ mtvsrdd 32+1, 10, 9 # r1, r0
+ mtvsrdd 32+2, 19, 25 # s1
+ mtvsrdd 32+3, 9, 25 # r0
+
+ blr
+SYM_FUNC_END(Poly1305_init_64)
+
+# Poly1305_mult
+# v6 = (h0, h1), v8 = h2
+# v0 = (r0, s1), v1 = (r1, r0), v2 = s1, v3 = r0
+#
+# Output: v7, v10, v11
+#
+SYM_FUNC_START_LOCAL(Poly1305_mult)
+ #
+ # d0 = h0 * r0 + h1 * s1
+ vmsumudm 7, 6, 0, 9 # h0 * r0, h1 * s1
+
+ # d1 = h0 * r1 + h1 * r0 + h2 * s1
+ vmsumudm 11, 6, 1, 9 # h0 * r1, h1 * r0
+ vmsumudm 10, 8, 2, 11 # d1 += h2 * s1
+
+ # d2 = h2 * r0
+ vmsumudm 11, 8, 3, 9 # d2 = h2 * r0
+ blr
+SYM_FUNC_END(Poly1305_mult)
+
+#
+# carry reduction
+# h %=p
+#
+# Input: v7, v10, v11
+# Output: r27, r28, r29
+#
+SYM_FUNC_START_LOCAL(Carry_reduction)
+ mfvsrld 27, 32+7
+ mfvsrld 28, 32+10
+ mfvsrld 29, 32+11
+ mfvsrd 20, 32+7 # h0.h
+ mfvsrd 21, 32+10 # h1.h
+
+ addc 28, 28, 20
+ adde 29, 29, 21
+ srdi 22, 29, 0x2
+ sldi 23, 22, 0x2
+ add 23, 23, 22 # (h2 & 3) * 5
+ addc 27, 27, 23 # h0
+ addze 28, 28 # h1
+ andi. 29, 29, 0x3 # h2
+ blr
+SYM_FUNC_END(Carry_reduction)
+
+#
+# poly1305 multiplication
+# h *= r, h %= p
+# d0 = h0 * r0 + h1 * s1
+# d1 = h0 * r1 + h1 * r0 + h2 * s1
+# d2 = h2 * r0
+#
+#
+# poly1305_64s(unsigned char *state, const byte *src, size_t len, int highbit)
+# - no highbit if final leftover block (highbit = 0)
+#
+SYM_FUNC_START(poly1305_64s)
+ cmpdi 5, 0
+ ble Out_no_poly1305_64
+
+ mflr 0
+ std 0, 16(1)
+ stdu 1,-400(1)
+
+ SAVE_GPR 14, 112, 1
+ SAVE_GPR 15, 120, 1
+ SAVE_GPR 16, 128, 1
+ SAVE_GPR 17, 136, 1
+ SAVE_GPR 18, 144, 1
+ SAVE_GPR 19, 152, 1
+ SAVE_GPR 20, 160, 1
+ SAVE_GPR 21, 168, 1
+ SAVE_GPR 22, 176, 1
+ SAVE_GPR 23, 184, 1
+ SAVE_GPR 24, 192, 1
+ SAVE_GPR 25, 200, 1
+ SAVE_GPR 26, 208, 1
+ SAVE_GPR 27, 216, 1
+ SAVE_GPR 28, 224, 1
+ SAVE_GPR 29, 232, 1
+ SAVE_GPR 30, 240, 1
+ SAVE_GPR 31, 248, 1
+
+ # Init poly1305
+ bl Poly1305_init_64
+
+ li 25, 0 # offset to inp and outp
+
+ add 11, 25, 4
+
+ # load h
+ # h0, h1, h2?
+ ld 27, 0(3)
+ ld 28, 8(3)
+ lwz 29, 16(3)
+
+ li 30, 16
+ divdu 31, 5, 30
+
+ mtctr 31
+
+ mr 24, 6 # highbit
+
+Loop_block_64:
+ vxor 9, 9, 9
+
+ ld 20, 0(11)
+ ld 21, 8(11)
+ addi 11, 11, 16
+
+ addc 27, 27, 20
+ adde 28, 28, 21
+ adde 29, 29, 24
+
+ li 22, 0
+ mtvsrdd 32+6, 27, 28 # h0, h1
+ mtvsrdd 32+8, 29, 22 # h2
+
+ bl Poly1305_mult
+
+ bl Carry_reduction
+
+ bdnz Loop_block_64
+
+ std 27, 0(3)
+ std 28, 8(3)
+ stw 29, 16(3)
+
+ li 3, 0
+
+ RESTORE_GPR 14, 112, 1
+ RESTORE_GPR 15, 120, 1
+ RESTORE_GPR 16, 128, 1
+ RESTORE_GPR 17, 136, 1
+ RESTORE_GPR 18, 144, 1
+ RESTORE_GPR 19, 152, 1
+ RESTORE_GPR 20, 160, 1
+ RESTORE_GPR 21, 168, 1
+ RESTORE_GPR 22, 176, 1
+ RESTORE_GPR 23, 184, 1
+ RESTORE_GPR 24, 192, 1
+ RESTORE_GPR 25, 200, 1
+ RESTORE_GPR 26, 208, 1
+ RESTORE_GPR 27, 216, 1
+ RESTORE_GPR 28, 224, 1
+ RESTORE_GPR 29, 232, 1
+ RESTORE_GPR 30, 240, 1
+ RESTORE_GPR 31, 248, 1
+
+ addi 1, 1, 400
+ ld 0, 16(1)
+ mtlr 0
+
+ blr
+
+Out_no_poly1305_64:
+ li 3, 0
+ blr
+SYM_FUNC_END(poly1305_64s)
+
+#
+# Input: r3 = h, r4 = s, r5 = mac
+# mac = h + s
+#
+SYM_FUNC_START(poly1305_emit_64)
+ ld 10, 0(3)
+ ld 11, 8(3)
+ ld 12, 16(3)
+
+ # compare modulus
+ # h + 5 + (-p)
+ mr 6, 10
+ mr 7, 11
+ mr 8, 12
+ addic. 6, 6, 5
+ addze 7, 7
+ addze 8, 8
+ srdi 9, 8, 2 # overflow?
+ cmpdi 9, 0
+ beq Skip_h64
+ mr 10, 6
+ mr 11, 7
+ mr 12, 8
+
+Skip_h64:
+ ld 6, 0(4)
+ ld 7, 8(4)
+ addc 10, 10, 6
+ adde 11, 11, 7
+ addze 12, 12
+
+ std 10, 0(5)
+ std 11, 8(5)
+ blr
+SYM_FUNC_END(poly1305_emit_64)
+
+SYM_DATA_START_LOCAL(RMASK)
+.align 5
+rmask:
+.byte 0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f
+cnum:
+.long 0x03ffffff, 0x00000000, 0x03ffffff, 0x00000000
+.long 0x1a, 0x00, 0x1a, 0x00
+.long 0x01000000, 0x01000000, 0x01000000, 0x01000000
+.long 0x00010203, 0x04050607, 0x10111213, 0x14151617
+.long 0x08090a0b, 0x0c0d0e0f, 0x18191a1b, 0x1c1d1e1f
+SYM_DATA_END(RMASK)
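For reference, a plain C sketch (not part of the patch; function names are hypothetical) of the arithmetic the assembly above implements: the 26-bit limb split used by the 4-block vector path (the extrdi/insrdi sequences and the 1 << 24 constant in cnum), and the 2x64-bit block step that poly1305_64s, Poly1305_mult and Carry_reduction perform with vmsumudm and addc/adde. It assumes a little-endian host and a compiler providing unsigned __int128.

#include <stdint.h>
#include <string.h>

typedef unsigned __int128 u128;

/* Split one 16-byte little-endian block into five 26-bit limbs and set the 2^128 bit. */
static void poly1305_split26(const uint8_t m[16], uint64_t a[5])
{
	uint64_t lo, hi;

	memcpy(&lo, m, 8);
	memcpy(&hi, m + 8, 8);

	a[0] = lo & 0x3ffffff;
	a[1] = (lo >> 26) & 0x3ffffff;
	a[2] = ((lo >> 52) | (hi << 12)) & 0x3ffffff;
	a[3] = (hi >> 14) & 0x3ffffff;
	a[4] = (hi >> 40) | (1ULL << 24);	/* 2^128 lands in bit 24 of limb 4 */
}

/* One Poly1305 block step with h kept as two 64-bit words plus a few bits in h[2]. */
static void poly1305_block64(uint64_t h[3], const uint64_t m[2],
			     uint64_t r0, uint64_t r1, uint64_t highbit)
{
	uint64_t s1 = r1 + (r1 >> 2);	/* == (r1 >> 2) * 5, since r1 is clamped */
	u128 d0, d1, d2;
	uint64_t c;

	/* h += m (+ 2^128 unless this is the final partial block) */
	d0 = (u128)h[0] + m[0];
	d1 = (u128)h[1] + (uint64_t)(d0 >> 64) + m[1];
	h[0] = (uint64_t)d0;
	h[1] = (uint64_t)d1;
	h[2] += (uint64_t)(d1 >> 64) + highbit;

	/* h *= r, partially reduced mod 2^130 - 5 */
	d0 = (u128)h[0] * r0 + (u128)h[1] * s1;
	d1 = (u128)h[0] * r1 + (u128)h[1] * r0 + (u128)h[2] * s1;
	d2 = (u128)h[2] * r0;

	h[0] = (uint64_t)d0;
	d1 += (uint64_t)(d0 >> 64);
	h[1] = (uint64_t)d1;
	d2 += (uint64_t)(d1 >> 64);
	h[2] = (uint64_t)d2;

	/* fold the bits above 2^130 back in: each 2^130 is congruent to 5 */
	c = (h[2] >> 2) + (h[2] & ~3ULL);
	h[2] &= 3;
	h[0] += c;
	c = (h[0] < c);
	h[1] += c;
	h[2] += (h[1] < c);
}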
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 8c0b08b7a80e..20e50586e8a2 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -538,3 +538,4 @@
449 common futex_waitv sys_futex_waitv
450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
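A hypothetical userspace sketch (not part of the patch) exercising the syscall wired up above; 452 is the number added to the table, and fchmodat2() extends fchmodat() with a flags argument such as AT_SYMLINK_NOFOLLOW. The path is a placeholder.

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_fchmodat2
#define __NR_fchmodat2 452
#endif

int main(void)
{
	/* chmod 0640 on a path relative to the current directory */
	long ret = syscall(__NR_fchmodat2, AT_FDCWD, "example.txt", 0640, 0);

	return ret == 0 ? 0 : 1;
}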
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 0dc85556dec5..ec98e526167e 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -145,6 +145,7 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
static const struct mm_walk_ops subpage_walk_ops = {
.pmd_entry = subpage_walk_pmd_entry,
+ .walk_lock = PGWALK_WRLOCK_VERIFY,
};
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index ea807aa0c31a..38c5be34c895 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -86,7 +86,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
out:
return inode;
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 10e7a7ad175a..bea7b73e895d 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -580,15 +580,15 @@ config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
and Zifencei are supported in binutils from version 2.36 onwards.
To make life easier, and avoid forcing toolchains that default to a
newer ISA spec to version 2.2, relax the check to binutils >= 2.36.
- For clang < 17 or GCC < 11.1.0, for which this is not possible, this is
- dealt with in CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC.
+ For clang < 17 or GCC < 11.3.0, where this is not possible or needs
+ special treatment, this is dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC.
config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
def_bool y
depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
- # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=b03be74bad08c382da47e048007a78fa3fb4ef49
- depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110100)
+ # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671
+ depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300)
help
Certain versions of clang and GCC do not support zicsr and zifencei via
-march. This option causes an older ISA spec compatible with these older
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 29e9a0d84b16..8a6a128ec57f 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -21,12 +21,6 @@ extern void efi_init(void);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
-#define arch_efi_call_virt_setup() ({ \
- sync_kernel_mappings(efi_mm.pgd); \
- efi_virtmap_load(); \
- })
-#define arch_efi_call_virt_teardown() efi_virtmap_unload()
-
#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
/* Load initrd anywhere in system RAM */
@@ -46,8 +40,8 @@ static inline unsigned long efi_get_kimg_min_align(void)
#define EFI_KIMG_PREFERRED_ADDRESS efi_get_kimg_min_align()
-void efi_virtmap_load(void);
-void efi_virtmap_unload(void);
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
unsigned long stext_offset(void);
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 3d78930cab51..c5ee07b3df07 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -70,8 +70,9 @@ static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
"csrr %1, " __stringify(CSR_VTYPE) "\n\t"
"csrr %2, " __stringify(CSR_VL) "\n\t"
"csrr %3, " __stringify(CSR_VCSR) "\n\t"
+ "csrr %4, " __stringify(CSR_VLENB) "\n\t"
: "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
- "=r" (dest->vcsr) : :);
+ "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
}
static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
index e17c550986a6..283800130614 100644
--- a/arch/riscv/include/uapi/asm/ptrace.h
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -97,6 +97,7 @@ struct __riscv_v_ext_state {
unsigned long vl;
unsigned long vtype;
unsigned long vcsr;
+ unsigned long vlenb;
void *datap;
/*
* In signal handler, datap will be set a correct user stack offset
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 1d572cf3140f..487303e3ef22 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -25,9 +25,6 @@ enum riscv_regset {
#ifdef CONFIG_FPU
REGSET_F,
#endif
-#ifdef CONFIG_RISCV_ISA_V
- REGSET_V,
-#endif
};
static int riscv_gpr_get(struct task_struct *target,
@@ -84,61 +81,6 @@ static int riscv_fpr_set(struct task_struct *target,
}
#endif
-#ifdef CONFIG_RISCV_ISA_V
-static int riscv_vr_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
-{
- struct __riscv_v_ext_state *vstate = &target->thread.vstate;
-
- if (!riscv_v_vstate_query(task_pt_regs(target)))
- return -EINVAL;
-
- /*
- * Ensure the vector registers have been saved to the memory before
- * copying them to membuf.
- */
- if (target == current)
- riscv_v_vstate_save(current, task_pt_regs(current));
-
- /* Copy vector header from vstate. */
- membuf_write(&to, vstate, offsetof(struct __riscv_v_ext_state, datap));
- membuf_zero(&to, sizeof(vstate->datap));
-
- /* Copy all the vector registers from vstate. */
- return membuf_write(&to, vstate->datap, riscv_v_vsize);
-}
-
-static int riscv_vr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret, size;
- struct __riscv_v_ext_state *vstate = &target->thread.vstate;
-
- if (!riscv_v_vstate_query(task_pt_regs(target)))
- return -EINVAL;
-
- /* Copy rest of the vstate except datap */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate, 0,
- offsetof(struct __riscv_v_ext_state, datap));
- if (unlikely(ret))
- return ret;
-
- /* Skip copy datap. */
- size = sizeof(vstate->datap);
- count -= size;
- ubuf += size;
-
- /* Copy all the vector registers. */
- pos = 0;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap,
- 0, riscv_v_vsize);
- return ret;
-}
-#endif
-
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
.core_note_type = NT_PRSTATUS,
@@ -158,17 +100,6 @@ static const struct user_regset riscv_user_regset[] = {
.set = riscv_fpr_set,
},
#endif
-#ifdef CONFIG_RISCV_ISA_V
- [REGSET_V] = {
- .core_note_type = NT_RISCV_VECTOR,
- .align = 16,
- .n = ((32 * RISCV_MAX_VLENB) +
- sizeof(struct __riscv_v_ext_state)) / sizeof(__u32),
- .size = sizeof(__u32),
- .regset_get = riscv_vr_get,
- .set = riscv_vr_set,
- },
-#endif
};
static const struct user_regset_view riscv_user_native_view = {
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index ea3d61de065b..161d0b34c2cb 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -102,6 +102,7 @@ static const struct mm_walk_ops pageattr_ops = {
.pmd_entry = pageattr_pmd_entry,
.pte_entry = pageattr_pte_entry,
.pte_hole = pageattr_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
};
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 76e362277179..8e4d74f51115 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -3,7 +3,7 @@ obj-y += kernel/
obj-y += mm/
obj-$(CONFIG_KVM) += kvm/
obj-y += crypto/
-obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
+obj-$(CONFIG_S390_HYPFS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-y += net/
obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5b39918b7042..18bf754e1fad 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -174,6 +174,7 @@ config S390
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_FUNCTION_ERROR_INJECTION
+ select HAVE_FUNCTION_GRAPH_RETVAL
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
@@ -512,6 +513,17 @@ config KEXEC_SIG
verification for the corresponding kernel image type being
loaded in order for this to work.
+config CERT_STORE
+ bool "Get user certificates via DIAG320"
+ depends on KEYS
+ select CRYPTO_LIB_SHA256
+ help
+ Enable this option if you want to access user-provided secure boot
+ certificates via DIAG 0x320.
+
+ These certificates will be made available via the keyring named
+ 'cert_store'.
+
config KERNEL_NOBP
def_bool n
prompt "Enable modified branch prediction for the kernel by default"
@@ -743,9 +755,9 @@ config CRASH_DUMP
Crash dump kernels are loaded in the main kernel with kexec-tools
into a specially reserved region and then later executed after
a crash by kdump/kexec.
- Refer to <file:Documentation/s390/zfcpdump.rst> for more details on this.
+ Refer to <file:Documentation/arch/s390/zfcpdump.rst> for more details on this.
This option also enables s390 zfcpdump.
- See also <file:Documentation/s390/zfcpdump.rst>
+ See also <file:Documentation/arch/s390/zfcpdump.rst>
endmenu
@@ -867,13 +879,24 @@ config APPLDATA_NET_SUM
This can also be compiled as a module, which will be called
appldata_net_sum.o.
-config S390_HYPFS_FS
+config S390_HYPFS
def_bool y
+ prompt "s390 hypervisor information"
+ help
+ This provides several binary files at (debugfs)/s390_hypfs/ to
+ provide accounting information in an s390 hypervisor environment.
+
+config S390_HYPFS_FS
+ def_bool n
prompt "s390 hypervisor file system support"
select SYS_HYPERVISOR
+ depends on S390_HYPFS
help
This is a virtual file system intended to provide accounting
- information in an s390 hypervisor environment.
+ information in an s390 hypervisor environment. This file system
+ is deprecated and should not be used.
+
+ Say N if you are unsure.
source "arch/s390/kvm/Kconfig"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 5ed242897b0d..a53a36ee0731 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -119,7 +119,6 @@ export KBUILD_CFLAGS_DECOMPRESSOR
OBJCOPYFLAGS := -O binary
libs-y += arch/s390/lib/
-drivers-y += drivers/s390/
boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 64bd7ac3e35d..b9681cb22753 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -27,6 +27,7 @@ struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
+unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -176,6 +177,7 @@ static unsigned long setup_kernel_memory_layout(void)
unsigned long asce_limit;
unsigned long rte_size;
unsigned long pages;
+ unsigned long vsize;
unsigned long vmax;
pages = ident_map_size / PAGE_SIZE;
@@ -183,19 +185,19 @@ static unsigned long setup_kernel_memory_layout(void)
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
/* choose kernel address space layout: 4 or 3 levels. */
- vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
- if (IS_ENABLED(CONFIG_KASAN) ||
- vmalloc_size > _REGION2_SIZE ||
- vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
- _REGION2_SIZE) {
+ vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
+ MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
+ vsize = size_add(vsize, vmalloc_size);
+ if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
asce_limit = _REGION1_SIZE;
rte_size = _REGION2_SIZE;
} else {
asce_limit = _REGION2_SIZE;
rte_size = _REGION3_SIZE;
}
+
/*
- * forcing modules and vmalloc area under the ultravisor
+ * Forcing modules and vmalloc area under the ultravisor
* secure storage limit, so that any vmalloc allocation
* we do could be used to back secure guest storage.
*/
@@ -204,7 +206,7 @@ static unsigned long setup_kernel_memory_layout(void)
/* force vmalloc and modules below kasan shadow */
vmax = min(vmax, KASAN_SHADOW_START);
#endif
- __memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
+ __memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
sizeof(struct lowcore));
MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
@@ -220,8 +222,9 @@ static unsigned long setup_kernel_memory_layout(void)
pages = SECTION_ALIGN_UP(pages);
/* keep vmemmap_start aligned to a top level region table entry */
vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
- /* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
+ /* maximum mappable address as seen by arch_get_mappable_range() */
+ max_mappable = vmemmap_start;
/* make sure identity map doesn't overlay with vmemmap */
ident_map_size = min(ident_map_size, vmemmap_start);
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
@@ -286,8 +289,9 @@ void startup_kernel(void)
setup_lpp();
safe_addr = mem_safe_offset();
+
/*
- * reserve decompressor memory together with decompression heap, buffer and
+ * Reserve decompressor memory together with decompression heap, buffer and
* memory which might be occupied by uncompressed kernel at default 1Mb
* position (if KASLR is off or failed).
*/
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 3a6a9a88bd38..af2fbe48e16c 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -835,6 +835,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
+CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index b13a5a0d99d2..3f263b767a4c 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -787,6 +787,7 @@ CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
+CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 38349150c96e..8b541e44151d 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -35,7 +35,7 @@
* and padding is also possible, the limits need to be generous.
*/
#define PAES_MIN_KEYSIZE 16
-#define PAES_MAX_KEYSIZE 320
+#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 06f601509ce9..c34854d298f8 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -3,7 +3,12 @@
# Makefile for the linux hypfs filesystem routines.
#
-obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
+obj-$(CONFIG_S390_HYPFS) += hypfs_dbfs.o
+obj-$(CONFIG_S390_HYPFS) += hypfs_diag.o
+obj-$(CONFIG_S390_HYPFS) += hypfs_diag0c.o
+obj-$(CONFIG_S390_HYPFS) += hypfs_sprp.o
+obj-$(CONFIG_S390_HYPFS) += hypfs_vm.o
-s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
-s390_hypfs-objs += hypfs_diag0c.o
+obj-$(CONFIG_S390_HYPFS_FS) += hypfs_diag_fs.o
+obj-$(CONFIG_S390_HYPFS_FS) += hypfs_vm_fs.o
+obj-$(CONFIG_S390_HYPFS_FS) += inode.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index 05f3f9aee5fc..65f4036fd541 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -46,6 +46,15 @@ void hypfs_diag0c_exit(void);
void hypfs_sprp_init(void);
void hypfs_sprp_exit(void);
+int __hypfs_fs_init(void);
+
+static inline int hypfs_fs_init(void)
+{
+ if (IS_ENABLED(CONFIG_S390_HYPFS_FS))
+ return __hypfs_fs_init();
+ return 0;
+}
+
/* debugfs interface */
struct hypfs_dbfs_file;
@@ -69,7 +78,6 @@ struct hypfs_dbfs_file {
struct dentry *dentry;
};
-extern void hypfs_dbfs_init(void);
extern void hypfs_dbfs_exit(void);
extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index f4c7dbfaf8ee..4024599eb448 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -90,12 +90,33 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
debugfs_remove(df->dentry);
}
-void hypfs_dbfs_init(void)
+static int __init hypfs_dbfs_init(void)
{
- dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
-}
+ int rc = -ENODATA;
-void hypfs_dbfs_exit(void)
-{
+ dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
+ if (hypfs_diag_init())
+ goto fail_dbfs_exit;
+ if (hypfs_vm_init())
+ goto fail_hypfs_diag_exit;
+ hypfs_sprp_init();
+ if (hypfs_diag0c_init())
+ goto fail_hypfs_sprp_exit;
+ rc = hypfs_fs_init();
+ if (rc)
+ goto fail_hypfs_diag0c_exit;
+ return 0;
+
+fail_hypfs_diag0c_exit:
+ hypfs_diag0c_exit();
+fail_hypfs_sprp_exit:
+ hypfs_sprp_exit();
+ hypfs_vm_exit();
+fail_hypfs_diag_exit:
+ hypfs_diag_exit();
+ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
+fail_dbfs_exit:
debugfs_remove(dbfs_dir);
+ return rc;
}
+device_initcall(hypfs_dbfs_init)
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index c3be533c4cd3..279b7bba4d43 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -18,188 +18,27 @@
#include <linux/mm.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
+#include "hypfs_diag.h"
#include "hypfs.h"
-#define TMP_SIZE 64 /* size of temporary buffers */
-
#define DBFS_D204_HDR_VERSION 0
-static char *diag224_cpu_names; /* diag 224 name table */
static enum diag204_sc diag204_store_sc; /* used subcode for store */
static enum diag204_format diag204_info_type; /* used diag 204 data format */
static void *diag204_buf; /* 4K aligned buffer for diag204 data */
-static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
static int diag204_buf_pages; /* number of pages for diag204 data */
static struct dentry *dbfs_d204_file;
-/*
- * DIAG 204 member access functions.
- *
- * Since we have two different diag 204 data formats for old and new s390
- * machines, we do not access the structs directly, but use getter functions for
- * each struct member instead. This should make the code more readable.
- */
-
-/* Time information block */
-
-static inline int info_blk_hdr__size(enum diag204_format type)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return sizeof(struct diag204_info_blk_hdr);
- else /* DIAG204_INFO_EXT */
- return sizeof(struct diag204_x_info_blk_hdr);
-}
-
-static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_info_blk_hdr *)hdr)->npar;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_info_blk_hdr *)hdr)->npar;
-}
-
-static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_info_blk_hdr *)hdr)->flags;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_info_blk_hdr *)hdr)->flags;
-}
-
-/* Partition header */
-
-static inline int part_hdr__size(enum diag204_format type)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return sizeof(struct diag204_part_hdr);
- else /* DIAG204_INFO_EXT */
- return sizeof(struct diag204_x_part_hdr);
-}
-
-static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_part_hdr *)hdr)->cpus;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_part_hdr *)hdr)->rcpus;
-}
-
-static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
- char *name)
-{
- if (type == DIAG204_INFO_SIMPLE)
- memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name,
- DIAG204_LPAR_NAME_LEN);
- else /* DIAG204_INFO_EXT */
- memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name,
- DIAG204_LPAR_NAME_LEN);
- EBCASC(name, DIAG204_LPAR_NAME_LEN);
- name[DIAG204_LPAR_NAME_LEN] = 0;
- strim(name);
-}
-
-/* CPU info block */
-
-static inline int cpu_info__size(enum diag204_format type)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return sizeof(struct diag204_cpu_info);
- else /* DIAG204_INFO_EXT */
- return sizeof(struct diag204_x_cpu_info);
-}
-
-static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_cpu_info *)hdr)->ctidx;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_cpu_info *)hdr)->ctidx;
-}
-
-static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
+enum diag204_format diag204_get_info_type(void)
{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_cpu_info *)hdr)->cpu_addr;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_cpu_info *)hdr)->cpu_addr;
+ return diag204_info_type;
}
-static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
+static void diag204_set_info_type(enum diag204_format type)
{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_cpu_info *)hdr)->acc_time;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_cpu_info *)hdr)->acc_time;
-}
-
-static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_cpu_info *)hdr)->lp_time;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_cpu_info *)hdr)->lp_time;
-}
-
-static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return 0; /* online_time not available in simple info */
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_cpu_info *)hdr)->online_time;
-}
-
-/* Physical header */
-
-static inline int phys_hdr__size(enum diag204_format type)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return sizeof(struct diag204_phys_hdr);
- else /* DIAG204_INFO_EXT */
- return sizeof(struct diag204_x_phys_hdr);
-}
-
-static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_phys_hdr *)hdr)->cpus;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_phys_hdr *)hdr)->cpus;
-}
-
-/* Physical CPU info block */
-
-static inline int phys_cpu__size(enum diag204_format type)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return sizeof(struct diag204_phys_cpu);
- else /* DIAG204_INFO_EXT */
- return sizeof(struct diag204_x_phys_cpu);
-}
-
-static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_phys_cpu *)hdr)->cpu_addr;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr;
-}
-
-static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_phys_cpu *)hdr)->mgm_time;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_phys_cpu *)hdr)->mgm_time;
-}
-
-static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
-{
- if (type == DIAG204_INFO_SIMPLE)
- return ((struct diag204_phys_cpu *)hdr)->ctidx;
- else /* DIAG204_INFO_EXT */
- return ((struct diag204_x_phys_cpu *)hdr)->ctidx;
+ diag204_info_type = type;
}
/* Diagnose 204 functions */
@@ -212,43 +51,11 @@ static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
static void diag204_free_buffer(void)
{
- if (!diag204_buf)
- return;
- if (diag204_buf_vmalloc) {
- vfree(diag204_buf_vmalloc);
- diag204_buf_vmalloc = NULL;
- } else {
- free_pages((unsigned long) diag204_buf, 0);
- }
+ vfree(diag204_buf);
diag204_buf = NULL;
}
-static void *page_align_ptr(void *ptr)
-{
- return (void *) PAGE_ALIGN((unsigned long) ptr);
-}
-
-static void *diag204_alloc_vbuf(int pages)
-{
- /* The buffer has to be page aligned! */
- diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
- if (!diag204_buf_vmalloc)
- return ERR_PTR(-ENOMEM);
- diag204_buf = page_align_ptr(diag204_buf_vmalloc);
- diag204_buf_pages = pages;
- return diag204_buf;
-}
-
-static void *diag204_alloc_rbuf(void)
-{
- diag204_buf = (void*)__get_free_pages(GFP_KERNEL,0);
- if (!diag204_buf)
- return ERR_PTR(-ENOMEM);
- diag204_buf_pages = 1;
- return diag204_buf;
-}
-
-static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
+void *diag204_get_buffer(enum diag204_format fmt, int *pages)
{
if (diag204_buf) {
*pages = diag204_buf_pages;
@@ -256,15 +63,19 @@ static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
}
if (fmt == DIAG204_INFO_SIMPLE) {
*pages = 1;
- return diag204_alloc_rbuf();
} else {/* DIAG204_INFO_EXT */
*pages = diag204((unsigned long)DIAG204_SUBC_RSI |
(unsigned long)DIAG204_INFO_EXT, 0, NULL);
if (*pages <= 0)
- return ERR_PTR(-ENOSYS);
- else
- return diag204_alloc_vbuf(*pages);
+ return ERR_PTR(-EOPNOTSUPP);
}
+ diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),
+ PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (!diag204_buf)
+ return ERR_PTR(-ENOMEM);
+ diag204_buf_pages = *pages;
+ return diag204_buf;
}
/*
@@ -291,13 +102,13 @@ static int diag204_probe(void)
if (diag204((unsigned long)DIAG204_SUBC_STIB7 |
(unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = DIAG204_SUBC_STIB7;
- diag204_info_type = DIAG204_INFO_EXT;
+ diag204_set_info_type(DIAG204_INFO_EXT);
goto out;
}
if (diag204((unsigned long)DIAG204_SUBC_STIB6 |
(unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = DIAG204_SUBC_STIB6;
- diag204_info_type = DIAG204_INFO_EXT;
+ diag204_set_info_type(DIAG204_INFO_EXT);
goto out;
}
diag204_free_buffer();
@@ -313,10 +124,10 @@ static int diag204_probe(void)
if (diag204((unsigned long)DIAG204_SUBC_STIB4 |
(unsigned long)DIAG204_INFO_SIMPLE, pages, buf) >= 0) {
diag204_store_sc = DIAG204_SUBC_STIB4;
- diag204_info_type = DIAG204_INFO_SIMPLE;
+ diag204_set_info_type(DIAG204_INFO_SIMPLE);
goto out;
} else {
- rc = -ENOSYS;
+ rc = -EOPNOTSUPP;
goto fail_store;
}
out:
@@ -327,58 +138,13 @@ fail_alloc:
return rc;
}
-static int diag204_do_store(void *buf, int pages)
+int diag204_store(void *buf, int pages)
{
int rc;
- rc = diag204((unsigned long) diag204_store_sc |
- (unsigned long) diag204_info_type, pages, buf);
- return rc < 0 ? -ENOSYS : 0;
-}
-
-static void *diag204_store(void)
-{
- void *buf;
- int pages, rc;
-
- buf = diag204_get_buffer(diag204_info_type, &pages);
- if (IS_ERR(buf))
- goto out;
- rc = diag204_do_store(buf, pages);
- if (rc)
- return ERR_PTR(rc);
-out:
- return buf;
-}
-
-/* Diagnose 224 functions */
-
-static int diag224_get_name_table(void)
-{
- /* memory must be below 2GB */
- diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (!diag224_cpu_names)
- return -ENOMEM;
- if (diag224(diag224_cpu_names)) {
- free_page((unsigned long) diag224_cpu_names);
- return -EOPNOTSUPP;
- }
- EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
- return 0;
-}
-
-static void diag224_delete_name_table(void)
-{
- free_page((unsigned long) diag224_cpu_names);
-}
-
-static int diag224_idx2name(int index, char *name)
-{
- memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN),
- DIAG204_CPU_NAME_LEN);
- name[DIAG204_CPU_NAME_LEN] = 0;
- strim(name);
- return 0;
+ rc = diag204((unsigned long)diag204_store_sc |
+ (unsigned long)diag204_get_info_type(), pages, buf);
+ return rc < 0 ? -EOPNOTSUPP : 0;
}
struct dbfs_d204_hdr {
@@ -403,8 +169,8 @@ static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
base = vzalloc(buf_size);
if (!base)
return -ENOMEM;
- d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
- rc = diag204_do_store(d204->buf, diag204_buf_pages);
+ d204 = PTR_ALIGN(base + sizeof(d204->hdr), PAGE_SIZE) - sizeof(d204->hdr);
+ rc = diag204_store(d204->buf, diag204_buf_pages);
if (rc) {
vfree(base);
return rc;
@@ -433,176 +199,21 @@ __init int hypfs_diag_init(void)
return -ENODATA;
}
- if (diag204_info_type == DIAG204_INFO_EXT)
+ if (diag204_get_info_type() == DIAG204_INFO_EXT)
hypfs_dbfs_create_file(&dbfs_file_d204);
- if (MACHINE_IS_LPAR) {
- rc = diag224_get_name_table();
- if (rc) {
- pr_err("The hardware system does not provide all "
- "functions required by hypfs\n");
- debugfs_remove(dbfs_d204_file);
- return rc;
- }
+ rc = hypfs_diag_fs_init();
+ if (rc) {
+ pr_err("The hardware system does not provide all functions required by hypfs\n");
+ debugfs_remove(dbfs_d204_file);
}
- return 0;
+ return rc;
}
void hypfs_diag_exit(void)
{
debugfs_remove(dbfs_d204_file);
- diag224_delete_name_table();
+ hypfs_diag_fs_exit();
diag204_free_buffer();
hypfs_dbfs_remove_file(&dbfs_file_d204);
}
-
-/*
- * Functions to create the directory structure
- * *******************************************
- */
-
-static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
-{
- struct dentry *cpu_dir;
- char buffer[TMP_SIZE];
- void *rc;
-
- snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
- cpu_info));
- cpu_dir = hypfs_mkdir(cpus_dir, buffer);
- rc = hypfs_create_u64(cpu_dir, "mgmtime",
- cpu_info__acc_time(diag204_info_type, cpu_info) -
- cpu_info__lp_time(diag204_info_type, cpu_info));
- if (IS_ERR(rc))
- return PTR_ERR(rc);
- rc = hypfs_create_u64(cpu_dir, "cputime",
- cpu_info__lp_time(diag204_info_type, cpu_info));
- if (IS_ERR(rc))
- return PTR_ERR(rc);
- if (diag204_info_type == DIAG204_INFO_EXT) {
- rc = hypfs_create_u64(cpu_dir, "onlinetime",
- cpu_info__online_time(diag204_info_type,
- cpu_info));
- if (IS_ERR(rc))
- return PTR_ERR(rc);
- }
- diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
- rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_ERR_OR_ZERO(rc);
-}
-
-static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
-{
- struct dentry *cpus_dir;
- struct dentry *lpar_dir;
- char lpar_name[DIAG204_LPAR_NAME_LEN + 1];
- void *cpu_info;
- int i;
-
- part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
- lpar_name[DIAG204_LPAR_NAME_LEN] = 0;
- lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
- if (IS_ERR(lpar_dir))
- return lpar_dir;
- cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
- if (IS_ERR(cpus_dir))
- return cpus_dir;
- cpu_info = part_hdr + part_hdr__size(diag204_info_type);
- for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
- int rc;
- rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
- if (rc)
- return ERR_PTR(rc);
- cpu_info += cpu_info__size(diag204_info_type);
- }
- return cpu_info;
-}
-
-static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
-{
- struct dentry *cpu_dir;
- char buffer[TMP_SIZE];
- void *rc;
-
- snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
- cpu_info));
- cpu_dir = hypfs_mkdir(cpus_dir, buffer);
- if (IS_ERR(cpu_dir))
- return PTR_ERR(cpu_dir);
- rc = hypfs_create_u64(cpu_dir, "mgmtime",
- phys_cpu__mgm_time(diag204_info_type, cpu_info));
- if (IS_ERR(rc))
- return PTR_ERR(rc);
- diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
- rc = hypfs_create_str(cpu_dir, "type", buffer);
- return PTR_ERR_OR_ZERO(rc);
-}
-
-static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
-{
- int i;
- void *cpu_info;
- struct dentry *cpus_dir;
-
- cpus_dir = hypfs_mkdir(parent_dir, "cpus");
- if (IS_ERR(cpus_dir))
- return cpus_dir;
- cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
- for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
- int rc;
- rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
- if (rc)
- return ERR_PTR(rc);
- cpu_info += phys_cpu__size(diag204_info_type);
- }
- return cpu_info;
-}
-
-int hypfs_diag_create_files(struct dentry *root)
-{
- struct dentry *systems_dir, *hyp_dir;
- void *time_hdr, *part_hdr;
- int i, rc;
- void *buffer, *ptr;
-
- buffer = diag204_store();
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
-
- systems_dir = hypfs_mkdir(root, "systems");
- if (IS_ERR(systems_dir)) {
- rc = PTR_ERR(systems_dir);
- goto err_out;
- }
- time_hdr = (struct x_info_blk_hdr *)buffer;
- part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
- for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
- part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
- if (IS_ERR(part_hdr)) {
- rc = PTR_ERR(part_hdr);
- goto err_out;
- }
- }
- if (info_blk_hdr__flags(diag204_info_type, time_hdr) &
- DIAG204_LPAR_PHYS_FLG) {
- ptr = hypfs_create_phys_files(root, part_hdr);
- if (IS_ERR(ptr)) {
- rc = PTR_ERR(ptr);
- goto err_out;
- }
- }
- hyp_dir = hypfs_mkdir(root, "hyp");
- if (IS_ERR(hyp_dir)) {
- rc = PTR_ERR(hyp_dir);
- goto err_out;
- }
- ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
- if (IS_ERR(ptr)) {
- rc = PTR_ERR(ptr);
- goto err_out;
- }
- rc = 0;
-
-err_out:
- return rc;
-}
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
new file mode 100644
index 000000000000..7090eff27fef
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hypervisor filesystem for Linux on s390. Diag 204 and 224
+ * implementation.
+ *
+ * Copyright IBM Corp. 2006, 2008
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _S390_HYPFS_DIAG_H_
+#define _S390_HYPFS_DIAG_H_
+
+#include <asm/diag.h>
+
+enum diag204_format diag204_get_info_type(void);
+void *diag204_get_buffer(enum diag204_format fmt, int *pages);
+int diag204_store(void *buf, int pages);
+
+int __hypfs_diag_fs_init(void);
+void __hypfs_diag_fs_exit(void);
+
+static inline int hypfs_diag_fs_init(void)
+{
+ if (IS_ENABLED(CONFIG_S390_HYPFS_FS))
+ return __hypfs_diag_fs_init();
+ return 0;
+}
+
+static inline void hypfs_diag_fs_exit(void)
+{
+ if (IS_ENABLED(CONFIG_S390_HYPFS_FS))
+ __hypfs_diag_fs_exit();
+}
+
+#endif /* _S390_HYPFS_DIAG_H_ */
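[Annotation, not part of the patch] This header exports the diag204 buffer handling (diag204_get_buffer(), diag204_store(), diag204_get_info_type()) to the new hypfs_diag_fs.c below, while the IS_ENABLED(CONFIG_S390_HYPFS_FS) wrappers let common code call the fs front end without #ifdefs; with the option disabled the inline stubs fold to constants and __hypfs_diag_fs_init()/__hypfs_diag_fs_exit() are never referenced. A minimal sketch of a consumer, following the pattern hypfs_diag_create_files() uses further down:

        /* Hypothetical consumer -- sketch only. */
        #include <linux/err.h>
        #include "hypfs_diag.h"

        static int example_collect_diag204(void)
        {
                void *buf;
                int pages;

                buf = diag204_get_buffer(diag204_get_info_type(), &pages);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);
                return diag204_store(buf, pages);
        }
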
diff --git a/arch/s390/hypfs/hypfs_diag_fs.c b/arch/s390/hypfs/hypfs_diag_fs.c
new file mode 100644
index 000000000000..00a6d370a280
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag_fs.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hypervisor filesystem for Linux on s390. Diag 204 and 224
+ * implementation.
+ *
+ * Copyright IBM Corp. 2006, 2008
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <asm/diag.h>
+#include <asm/ebcdic.h>
+#include "hypfs_diag.h"
+#include "hypfs.h"
+
+#define TMP_SIZE 64 /* size of temporary buffers */
+
+static char *diag224_cpu_names; /* diag 224 name table */
+static int diag224_idx2name(int index, char *name);
+
+/*
+ * DIAG 204 member access functions.
+ *
+ * Since we have two different diag 204 data formats for old and new s390
+ * machines, we do not access the structs directly, but use getter functions for
+ * each struct member instead. This should make the code more readable.
+ */
+
+/* Time information block */
+
+static inline int info_blk_hdr__size(enum diag204_format type)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_info_blk_hdr);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_info_blk_hdr);
+}
+
+static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_info_blk_hdr *)hdr)->npar;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_info_blk_hdr *)hdr)->npar;
+}
+
+static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_info_blk_hdr *)hdr)->flags;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_info_blk_hdr *)hdr)->flags;
+}
+
+/* Partition header */
+
+static inline int part_hdr__size(enum diag204_format type)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_part_hdr);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_part_hdr);
+}
+
+static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_part_hdr *)hdr)->cpus;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_part_hdr *)hdr)->rcpus;
+}
+
+static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
+ char *name)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name,
+ DIAG204_LPAR_NAME_LEN);
+ else /* DIAG204_INFO_EXT */
+ memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name,
+ DIAG204_LPAR_NAME_LEN);
+ EBCASC(name, DIAG204_LPAR_NAME_LEN);
+ name[DIAG204_LPAR_NAME_LEN] = 0;
+ strim(name);
+}
+
+/* CPU info block */
+
+static inline int cpu_info__size(enum diag204_format type)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_cpu_info);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_cpu_info);
+}
+
+static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->ctidx;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->ctidx;
+}
+
+static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->cpu_addr;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->cpu_addr;
+}
+
+static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->acc_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->acc_time;
+}
+
+static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_cpu_info *)hdr)->lp_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->lp_time;
+}
+
+static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return 0; /* online_time not available in simple info */
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_cpu_info *)hdr)->online_time;
+}
+
+/* Physical header */
+
+static inline int phys_hdr__size(enum diag204_format type)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_phys_hdr);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_phys_hdr);
+}
+
+static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_hdr *)hdr)->cpus;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_hdr *)hdr)->cpus;
+}
+
+/* Physical CPU info block */
+
+static inline int phys_cpu__size(enum diag204_format type)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return sizeof(struct diag204_phys_cpu);
+ else /* DIAG204_INFO_EXT */
+ return sizeof(struct diag204_x_phys_cpu);
+}
+
+static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_cpu *)hdr)->cpu_addr;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr;
+}
+
+static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_cpu *)hdr)->mgm_time;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_cpu *)hdr)->mgm_time;
+}
+
+static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
+{
+ if (type == DIAG204_INFO_SIMPLE)
+ return ((struct diag204_phys_cpu *)hdr)->ctidx;
+ else /* DIAG204_INFO_EXT */
+ return ((struct diag204_x_phys_cpu *)hdr)->ctidx;
+}
+
+/*
+ * Functions to create the directory structure
+ * *******************************************
+ */
+
+static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
+{
+ struct dentry *cpu_dir;
+ char buffer[TMP_SIZE];
+ void *rc;
+
+ snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
+ cpu_info));
+ cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+ rc = hypfs_create_u64(cpu_dir, "mgmtime",
+ cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
+ cpu_info__lp_time(diag204_get_info_type(), cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ rc = hypfs_create_u64(cpu_dir, "cputime",
+ cpu_info__lp_time(diag204_get_info_type(), cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ if (diag204_get_info_type() == DIAG204_INFO_EXT) {
+ rc = hypfs_create_u64(cpu_dir, "onlinetime",
+ cpu_info__online_time(diag204_get_info_type(),
+ cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ }
+ diag224_idx2name(cpu_info__ctidx(diag204_get_info_type(), cpu_info), buffer);
+ rc = hypfs_create_str(cpu_dir, "type", buffer);
+ return PTR_ERR_OR_ZERO(rc);
+}
+
+static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
+{
+ struct dentry *cpus_dir;
+ struct dentry *lpar_dir;
+ char lpar_name[DIAG204_LPAR_NAME_LEN + 1];
+ void *cpu_info;
+ int i;
+
+ part_hdr__part_name(diag204_get_info_type(), part_hdr, lpar_name);
+ lpar_name[DIAG204_LPAR_NAME_LEN] = 0;
+ lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
+ if (IS_ERR(lpar_dir))
+ return lpar_dir;
+ cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return cpus_dir;
+ cpu_info = part_hdr + part_hdr__size(diag204_get_info_type());
+ for (i = 0; i < part_hdr__rcpus(diag204_get_info_type(), part_hdr); i++) {
+ int rc;
+
+ rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
+ if (rc)
+ return ERR_PTR(rc);
+ cpu_info += cpu_info__size(diag204_get_info_type());
+ }
+ return cpu_info;
+}
+
+static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
+{
+ struct dentry *cpu_dir;
+ char buffer[TMP_SIZE];
+ void *rc;
+
+ snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_get_info_type(),
+ cpu_info));
+ cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+ if (IS_ERR(cpu_dir))
+ return PTR_ERR(cpu_dir);
+ rc = hypfs_create_u64(cpu_dir, "mgmtime",
+ phys_cpu__mgm_time(diag204_get_info_type(), cpu_info));
+ if (IS_ERR(rc))
+ return PTR_ERR(rc);
+ diag224_idx2name(phys_cpu__ctidx(diag204_get_info_type(), cpu_info), buffer);
+ rc = hypfs_create_str(cpu_dir, "type", buffer);
+ return PTR_ERR_OR_ZERO(rc);
+}
+
+static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
+{
+ int i;
+ void *cpu_info;
+ struct dentry *cpus_dir;
+
+ cpus_dir = hypfs_mkdir(parent_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return cpus_dir;
+ cpu_info = phys_hdr + phys_hdr__size(diag204_get_info_type());
+ for (i = 0; i < phys_hdr__cpus(diag204_get_info_type(), phys_hdr); i++) {
+ int rc;
+
+ rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
+ if (rc)
+ return ERR_PTR(rc);
+ cpu_info += phys_cpu__size(diag204_get_info_type());
+ }
+ return cpu_info;
+}
+
+int hypfs_diag_create_files(struct dentry *root)
+{
+ struct dentry *systems_dir, *hyp_dir;
+ void *time_hdr, *part_hdr;
+ void *buffer, *ptr;
+ int i, rc, pages;
+
+ buffer = diag204_get_buffer(diag204_get_info_type(), &pages);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+ rc = diag204_store(buffer, pages);
+ if (rc)
+ return rc;
+
+ systems_dir = hypfs_mkdir(root, "systems");
+ if (IS_ERR(systems_dir)) {
+ rc = PTR_ERR(systems_dir);
+ goto err_out;
+ }
+ time_hdr = (struct x_info_blk_hdr *)buffer;
+ part_hdr = time_hdr + info_blk_hdr__size(diag204_get_info_type());
+ for (i = 0; i < info_blk_hdr__npar(diag204_get_info_type(), time_hdr); i++) {
+ part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
+ if (IS_ERR(part_hdr)) {
+ rc = PTR_ERR(part_hdr);
+ goto err_out;
+ }
+ }
+ if (info_blk_hdr__flags(diag204_get_info_type(), time_hdr) &
+ DIAG204_LPAR_PHYS_FLG) {
+ ptr = hypfs_create_phys_files(root, part_hdr);
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ goto err_out;
+ }
+ }
+ hyp_dir = hypfs_mkdir(root, "hyp");
+ if (IS_ERR(hyp_dir)) {
+ rc = PTR_ERR(hyp_dir);
+ goto err_out;
+ }
+ ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ goto err_out;
+ }
+ rc = 0;
+
+err_out:
+ return rc;
+}
+
+/* Diagnose 224 functions */
+
+static int diag224_idx2name(int index, char *name)
+{
+ memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN),
+ DIAG204_CPU_NAME_LEN);
+ name[DIAG204_CPU_NAME_LEN] = 0;
+ strim(name);
+ return 0;
+}
+
+static int diag224_get_name_table(void)
+{
+ /* memory must be below 2GB */
+ diag224_cpu_names = (char *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!diag224_cpu_names)
+ return -ENOMEM;
+ if (diag224(diag224_cpu_names)) {
+ free_page((unsigned long)diag224_cpu_names);
+ return -EOPNOTSUPP;
+ }
+ EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
+ return 0;
+}
+
+static void diag224_delete_name_table(void)
+{
+ free_page((unsigned long)diag224_cpu_names);
+}
+
+int __init __hypfs_diag_fs_init(void)
+{
+ if (MACHINE_IS_LPAR)
+ return diag224_get_name_table();
+ return 0;
+}
+
+void __hypfs_diag_fs_exit(void)
+{
+ diag224_delete_name_table();
+}
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index a3d881ca0a98..3db40ad853e0 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -14,47 +14,15 @@
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/timex.h>
+#include "hypfs_vm.h"
#include "hypfs.h"
-#define NAME_LEN 8
#define DBFS_D2FC_HDR_VERSION 0
static char local_guest[] = " ";
static char all_guests[] = "* ";
static char *all_groups = all_guests;
-static char *guest_query;
-
-struct diag2fc_data {
- __u32 version;
- __u32 flags;
- __u64 used_cpu;
- __u64 el_time;
- __u64 mem_min_kb;
- __u64 mem_max_kb;
- __u64 mem_share_kb;
- __u64 mem_used_kb;
- __u32 pcpus;
- __u32 lcpus;
- __u32 vcpus;
- __u32 ocpus;
- __u32 cpu_max;
- __u32 cpu_shares;
- __u32 cpu_use_samp;
- __u32 cpu_delay_samp;
- __u32 page_wait_samp;
- __u32 idle_samp;
- __u32 other_samp;
- __u32 total_samp;
- char guest_name[NAME_LEN];
-};
-
-struct diag2fc_parm_list {
- char userid[NAME_LEN];
- char aci_grp[NAME_LEN];
- __u64 addr;
- __u32 size;
- __u32 fmt;
-};
+char *diag2fc_guest_query;
static int diag2fc(int size, char* query, void *addr)
{
@@ -62,10 +30,10 @@ static int diag2fc(int size, char* query, void *addr)
unsigned long rc;
struct diag2fc_parm_list parm_list;
- memcpy(parm_list.userid, query, NAME_LEN);
- ASCEBC(parm_list.userid, NAME_LEN);
- memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
- ASCEBC(parm_list.aci_grp, NAME_LEN);
+ memcpy(parm_list.userid, query, DIAG2FC_NAME_LEN);
+ ASCEBC(parm_list.userid, DIAG2FC_NAME_LEN);
+ memcpy(parm_list.aci_grp, all_groups, DIAG2FC_NAME_LEN);
+ ASCEBC(parm_list.aci_grp, DIAG2FC_NAME_LEN);
parm_list.addr = (unsigned long)addr;
parm_list.size = size;
parm_list.fmt = 0x02;
@@ -87,7 +55,7 @@ static int diag2fc(int size, char* query, void *addr)
/*
* Allocate buffer for "query" and store diag 2fc at "offset"
*/
-static void *diag2fc_store(char *query, unsigned int *count, int offset)
+void *diag2fc_store(char *query, unsigned int *count, int offset)
{
void *data;
int size;
@@ -108,132 +76,11 @@ static void *diag2fc_store(char *query, unsigned int *count, int offset)
return data;
}
-static void diag2fc_free(const void *data)
+void diag2fc_free(const void *data)
{
vfree(data);
}
-#define ATTRIBUTE(dir, name, member) \
-do { \
- void *rc; \
- rc = hypfs_create_u64(dir, name, member); \
- if (IS_ERR(rc)) \
- return PTR_ERR(rc); \
-} while(0)
-
-static int hypfs_vm_create_guest(struct dentry *systems_dir,
- struct diag2fc_data *data)
-{
- char guest_name[NAME_LEN + 1] = {};
- struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
- int dedicated_flag, capped_value;
-
- capped_value = (data->flags & 0x00000006) >> 1;
- dedicated_flag = (data->flags & 0x00000008) >> 3;
-
- /* guest dir */
- memcpy(guest_name, data->guest_name, NAME_LEN);
- EBCASC(guest_name, NAME_LEN);
- strim(guest_name);
- guest_dir = hypfs_mkdir(systems_dir, guest_name);
- if (IS_ERR(guest_dir))
- return PTR_ERR(guest_dir);
- ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
-
- /* logical cpu information */
- cpus_dir = hypfs_mkdir(guest_dir, "cpus");
- if (IS_ERR(cpus_dir))
- return PTR_ERR(cpus_dir);
- ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
- ATTRIBUTE(cpus_dir, "capped", capped_value);
- ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
- ATTRIBUTE(cpus_dir, "count", data->vcpus);
- /*
- * Note: The "weight_min" attribute got the wrong name.
- * The value represents the number of non-stopped (operating)
- * CPUS.
- */
- ATTRIBUTE(cpus_dir, "weight_min", data->ocpus);
- ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
- ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
-
- /* memory information */
- mem_dir = hypfs_mkdir(guest_dir, "mem");
- if (IS_ERR(mem_dir))
- return PTR_ERR(mem_dir);
- ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
- ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
- ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
- ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
-
- /* samples */
- samples_dir = hypfs_mkdir(guest_dir, "samples");
- if (IS_ERR(samples_dir))
- return PTR_ERR(samples_dir);
- ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
- ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
- ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
- ATTRIBUTE(samples_dir, "idle", data->idle_samp);
- ATTRIBUTE(samples_dir, "other", data->other_samp);
- ATTRIBUTE(samples_dir, "total", data->total_samp);
- return 0;
-}
-
-int hypfs_vm_create_files(struct dentry *root)
-{
- struct dentry *dir, *file;
- struct diag2fc_data *data;
- unsigned int count = 0;
- int rc, i;
-
- data = diag2fc_store(guest_query, &count, 0);
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- /* Hypervisor Info */
- dir = hypfs_mkdir(root, "hyp");
- if (IS_ERR(dir)) {
- rc = PTR_ERR(dir);
- goto failed;
- }
- file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- goto failed;
- }
-
- /* physical cpus */
- dir = hypfs_mkdir(root, "cpus");
- if (IS_ERR(dir)) {
- rc = PTR_ERR(dir);
- goto failed;
- }
- file = hypfs_create_u64(dir, "count", data->lcpus);
- if (IS_ERR(file)) {
- rc = PTR_ERR(file);
- goto failed;
- }
-
- /* guests */
- dir = hypfs_mkdir(root, "systems");
- if (IS_ERR(dir)) {
- rc = PTR_ERR(dir);
- goto failed;
- }
-
- for (i = 0; i < count; i++) {
- rc = hypfs_vm_create_guest(dir, &(data[i]));
- if (rc)
- goto failed;
- }
- diag2fc_free(data);
- return 0;
-
-failed:
- diag2fc_free(data);
- return rc;
-}
-
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
@@ -252,7 +99,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
struct dbfs_d2fc *d2fc;
unsigned int count;
- d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
+ d2fc = diag2fc_store(diag2fc_guest_query, &count, sizeof(d2fc->hdr));
if (IS_ERR(d2fc))
return PTR_ERR(d2fc);
store_tod_clock_ext(&d2fc->hdr.tod_ext);
@@ -277,9 +124,9 @@ int hypfs_vm_init(void)
if (!MACHINE_IS_VM)
return 0;
if (diag2fc(0, all_guests, NULL) > 0)
- guest_query = all_guests;
+ diag2fc_guest_query = all_guests;
else if (diag2fc(0, local_guest, NULL) > 0)
- guest_query = local_guest;
+ diag2fc_guest_query = local_guest;
else
return -EACCES;
hypfs_dbfs_create_file(&dbfs_file_2fc);
diff --git a/arch/s390/hypfs/hypfs_vm.h b/arch/s390/hypfs/hypfs_vm.h
new file mode 100644
index 000000000000..fe2e5851addd
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_vm.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hypervisor filesystem for Linux on s390. z/VM implementation.
+ *
+ * Copyright IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#ifndef _S390_HYPFS_VM_H_
+#define _S390_HYPFS_VM_H_
+
+#define DIAG2FC_NAME_LEN 8
+
+struct diag2fc_data {
+ __u32 version;
+ __u32 flags;
+ __u64 used_cpu;
+ __u64 el_time;
+ __u64 mem_min_kb;
+ __u64 mem_max_kb;
+ __u64 mem_share_kb;
+ __u64 mem_used_kb;
+ __u32 pcpus;
+ __u32 lcpus;
+ __u32 vcpus;
+ __u32 ocpus;
+ __u32 cpu_max;
+ __u32 cpu_shares;
+ __u32 cpu_use_samp;
+ __u32 cpu_delay_samp;
+ __u32 page_wait_samp;
+ __u32 idle_samp;
+ __u32 other_samp;
+ __u32 total_samp;
+ char guest_name[DIAG2FC_NAME_LEN];
+};
+
+struct diag2fc_parm_list {
+ char userid[DIAG2FC_NAME_LEN];
+ char aci_grp[DIAG2FC_NAME_LEN];
+ __u64 addr;
+ __u32 size;
+ __u32 fmt;
+};
+
+void *diag2fc_store(char *query, unsigned int *count, int offset);
+void diag2fc_free(const void *data);
+extern char *diag2fc_guest_query;
+
+#endif /* _S390_HYPFS_VM_H_ */
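[Annotation, not part of the patch] Mirroring the diag204 split, the z/VM code is divided into a data layer (hypfs_vm.c keeps diag2fc_store()/diag2fc_free() and the query string) and an fs front end (hypfs_vm_fs.c below), sharing struct diag2fc_data through this header; the former file-local NAME_LEN becomes DIAG2FC_NAME_LEN. The usage contract, as a sketch modelled on hypfs_vm_create_files() further down:

        /* Hypothetical consumer -- sketch only. */
        static int example_count_guests(void)
        {
                struct diag2fc_data *data;
                unsigned int count = 0;

                data = diag2fc_store(diag2fc_guest_query, &count, 0);
                if (IS_ERR(data))
                        return PTR_ERR(data);
                diag2fc_free(data);     /* buffer must be released by the caller */
                return count;
        }
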
diff --git a/arch/s390/hypfs/hypfs_vm_fs.c b/arch/s390/hypfs/hypfs_vm_fs.c
new file mode 100644
index 000000000000..6011289afa8c
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_vm_fs.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hypervisor filesystem for Linux on s390. z/VM implementation.
+ *
+ * Copyright IBM Corp. 2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <asm/extable.h>
+#include <asm/diag.h>
+#include <asm/ebcdic.h>
+#include <asm/timex.h>
+#include "hypfs_vm.h"
+#include "hypfs.h"
+
+#define ATTRIBUTE(dir, name, member) \
+do { \
+ void *rc; \
+ rc = hypfs_create_u64(dir, name, member); \
+ if (IS_ERR(rc)) \
+ return PTR_ERR(rc); \
+} while (0)
+
+static int hypfs_vm_create_guest(struct dentry *systems_dir,
+ struct diag2fc_data *data)
+{
+ char guest_name[DIAG2FC_NAME_LEN + 1] = {};
+ struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
+ int dedicated_flag, capped_value;
+
+ capped_value = (data->flags & 0x00000006) >> 1;
+ dedicated_flag = (data->flags & 0x00000008) >> 3;
+
+ /* guest dir */
+ memcpy(guest_name, data->guest_name, DIAG2FC_NAME_LEN);
+ EBCASC(guest_name, DIAG2FC_NAME_LEN);
+ strim(guest_name);
+ guest_dir = hypfs_mkdir(systems_dir, guest_name);
+ if (IS_ERR(guest_dir))
+ return PTR_ERR(guest_dir);
+ ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
+
+ /* logical cpu information */
+ cpus_dir = hypfs_mkdir(guest_dir, "cpus");
+ if (IS_ERR(cpus_dir))
+ return PTR_ERR(cpus_dir);
+ ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
+ ATTRIBUTE(cpus_dir, "capped", capped_value);
+ ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
+ ATTRIBUTE(cpus_dir, "count", data->vcpus);
+ /*
+ * Note: The "weight_min" attribute got the wrong name.
+ * The value represents the number of non-stopped (operating)
+ * CPUs.

+ */
+ ATTRIBUTE(cpus_dir, "weight_min", data->ocpus);
+ ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
+ ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
+
+ /* memory information */
+ mem_dir = hypfs_mkdir(guest_dir, "mem");
+ if (IS_ERR(mem_dir))
+ return PTR_ERR(mem_dir);
+ ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
+ ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
+ ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
+ ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
+
+ /* samples */
+ samples_dir = hypfs_mkdir(guest_dir, "samples");
+ if (IS_ERR(samples_dir))
+ return PTR_ERR(samples_dir);
+ ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
+ ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
+ ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
+ ATTRIBUTE(samples_dir, "idle", data->idle_samp);
+ ATTRIBUTE(samples_dir, "other", data->other_samp);
+ ATTRIBUTE(samples_dir, "total", data->total_samp);
+ return 0;
+}
+
+int hypfs_vm_create_files(struct dentry *root)
+{
+ struct dentry *dir, *file;
+ struct diag2fc_data *data;
+ unsigned int count = 0;
+ int rc, i;
+
+ data = diag2fc_store(diag2fc_guest_query, &count, 0);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ /* Hypervisor Info */
+ dir = hypfs_mkdir(root, "hyp");
+ if (IS_ERR(dir)) {
+ rc = PTR_ERR(dir);
+ goto failed;
+ }
+ file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ goto failed;
+ }
+
+ /* physical cpus */
+ dir = hypfs_mkdir(root, "cpus");
+ if (IS_ERR(dir)) {
+ rc = PTR_ERR(dir);
+ goto failed;
+ }
+ file = hypfs_create_u64(dir, "count", data->lcpus);
+ if (IS_ERR(file)) {
+ rc = PTR_ERR(file);
+ goto failed;
+ }
+
+ /* guests */
+ dir = hypfs_mkdir(root, "systems");
+ if (IS_ERR(dir)) {
+ rc = PTR_ERR(dir);
+ goto failed;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = hypfs_vm_create_guest(dir, &data[i]);
+ if (rc)
+ goto failed;
+ }
+ diag2fc_free(data);
+ return 0;
+
+failed:
+ diag2fc_free(data);
+ return rc;
+}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index ee919bfc8186..ada83149932f 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -53,7 +53,7 @@ static void hypfs_update_update(struct super_block *sb)
struct inode *inode = d_inode(sb_info->update_file);
sb_info->last_update = ktime_get_seconds();
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
/* directory tree removal functions */
@@ -101,7 +101,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
ret->i_mode = mode;
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
- ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
if (S_ISDIR(mode))
set_nlink(ret, 2);
}
@@ -460,45 +460,18 @@ static const struct super_operations hypfs_s_ops = {
.show_options = hypfs_show_options,
};
-static int __init hypfs_init(void)
+int __init __hypfs_fs_init(void)
{
int rc;
- hypfs_dbfs_init();
-
- if (hypfs_diag_init()) {
- rc = -ENODATA;
- goto fail_dbfs_exit;
- }
- if (hypfs_vm_init()) {
- rc = -ENODATA;
- goto fail_hypfs_diag_exit;
- }
- hypfs_sprp_init();
- if (hypfs_diag0c_init()) {
- rc = -ENODATA;
- goto fail_hypfs_sprp_exit;
- }
rc = sysfs_create_mount_point(hypervisor_kobj, "s390");
if (rc)
- goto fail_hypfs_diag0c_exit;
+ return rc;
rc = register_filesystem(&hypfs_type);
if (rc)
- goto fail_filesystem;
+ goto fail;
return 0;
-
-fail_filesystem:
+fail:
sysfs_remove_mount_point(hypervisor_kobj, "s390");
-fail_hypfs_diag0c_exit:
- hypfs_diag0c_exit();
-fail_hypfs_sprp_exit:
- hypfs_sprp_exit();
- hypfs_vm_exit();
-fail_hypfs_diag_exit:
- hypfs_diag_exit();
- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
-fail_dbfs_exit:
- hypfs_dbfs_exit();
return rc;
}
-device_initcall(hypfs_init)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 1a18d7b82f86..4b904110d27c 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -5,6 +5,5 @@ generated-y += syscall_table.h
generated-y += unistd_nr.h
generic-y += asm-offsets.h
-generic-y += export.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index ac665b9670c5..ccd4e148b5ed 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -222,7 +222,7 @@ static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
- * stored in the s390dbf. See Documentation/s390/s390dbf.rst for more details!
+ * stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
*/
extern debug_entry_t *
__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
@@ -350,7 +350,7 @@ static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
/*
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
- * stored in the s390dbf. See Documentation/s390/s390dbf.rst for more details!
+ * stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
*/
extern debug_entry_t *
__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 902e0330dd91..bed804137537 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -36,6 +36,7 @@ enum diag_stat_enum {
DIAG_STAT_X304,
DIAG_STAT_X308,
DIAG_STAT_X318,
+ DIAG_STAT_X320,
DIAG_STAT_X500,
NR_DIAG_STAT
};
@@ -108,6 +109,8 @@ enum diag204_sc {
DIAG204_SUBC_STIB7 = 7
};
+#define DIAG204_SUBCODE_MASK 0xffff
+
/* The two available diag 204 data formats */
enum diag204_format {
DIAG204_INFO_SIMPLE = 0,
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index e5c5cb1207e2..5a82b08f03cd 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -54,6 +54,23 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
return NULL;
}
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+struct fgraph_ret_regs {
+ unsigned long gpr2;
+ unsigned long fp;
+};
+
+static __always_inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->gpr2;
+}
+
+static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->fp;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
static __always_inline unsigned long
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
{
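[Annotation, not part of the patch] struct fgraph_ret_regs and its two accessors back the function-graph return tracing; the asm-offsets.c hunk further down turns the structure layout into __FGRAPH_RET_GPR2/__FGRAPH_RET_FP/__FGRAPH_RET_SIZE for the assembly return trampoline, while generic tracer code reads the return value and frame pointer through the inline accessors. A simplified sketch of the consumer side (the handler name is hypothetical):

        /* Sketch only. */
        static unsigned long example_return_handler(struct fgraph_ret_regs *ret_regs)
        {
                unsigned long retval = fgraph_ret_regs_return_value(ret_regs);
                unsigned long frame = fgraph_ret_regs_frame_pointer(ret_regs);

                /* ... record retval/frame in the trace buffer ... */
                return retval;
        }
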
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
index d55ba878378b..e47fd8cbe701 100644
--- a/arch/s390/include/asm/kfence.h
+++ b/arch/s390/include/asm/kfence.h
@@ -35,7 +35,7 @@ static __always_inline void kfence_split_mapping(void)
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
- __kernel_map_pages(virt_to_page(addr), 1, !protect);
+ __kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
return true;
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 2bbc3d54959d..91bfecb91321 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1028,6 +1028,9 @@ static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
extern char sie_exit;
+bool kvm_s390_pv_is_protected(struct kvm *kvm);
+bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
+
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
diff --git a/arch/s390/include/asm/maccess.h b/arch/s390/include/asm/maccess.h
index cfec3141fdba..50225940d971 100644
--- a/arch/s390/include/asm/maccess.h
+++ b/arch/s390/include/asm/maccess.h
@@ -4,6 +4,9 @@
#include <linux/types.h>
+#define MEMCPY_REAL_SIZE PAGE_SIZE
+#define MEMCPY_REAL_MASK PAGE_MASK
+
struct iov_iter;
extern unsigned long __memcpy_real_area;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a9c138fcd2ad..cfec0743314e 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -191,8 +191,16 @@ int arch_make_page_accessible(struct page *page);
#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
-#define pfn_to_virt(pfn) __va(pfn_to_phys(pfn))
-#define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr)))
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn_to_phys(pfn));
+}
+
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return phys_to_pfn(__pa(kaddr));
+}
+
#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
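[Annotation, not part of the patch] Turning pfn_to_virt()/virt_to_pfn() from macros into static inline functions gives them fixed prototypes, so the compiler now checks that virt_to_pfn() receives a pointer and pfn_to_virt() an integer pfn instead of accepting any expression; this is also why the kfence.h hunk above gains a (void *) cast in front of virt_to_page(). A minimal sketch of the caller-visible difference:

        /* Sketch only: kbuf is an ordinary kernel-virtual pointer. */
        static unsigned long example_buf_pfn(void *kbuf)
        {
                return virt_to_pfn(kbuf);       /* pointer argument, type-checked */
        }
        /* Passing a raw unsigned long address now needs an explicit (void *) cast. */
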
diff --git a/arch/s390/include/asm/pfault.h b/arch/s390/include/asm/pfault.h
new file mode 100644
index 000000000000..a1bee4a1e470
--- /dev/null
+++ b/arch/s390/include/asm/pfault.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+#ifndef _ASM_S390_PFAULT_H
+#define _ASM_S390_PFAULT_H
+
+#include <linux/errno.h>
+
+int __pfault_init(void);
+void __pfault_fini(void);
+
+static inline int pfault_init(void)
+{
+ if (IS_ENABLED(CONFIG_PFAULT))
+ return __pfault_init();
+ return -EOPNOTSUPP;
+}
+
+static inline void pfault_fini(void)
+{
+ if (IS_ENABLED(CONFIG_PFAULT))
+ __pfault_fini();
+}
+
+#endif /* _ASM_S390_PFAULT_H */
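[Annotation, not part of the patch] This new header replaces the #ifdef CONFIG_PFAULT block that the setup.h hunk below removes. The stubs change from untyped macros (pfault_init() expanding to a bare -1) to type-checked inlines returning -EOPNOTSUPP, so callers keep working without their own config guards. A hypothetical caller, as a sketch:

        #include <linux/printk.h>
        #include <asm/pfault.h>

        /* Sketch only. */
        static void example_enable_pfault(void)
        {
                if (pfault_init())      /* -EOPNOTSUPP when CONFIG_PFAULT is not set */
                        pr_info("pseudo-page-fault handshake not available\n");
        }
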
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c55f3c3365af..30909fe27c24 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -89,8 +89,6 @@ extern unsigned long __bootdata_preserved(VMALLOC_END);
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);
-#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-
extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index dac7da88f61f..5742d23bba13 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -86,6 +86,7 @@ struct sclp_info {
unsigned char has_kss : 1;
unsigned char has_gisaf : 1;
unsigned char has_diag318 : 1;
+ unsigned char has_diag320 : 1;
unsigned char has_sipl : 1;
unsigned char has_sipl_eckd : 1;
unsigned char has_dirq : 1;
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index f191255c60db..b30fe91166e3 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -74,6 +74,7 @@ extern unsigned int zlib_dfltcc_support;
extern int noexec_disabled;
extern unsigned long ident_map_size;
+extern unsigned long max_mappable;
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
extern unsigned long mio_wb_bit_mask;
@@ -117,14 +118,6 @@ extern unsigned int console_irq;
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
-#ifdef CONFIG_PFAULT
-extern int pfault_init(void);
-extern void pfault_fini(void);
-#else /* CONFIG_PFAULT */
-#define pfault_init() ({-1;})
-#define pfault_fini() do { } while (0)
-#endif /* CONFIG_PFAULT */
-
#ifdef CONFIG_VMCP
void vmcp_cma_reserve(void);
#else
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index d6bb2f4f78d1..d2cd42bb2c26 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -463,6 +463,7 @@ static inline int is_prot_virt_host(void)
return prot_virt_host;
}
+int uv_pin_shared(unsigned long paddr);
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_owned_page(unsigned long paddr);
@@ -475,6 +476,11 @@ void setup_uv(void);
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
+static inline int uv_pin_shared(unsigned long paddr)
+{
+ return 0;
+}
+
static inline int uv_destroy_owned_page(unsigned long paddr)
{
return 0;
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index 5faf0a1d2c16..5ad76471e73f 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -26,7 +26,7 @@
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
#define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys have always 136 bytes */
#define MINEP11AESKEYBLOBSIZE 256 /* min EP11 AES key blob size */
-#define MAXEP11AESKEYBLOBSIZE 320 /* max EP11 AES key blob size */
+#define MAXEP11AESKEYBLOBSIZE 336 /* max EP11 AES key blob size */
/* Minimum size of a key blob */
#define MINKEYBLOBSIZE SECKEYBLOBSIZE
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 6b2a051e1f8a..0df2b88cc0da 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -37,9 +37,9 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
-obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
+obj-y += sysinfo.o lgr.o os_info.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
-obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
+obj-y += entry.o reipl.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o
@@ -63,12 +63,13 @@ obj-$(CONFIG_RETHOOK) += rethook.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
-
+obj-$(CONFIG_CERT_STORE) += cert_store.o
obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 81cf72088041..fa5f6885c74a 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/purgatory.h>
#include <linux/pgtable.h>
+#include <linux/ftrace.h>
#include <asm/idle.h>
#include <asm/gmap.h>
#include <asm/stacktrace.h>
@@ -177,5 +178,13 @@ int main(void)
DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
DEFINE(MAX_COMMAND_LINE_SIZE, PARMAREA + offsetof(struct parmarea, max_command_line_size));
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* function graph return value tracing */
+ OFFSET(__FGRAPH_RET_GPR2, fgraph_ret_regs, gpr2);
+ OFFSET(__FGRAPH_RET_FP, fgraph_ret_regs, fp);
+ DEFINE(__FGRAPH_RET_SIZE, sizeof(struct fgraph_ret_regs));
+#endif
+ OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
+ DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
return 0;
}
diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c
new file mode 100644
index 000000000000..3986a044eb36
--- /dev/null
+++ b/arch/s390/kernel/cert_store.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DIAG 0x320 support and certificate store handling
+ *
+ * Copyright IBM Corp. 2023
+ * Author(s): Anastasia Eskova <anastasia.eskova@ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/key-type.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <crypto/sha2.h>
+#include <keys/user-type.h>
+#include <asm/debug.h>
+#include <asm/diag.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+
+#define DIAG_MAX_RETRIES 10
+
+#define VCE_FLAGS_VALID_MASK 0x80
+
+#define ISM_LEN_DWORDS 4
+#define VCSSB_LEN_BYTES 128
+#define VCSSB_LEN_NO_CERTS 4
+#define VCB_LEN_NO_CERTS 64
+#define VC_NAME_LEN_BYTES 64
+
+#define CERT_STORE_KEY_TYPE_NAME "cert_store_key"
+#define CERT_STORE_KEYRING_NAME "cert_store"
+
+static debug_info_t *cert_store_dbf;
+static debug_info_t *cert_store_hexdump;
+
+#define pr_dbf_msg(fmt, ...) \
+ debug_sprintf_event(cert_store_dbf, 3, fmt "\n", ## __VA_ARGS__)
+
+enum diag320_subcode {
+ DIAG320_SUBCODES = 0,
+ DIAG320_STORAGE = 1,
+ DIAG320_CERT_BLOCK = 2,
+};
+
+enum diag320_rc {
+ DIAG320_RC_OK = 0x0001,
+ DIAG320_RC_CS_NOMATCH = 0x0306,
+};
+
+/* Verification Certificates Store Support Block (VCSSB). */
+struct vcssb {
+ u32 vcssb_length;
+ u8 pad_0x04[3];
+ u8 version;
+ u8 pad_0x08[8];
+ u32 cs_token;
+ u8 pad_0x14[12];
+ u16 total_vc_index_count;
+ u16 max_vc_index_count;
+ u8 pad_0x24[28];
+ u32 max_vce_length;
+ u32 max_vcxe_length;
+ u8 pad_0x48[8];
+ u32 max_single_vcb_length;
+ u32 total_vcb_length;
+ u32 max_single_vcxb_length;
+ u32 total_vcxb_length;
+ u8 pad_0x60[32];
+} __packed __aligned(8);
+
+/* Verification Certificate Entry (VCE) Header. */
+struct vce_header {
+ u32 vce_length;
+ u8 flags;
+ u8 key_type;
+ u16 vc_index;
+ u8 vc_name[VC_NAME_LEN_BYTES]; /* EBCDIC */
+ u8 vc_format;
+ u8 pad_0x49;
+ u16 key_id_length;
+ u8 pad_0x4c;
+ u8 vc_hash_type;
+ u16 vc_hash_length;
+ u8 pad_0x50[4];
+ u32 vc_length;
+ u8 pad_0x58[8];
+ u16 vc_hash_offset;
+ u16 vc_offset;
+ u8 pad_0x64[28];
+} __packed __aligned(4);
+
+/* Verification Certificate Block (VCB) Header. */
+struct vcb_header {
+ u32 vcb_input_length;
+ u8 pad_0x04[4];
+ u16 first_vc_index;
+ u16 last_vc_index;
+ u32 pad_0x0c;
+ u32 cs_token;
+ u8 pad_0x14[12];
+ u32 vcb_output_length;
+ u8 pad_0x24[3];
+ u8 version;
+ u16 stored_vc_count;
+ u16 remaining_vc_count;
+ u8 pad_0x2c[20];
+} __packed __aligned(4);
+
+/* Verification Certificate Block (VCB). */
+struct vcb {
+ struct vcb_header vcb_hdr;
+ u8 vcb_buf[];
+} __packed __aligned(4);
+
+/* Verification Certificate Entry (VCE). */
+struct vce {
+ struct vce_header vce_hdr;
+ u8 cert_data_buf[];
+} __packed __aligned(4);
+
+static void cert_store_key_describe(const struct key *key, struct seq_file *m)
+{
+ char ascii[VC_NAME_LEN_BYTES + 1];
+
+ /*
+ * First 64 bytes of the key description is key name in EBCDIC CP 500.
+ * Convert it to ASCII for displaying in /proc/keys.
+ */
+ strscpy(ascii, key->description, sizeof(ascii));
+ EBCASC_500(ascii, VC_NAME_LEN_BYTES);
+ seq_puts(m, ascii);
+
+ seq_puts(m, &key->description[VC_NAME_LEN_BYTES]);
+ if (key_is_positive(key))
+ seq_printf(m, ": %u", key->datalen);
+}
+
+/*
+ * Certificate store key type takes over properties of
+ * user key but cannot be updated.
+ */
+static struct key_type key_type_cert_store_key = {
+ .name = CERT_STORE_KEY_TYPE_NAME,
+ .preparse = user_preparse,
+ .free_preparse = user_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = cert_store_key_describe,
+ .read = user_read,
+};
+
+/* Logging functions. */
+static void pr_dbf_vcb(const struct vcb *b)
+{
+ pr_dbf_msg("VCB Header:");
+ pr_dbf_msg("vcb_input_length: %d", b->vcb_hdr.vcb_input_length);
+ pr_dbf_msg("first_vc_index: %d", b->vcb_hdr.first_vc_index);
+ pr_dbf_msg("last_vc_index: %d", b->vcb_hdr.last_vc_index);
+ pr_dbf_msg("cs_token: %d", b->vcb_hdr.cs_token);
+ pr_dbf_msg("vcb_output_length: %d", b->vcb_hdr.vcb_output_length);
+ pr_dbf_msg("version: %d", b->vcb_hdr.version);
+ pr_dbf_msg("stored_vc_count: %d", b->vcb_hdr.stored_vc_count);
+ pr_dbf_msg("remaining_vc_count: %d", b->vcb_hdr.remaining_vc_count);
+}
+
+static void pr_dbf_vce(const struct vce *e)
+{
+ unsigned char vc_name[VC_NAME_LEN_BYTES + 1];
+ char log_string[VC_NAME_LEN_BYTES + 40];
+
+ pr_dbf_msg("VCE Header:");
+ pr_dbf_msg("vce_hdr.vce_length: %d", e->vce_hdr.vce_length);
+ pr_dbf_msg("vce_hdr.flags: %d", e->vce_hdr.flags);
+ pr_dbf_msg("vce_hdr.key_type: %d", e->vce_hdr.key_type);
+ pr_dbf_msg("vce_hdr.vc_index: %d", e->vce_hdr.vc_index);
+ pr_dbf_msg("vce_hdr.vc_format: %d", e->vce_hdr.vc_format);
+ pr_dbf_msg("vce_hdr.key_id_length: %d", e->vce_hdr.key_id_length);
+ pr_dbf_msg("vce_hdr.vc_hash_type: %d", e->vce_hdr.vc_hash_type);
+ pr_dbf_msg("vce_hdr.vc_hash_length: %d", e->vce_hdr.vc_hash_length);
+ pr_dbf_msg("vce_hdr.vc_hash_offset: %d", e->vce_hdr.vc_hash_offset);
+ pr_dbf_msg("vce_hdr.vc_length: %d", e->vce_hdr.vc_length);
+ pr_dbf_msg("vce_hdr.vc_offset: %d", e->vce_hdr.vc_offset);
+
+ /* Certificate name in ASCII. */
+ memcpy(vc_name, e->vce_hdr.vc_name, VC_NAME_LEN_BYTES);
+ EBCASC_500(vc_name, VC_NAME_LEN_BYTES);
+ vc_name[VC_NAME_LEN_BYTES] = '\0';
+
+ snprintf(log_string, sizeof(log_string),
+ "index: %d vce_hdr.vc_name (ASCII): %s",
+ e->vce_hdr.vc_index, vc_name);
+ debug_text_event(cert_store_hexdump, 3, log_string);
+
+ /* Certificate data. */
+ debug_text_event(cert_store_hexdump, 3, "VCE: Certificate data start");
+ debug_event(cert_store_hexdump, 3, (u8 *)e->cert_data_buf, 128);
+ debug_text_event(cert_store_hexdump, 3, "VCE: Certificate data end");
+ debug_event(cert_store_hexdump, 3,
+ (u8 *)e->cert_data_buf + e->vce_hdr.vce_length - 128, 128);
+}
+
+static void pr_dbf_vcssb(const struct vcssb *s)
+{
+ debug_text_event(cert_store_hexdump, 3, "DIAG320 Subcode1");
+ debug_event(cert_store_hexdump, 3, (u8 *)s, VCSSB_LEN_BYTES);
+
+ pr_dbf_msg("VCSSB:");
+ pr_dbf_msg("vcssb_length: %u", s->vcssb_length);
+ pr_dbf_msg("version: %u", s->version);
+ pr_dbf_msg("cs_token: %u", s->cs_token);
+ pr_dbf_msg("total_vc_index_count: %u", s->total_vc_index_count);
+ pr_dbf_msg("max_vc_index_count: %u", s->max_vc_index_count);
+ pr_dbf_msg("max_vce_length: %u", s->max_vce_length);
+ pr_dbf_msg("max_vcxe_length: %u", s->max_vce_length);
+ pr_dbf_msg("max_single_vcb_length: %u", s->max_single_vcb_length);
+ pr_dbf_msg("total_vcb_length: %u", s->total_vcb_length);
+ pr_dbf_msg("max_single_vcxb_length: %u", s->max_single_vcxb_length);
+ pr_dbf_msg("total_vcxb_length: %u", s->total_vcxb_length);
+}
+
+static int __diag320(unsigned long subcode, void *addr)
+{
+ union register_pair rp = { .even = (unsigned long)addr, };
+
+ asm volatile(
+ " diag %[rp],%[subcode],0x320\n"
+ "0: nopr %%r7\n"
+ EX_TABLE(0b, 0b)
+ : [rp] "+d" (rp.pair)
+ : [subcode] "d" (subcode)
+ : "cc", "memory");
+
+ return rp.odd;
+}
+
+static int diag320(unsigned long subcode, void *addr)
+{
+ diag_stat_inc(DIAG_STAT_X320);
+
+ return __diag320(subcode, addr);
+}
+
+/*
+ * Calculate SHA256 hash of the VCE certificate and compare it to hash stored in
+ * VCE. Return -EINVAL if hashes don't match.
+ */
+static int check_certificate_hash(const struct vce *vce)
+{
+ u8 hash[SHA256_DIGEST_SIZE];
+ u16 vc_hash_length;
+ u8 *vce_hash;
+
+ vce_hash = (u8 *)vce + vce->vce_hdr.vc_hash_offset;
+ vc_hash_length = vce->vce_hdr.vc_hash_length;
+ sha256((u8 *)vce + vce->vce_hdr.vc_offset, vce->vce_hdr.vc_length, hash);
+ if (memcmp(vce_hash, hash, vc_hash_length) == 0)
+ return 0;
+
+ pr_dbf_msg("SHA256 hash of received certificate does not match");
+ debug_text_event(cert_store_hexdump, 3, "VCE hash:");
+ debug_event(cert_store_hexdump, 3, vce_hash, SHA256_DIGEST_SIZE);
+ debug_text_event(cert_store_hexdump, 3, "Calculated hash:");
+ debug_event(cert_store_hexdump, 3, hash, SHA256_DIGEST_SIZE);
+
+ return -EINVAL;
+}
+
+static int check_certificate_valid(const struct vce *vce)
+{
+ if (!(vce->vce_hdr.flags & VCE_FLAGS_VALID_MASK)) {
+ pr_dbf_msg("Certificate entry is invalid");
+ return -EINVAL;
+ }
+ if (vce->vce_hdr.vc_format != 1) {
+ pr_dbf_msg("Certificate format is not supported");
+ return -EINVAL;
+ }
+ if (vce->vce_hdr.vc_hash_type != 1) {
+ pr_dbf_msg("Hash type is not supported");
+ return -EINVAL;
+ }
+
+ return check_certificate_hash(vce);
+}
+
+static struct key *get_user_session_keyring(void)
+{
+ key_ref_t us_keyring_ref;
+
+ us_keyring_ref = lookup_user_key(KEY_SPEC_USER_SESSION_KEYRING,
+ KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(us_keyring_ref)) {
+ pr_dbf_msg("Couldn't get user session keyring: %ld",
+ PTR_ERR(us_keyring_ref));
+ return ERR_PTR(-ENOKEY);
+ }
+ key_ref_put(us_keyring_ref);
+ return key_ref_to_ptr(us_keyring_ref);
+}
+
+/* Invalidate all keys from cert_store keyring. */
+static int invalidate_keyring_keys(struct key *keyring)
+{
+ unsigned long num_keys, key_index;
+ size_t keyring_payload_len;
+ key_serial_t *key_array;
+ struct key *current_key;
+ int rc;
+
+ keyring_payload_len = key_type_keyring.read(keyring, NULL, 0);
+ num_keys = keyring_payload_len / sizeof(key_serial_t);
+ key_array = kcalloc(num_keys, sizeof(key_serial_t), GFP_KERNEL);
+ if (!key_array)
+ return -ENOMEM;
+
+ rc = key_type_keyring.read(keyring, (char *)key_array, keyring_payload_len);
+ if (rc != keyring_payload_len) {
+ pr_dbf_msg("Couldn't read keyring payload");
+ goto out;
+ }
+
+ for (key_index = 0; key_index < num_keys; key_index++) {
+ current_key = key_lookup(key_array[key_index]);
+ pr_dbf_msg("Invalidating key %08x", current_key->serial);
+
+ key_invalidate(current_key);
+ key_put(current_key);
+ rc = key_unlink(keyring, current_key);
+ if (rc) {
+ pr_dbf_msg("Couldn't unlink key %08x: %d", current_key->serial, rc);
+ break;
+ }
+ }
+out:
+ kfree(key_array);
+ return rc;
+}
+
+static struct key *find_cs_keyring(void)
+{
+ key_ref_t cs_keyring_ref;
+ struct key *cs_keyring;
+
+ cs_keyring_ref = keyring_search(make_key_ref(get_user_session_keyring(), true),
+ &key_type_keyring, CERT_STORE_KEYRING_NAME,
+ false);
+ if (!IS_ERR(cs_keyring_ref)) {
+ cs_keyring = key_ref_to_ptr(cs_keyring_ref);
+ key_ref_put(cs_keyring_ref);
+ goto found;
+ }
+ /* Search default locations: thread, process, session keyrings */
+ cs_keyring = request_key(&key_type_keyring, CERT_STORE_KEYRING_NAME, NULL);
+ if (IS_ERR(cs_keyring))
+ return NULL;
+ key_put(cs_keyring);
+found:
+ return cs_keyring;
+}
+
+static void cleanup_cs_keys(void)
+{
+ struct key *cs_keyring;
+
+ cs_keyring = find_cs_keyring();
+ if (!cs_keyring)
+ return;
+
+ pr_dbf_msg("Found cert_store keyring. Purging...");
+ /*
+ * Remove cert_store_key_type in case invalidation
+ * of old cert_store keys failed (= severe error).
+ */
+ if (invalidate_keyring_keys(cs_keyring))
+ unregister_key_type(&key_type_cert_store_key);
+
+ keyring_clear(cs_keyring);
+ key_invalidate(cs_keyring);
+ key_put(cs_keyring);
+ key_unlink(get_user_session_keyring(), cs_keyring);
+}
+
+static struct key *create_cs_keyring(void)
+{
+ static struct key *cs_keyring;
+
+ /* Cleanup previous cs_keyring and all associated keys if any. */
+ cleanup_cs_keys();
+ cs_keyring = keyring_alloc(CERT_STORE_KEYRING_NAME, GLOBAL_ROOT_UID,
+ GLOBAL_ROOT_GID, current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_SET_KEEP,
+ NULL, get_user_session_keyring());
+ if (IS_ERR(cs_keyring)) {
+ pr_dbf_msg("Can't allocate cert_store keyring");
+ return NULL;
+ }
+
+ pr_dbf_msg("Successfully allocated cert_store keyring: %08x", cs_keyring->serial);
+
+ /*
+ * In case a previous clean-up ran into an
+ * error and unregistered key type.
+ */
+ register_key_type(&key_type_cert_store_key);
+
+ return cs_keyring;
+}
+
+/*
+ * Allocate memory and create key description in format
+ * [key name in EBCDIC]:[VCE index]:[CS token].
+ * Return a pointer to key description or NULL if memory
+ * allocation failed. Memory should be freed by caller.
+ */
+static char *get_key_description(struct vcssb *vcssb, const struct vce *vce)
+{
+ size_t len, name_len;
+ u32 cs_token;
+ char *desc;
+
+ cs_token = vcssb->cs_token;
+ /* Description string contains "%64s:%04u:%08u\0". */
+ name_len = sizeof(vce->vce_hdr.vc_name);
+ len = name_len + 1 + 4 + 1 + 8 + 1;
+ desc = kmalloc(len, GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ memcpy(desc, vce->vce_hdr.vc_name, name_len);
+ sprintf(desc + name_len, ":%04u:%08u", vce->vce_hdr.vc_index, cs_token);
+
+ return desc;
+}
+
+/*
+ * Create a key of type "cert_store_key" using the data from VCE for key
+ * payload and key description. Link the key to "cert_store" keyring.
+ */
+static int create_key_from_vce(struct vcssb *vcssb, struct vce *vce,
+ struct key *keyring)
+{
+ key_ref_t newkey;
+ char *desc;
+ int rc;
+
+ desc = get_key_description(vcssb, vce);
+ if (!desc)
+ return -ENOMEM;
+
+ newkey = key_create_or_update(
+ make_key_ref(keyring, true), CERT_STORE_KEY_TYPE_NAME,
+ desc, (u8 *)vce + vce->vce_hdr.vc_offset,
+ vce->vce_hdr.vc_length,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA);
+
+ rc = PTR_ERR_OR_ZERO(newkey);
+ if (rc) {
+ pr_dbf_msg("Couldn't create a key from Certificate Entry (%d)", rc);
+ rc = -ENOKEY;
+ goto out;
+ }
+
+ key_ref_put(newkey);
+out:
+ kfree(desc);
+ return rc;
+}
+
+/* Get Verification Certificate Storage Size block with DIAG320 subcode 1. */
+static int get_vcssb(struct vcssb *vcssb)
+{
+ int diag320_rc;
+
+ memset(vcssb, 0, sizeof(*vcssb));
+ vcssb->vcssb_length = VCSSB_LEN_BYTES;
+ diag320_rc = diag320(DIAG320_STORAGE, vcssb);
+ pr_dbf_vcssb(vcssb);
+
+ if (diag320_rc != DIAG320_RC_OK) {
+ pr_dbf_msg("Diag 320 Subcode 1 returned bad RC: %04x", diag320_rc);
+ return -EIO;
+ }
+ if (vcssb->vcssb_length == VCSSB_LEN_NO_CERTS) {
+ pr_dbf_msg("No certificates available for current configuration");
+ return -ENOKEY;
+ }
+
+ return 0;
+}
+
+static u32 get_4k_mult_vcb_size(struct vcssb *vcssb)
+{
+ return round_up(vcssb->max_single_vcb_length, PAGE_SIZE);
+}
+
+/* Fill input fields of single-entry VCB that will be read by LPAR. */
+static void fill_vcb_input(struct vcssb *vcssb, struct vcb *vcb, u16 index)
+{
+ memset(vcb, 0, sizeof(*vcb));
+ vcb->vcb_hdr.vcb_input_length = get_4k_mult_vcb_size(vcssb);
+ vcb->vcb_hdr.cs_token = vcssb->cs_token;
+
+ /* Request single entry. */
+ vcb->vcb_hdr.first_vc_index = index;
+ vcb->vcb_hdr.last_vc_index = index;
+}
+
+static void extract_vce_from_sevcb(struct vcb *vcb, struct vce *vce)
+{
+ struct vce *extracted_vce;
+
+ extracted_vce = (struct vce *)vcb->vcb_buf;
+ memcpy(vce, vcb->vcb_buf, extracted_vce->vce_hdr.vce_length);
+ pr_dbf_vce(vce);
+}
+
+static int get_sevcb(struct vcssb *vcssb, u16 index, struct vcb *vcb)
+{
+ int rc, diag320_rc;
+
+ fill_vcb_input(vcssb, vcb, index);
+
+ diag320_rc = diag320(DIAG320_CERT_BLOCK, vcb);
+ pr_dbf_msg("Diag 320 Subcode2 RC %2x", diag320_rc);
+ pr_dbf_vcb(vcb);
+
+ switch (diag320_rc) {
+ case DIAG320_RC_OK:
+ rc = 0;
+ if (vcb->vcb_hdr.vcb_output_length == VCB_LEN_NO_CERTS) {
+ pr_dbf_msg("No certificate entry for index %u", index);
+ rc = -ENOKEY;
+ } else if (vcb->vcb_hdr.remaining_vc_count != 0) {
+ /* Retry on insufficient space. */
+ pr_dbf_msg("Couldn't get all requested certificates");
+ rc = -EAGAIN;
+ }
+ break;
+ case DIAG320_RC_CS_NOMATCH:
+ pr_dbf_msg("Certificate Store token mismatch");
+ rc = -EAGAIN;
+ break;
+ default:
+ pr_dbf_msg("Diag 320 Subcode2 returned bad rc (0x%4x)", diag320_rc);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate memory for single-entry VCB, get VCB via DIAG320 subcode 2 call,
+ * extract VCE and create a key from its certificate.
+ */
+static int create_key_from_sevcb(struct vcssb *vcssb, u16 index,
+ struct key *keyring)
+{
+ struct vcb *vcb;
+ struct vce *vce;
+ int rc;
+
+ rc = -ENOMEM;
+ vcb = vmalloc(get_4k_mult_vcb_size(vcssb));
+ vce = vmalloc(vcssb->max_single_vcb_length - sizeof(vcb->vcb_hdr));
+ if (!vcb || !vce)
+ goto out;
+
+ rc = get_sevcb(vcssb, index, vcb);
+ if (rc)
+ goto out;
+
+ extract_vce_from_sevcb(vcb, vce);
+ rc = check_certificate_valid(vce);
+ if (rc)
+ goto out;
+
+ rc = create_key_from_vce(vcssb, vce, keyring);
+ if (rc)
+ goto out;
+
+ pr_dbf_msg("Successfully created key from Certificate Entry %d", index);
+out:
+ vfree(vce);
+ vfree(vcb);
+ return rc;
+}
+
+/*
+ * Request a single-entry VCB for each VCE available for the partition.
+ * Create a key from it and link it to cert_store keyring. If no keys
+ * could be created (i.e. VCEs were invalid) return -ENOKEY.
+ */
+static int add_certificates_to_keyring(struct vcssb *vcssb, struct key *keyring)
+{
+ int rc, index, count, added;
+
+ count = 0;
+ added = 0;
+	/* Certificate Store entry indices start at 1 and have no gaps. */
+ for (index = 1; index < vcssb->total_vc_index_count + 1; index++) {
+ pr_dbf_msg("Creating key from VCE %u", index);
+ rc = create_key_from_sevcb(vcssb, index, keyring);
+ count++;
+
+ if (rc == -EAGAIN)
+ return rc;
+
+ if (rc)
+ pr_dbf_msg("Creating key from VCE %u failed (%d)", index, rc);
+ else
+ added++;
+ }
+
+ if (added == 0) {
+ pr_dbf_msg("Processed %d entries. No keys created", count);
+ return -ENOKEY;
+ }
+
+ pr_info("Added %d of %d keys to cert_store keyring", added, count);
+
+ /*
+	 * Do not allow linking additional keys to the certificate store keyring
+	 * after all the VCEs have been processed.
+ */
+ rc = keyring_restrict(make_key_ref(keyring, true), NULL, NULL);
+ if (rc)
+ pr_dbf_msg("Failed to set restriction to cert_store keyring (%d)", rc);
+
+ return 0;
+}
+
+/*
+ * Check which DIAG320 subcodes are installed.
+ * Return -ENOENT if subcodes 1 or 2 are not available.
+ */
+static int query_diag320_subcodes(void)
+{
+ unsigned long ism[ISM_LEN_DWORDS];
+ int rc;
+
+ rc = diag320(0, ism);
+ if (rc != DIAG320_RC_OK) {
+ pr_dbf_msg("DIAG320 subcode query returned %04x", rc);
+ return -ENOENT;
+ }
+
+ debug_text_event(cert_store_hexdump, 3, "DIAG320 Subcode 0");
+ debug_event(cert_store_hexdump, 3, ism, sizeof(ism));
+
+ if (!test_bit_inv(1, ism) || !test_bit_inv(2, ism)) {
+ pr_dbf_msg("Not all required DIAG320 subcodes are installed");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/*
+ * Check if Certificate Store is supported by the firmware and DIAG320 subcodes
+ * 1 and 2 are installed. Create cert_store keyring and link all certificates
+ * available for the current partition to it as "cert_store_key" type
+ * keys. On refresh or error invalidate cert_store keyring and destroy
+ * all keys of "cert_store_key" type.
+ */
+static int fill_cs_keyring(void)
+{
+ struct key *cs_keyring;
+ struct vcssb *vcssb;
+ int rc;
+
+ rc = -ENOMEM;
+ vcssb = kmalloc(VCSSB_LEN_BYTES, GFP_KERNEL);
+ if (!vcssb)
+ goto cleanup_keys;
+
+ rc = -ENOENT;
+ if (!sclp.has_diag320) {
+ pr_dbf_msg("Certificate Store is not supported");
+ goto cleanup_keys;
+ }
+
+ rc = query_diag320_subcodes();
+ if (rc)
+ goto cleanup_keys;
+
+ rc = get_vcssb(vcssb);
+ if (rc)
+ goto cleanup_keys;
+
+ rc = -ENOMEM;
+ cs_keyring = create_cs_keyring();
+ if (!cs_keyring)
+ goto cleanup_keys;
+
+ rc = add_certificates_to_keyring(vcssb, cs_keyring);
+ if (rc)
+ goto cleanup_cs_keyring;
+
+ goto out;
+
+cleanup_cs_keyring:
+ key_put(cs_keyring);
+cleanup_keys:
+ cleanup_cs_keys();
+out:
+ kfree(vcssb);
+ return rc;
+}
+
+static DEFINE_MUTEX(cs_refresh_lock);
+static int cs_status_val = -1;
+
+static ssize_t cs_status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ if (cs_status_val == -1)
+ return sysfs_emit(buf, "uninitialized\n");
+ else if (cs_status_val == 0)
+ return sysfs_emit(buf, "ok\n");
+
+ return sysfs_emit(buf, "failed (%d)\n", cs_status_val);
+}
+
+static struct kobj_attribute cs_status_attr = __ATTR_RO(cs_status);
+
+static ssize_t refresh_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc, retries;
+
+ pr_dbf_msg("Refresh certificate store information requested");
+ rc = mutex_lock_interruptible(&cs_refresh_lock);
+ if (rc)
+ return rc;
+
+ for (retries = 0; retries < DIAG_MAX_RETRIES; retries++) {
+ /* Request certificates from certificate store. */
+ rc = fill_cs_keyring();
+ if (rc)
+ pr_dbf_msg("Failed to refresh certificate store information (%d)", rc);
+ if (rc != -EAGAIN)
+ break;
+ }
+ cs_status_val = rc;
+ mutex_unlock(&cs_refresh_lock);
+
+ return rc ?: count;
+}
+
+static struct kobj_attribute refresh_attr = __ATTR_WO(refresh);
+
+static const struct attribute *cert_store_attrs[] __initconst = {
+ &cs_status_attr.attr,
+ &refresh_attr.attr,
+ NULL,
+};
+
+static struct kobject *cert_store_kobj;
+
+static int __init cert_store_init(void)
+{
+ int rc = -ENOMEM;
+
+ cert_store_dbf = debug_register("cert_store_msg", 10, 1, 64);
+ if (!cert_store_dbf)
+ goto cleanup_dbf;
+
+ cert_store_hexdump = debug_register("cert_store_hexdump", 3, 1, 128);
+ if (!cert_store_hexdump)
+ goto cleanup_dbf;
+
+ debug_register_view(cert_store_hexdump, &debug_hex_ascii_view);
+ debug_register_view(cert_store_dbf, &debug_sprintf_view);
+
+ /* Create directory /sys/firmware/cert_store. */
+ cert_store_kobj = kobject_create_and_add("cert_store", firmware_kobj);
+ if (!cert_store_kobj)
+ goto cleanup_dbf;
+
+ rc = sysfs_create_files(cert_store_kobj, cert_store_attrs);
+ if (rc)
+ goto cleanup_kobj;
+
+ register_key_type(&key_type_cert_store_key);
+
+ return rc;
+
+cleanup_kobj:
+ kobject_put(cert_store_kobj);
+cleanup_dbf:
+ debug_unregister(cert_store_dbf);
+ debug_unregister(cert_store_hexdump);
+
+ return rc;
+}
+device_initcall(cert_store_init);
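/*
 * Editor's note -- illustrative sketch, not part of the patch above: a
 * minimal userspace program exercising the sysfs interface that
 * cert_store_init() creates. It assumes the files
 * /sys/firmware/cert_store/refresh and /sys/firmware/cert_store/cs_status
 * exist as set up above; the error handling is reduced to a bare minimum.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char status[64];
	ssize_t n;
	int fd;

	/* Any write to "refresh" triggers refresh_store() and a DIAG320 rescan. */
	fd = open("/sys/firmware/cert_store/refresh", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) < 0) {
		perror("refresh");
		return 1;
	}
	close(fd);

	/* "cs_status" reports "uninitialized", "ok" or "failed (<rc>)". */
	fd = open("/sys/firmware/cert_store/cs_status", O_RDONLY);
	if (fd < 0) {
		perror("cs_status");
		return 1;
	}
	n = read(fd, status, sizeof(status) - 1);
	close(fd);
	if (n < 0)
		return 1;
	status[n] = '\0';
	printf("cert_store status: %s", status);
	return 0;
}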
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 82079f2d8583..f9f06cd8fcee 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -11,6 +11,7 @@
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
@@ -50,6 +51,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" },
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
[DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
+ [DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" },
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
@@ -167,8 +169,29 @@ static inline int __diag204(unsigned long *subcode, unsigned long size, void *ad
return rp.odd;
}
+/**
+ * diag204() - Issue diagnose 204 call.
+ * @subcode: Subcode of diagnose 204 to be executed.
+ * @size: Size of area in pages which @addr points to, if given.
+ * @addr: Vmalloc'ed memory area where the result is written to.
+ *
+ * Execute diagnose 204 with the given subcode and write the result to the
+ * memory area specified with @addr. For subcodes which do not write a
+ * result to memory both @size and @addr must be zero. If @addr is
+ * specified it must be page aligned and must have been allocated with
+ * vmalloc(). Conversion to real / physical addresses will be handled by
+ * this function if required.
+ */
int diag204(unsigned long subcode, unsigned long size, void *addr)
{
+ if (addr) {
+ if (WARN_ON_ONCE(!is_vmalloc_addr(addr)))
+ return -1;
+ if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, PAGE_SIZE)))
+ return -1;
+ }
+ if ((subcode & DIAG204_SUBCODE_MASK) == DIAG204_SUBC_STIB4)
+ addr = (void *)pfn_to_phys(vmalloc_to_pfn(addr));
diag_stat_inc(DIAG_STAT_X204);
size = __diag204(&subcode, size, addr);
if (subcode)
@@ -200,7 +223,7 @@ int diag210(struct diag210 *addr)
EXPORT_SYMBOL(diag210);
/*
- * Diagnose 210: Get information about a virtual device
+ * Diagnose 8C: Access 3270 Display Device Information
*/
int diag8c(struct diag8c *addr, struct ccw_dev_id *devno)
{
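/*
 * Editor's note -- illustrative sketch, not part of the patch above: a
 * hypothetical caller following the constraints documented in the new
 * diag204() kernel-doc -- the buffer must come from vmalloc() (which
 * returns page-aligned memory) and @size is given in pages.
 * DIAG204_SUBC_STIB4 is taken from the hunk above; real callers may need
 * to combine the subcode with additional flags, and the page count here
 * is an arbitrary placeholder.
 */
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>
#include <asm/diag.h>
#include <asm/page.h>

static int example_diag204_query(unsigned long pages)
{
	void *buf;
	int rc;

	/* vmalloc() memory is page aligned, as diag204() requires. */
	buf = vmalloc(pages * PAGE_SIZE);
	if (!buf)
		return -ENOMEM;

	/* For STIB4 subcodes diag204() converts buf to a physical address itself. */
	rc = diag204(DIAG204_SUBC_STIB4, pages, buf);
	if (rc < 0)
		pr_warn("diag204 failed: %d\n", rc);

	vfree(buf);
	return rc < 0 ? rc : 0;
}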
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index 7f8246c9be08..0e51fa537262 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * ECBDIC -> ASCII, ASCII -> ECBDIC,
+ * EBCDIC -> ASCII, ASCII -> EBCDIC,
* upper to lower case (EBCDIC) conversion tables.
*
* S390 version
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index a660f4b6d654..49a11f6dd7ae 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -8,6 +8,7 @@
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
@@ -26,7 +27,6 @@
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
-#include <asm/export.h>
#include <asm/nospec-insn.h>
_LPP_OFFSET = __LC_LPP
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 85a00d97a314..05e51666db03 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -266,7 +266,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
{ \
- strncpy(_value, buf, sizeof(_value) - 1); \
+ strscpy(_value, buf, sizeof(_value)); \
strim(_value); \
return len; \
} \
@@ -557,15 +557,12 @@ static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
static struct attribute *ipl_fcp_attrs[] = {
- &sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_fcp_wwpn_attr.attr,
&sys_ipl_fcp_lun_attr.attr,
&sys_ipl_fcp_bootprog_attr.attr,
&sys_ipl_fcp_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
- &sys_ipl_secure_attr.attr,
- &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -575,14 +572,11 @@ static struct attribute_group ipl_fcp_attr_group = {
};
static struct attribute *ipl_nvme_attrs[] = {
- &sys_ipl_type_attr.attr,
&sys_ipl_nvme_fid_attr.attr,
&sys_ipl_nvme_nsid_attr.attr,
&sys_ipl_nvme_bootprog_attr.attr,
&sys_ipl_nvme_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
- &sys_ipl_secure_attr.attr,
- &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -592,13 +586,10 @@ static struct attribute_group ipl_nvme_attr_group = {
};
static struct attribute *ipl_eckd_attrs[] = {
- &sys_ipl_type_attr.attr,
&sys_ipl_eckd_bootprog_attr.attr,
&sys_ipl_eckd_br_chr_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_device_attr.attr,
- &sys_ipl_secure_attr.attr,
- &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -610,21 +601,15 @@ static struct attribute_group ipl_eckd_attr_group = {
/* CCW ipl device attributes */
static struct attribute *ipl_ccw_attrs_vm[] = {
- &sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
- &sys_ipl_secure_attr.attr,
- &sys_ipl_has_secure_attr.attr,
NULL,
};
static struct attribute *ipl_ccw_attrs_lpar[] = {
- &sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
- &sys_ipl_secure_attr.attr,
- &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -636,15 +621,15 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};
-/* UNKNOWN ipl device attributes */
-
-static struct attribute *ipl_unknown_attrs[] = {
+static struct attribute *ipl_common_attrs[] = {
&sys_ipl_type_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
-static struct attribute_group ipl_unknown_attr_group = {
- .attrs = ipl_unknown_attrs,
+static struct attribute_group ipl_common_attr_group = {
+ .attrs = ipl_common_attrs,
};
static struct kset *ipl_kset;
@@ -668,6 +653,9 @@ static int __init ipl_init(void)
rc = -ENOMEM;
goto out;
}
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_common_attr_group);
+ if (rc)
+ goto out;
switch (ipl_info.type) {
case IPL_TYPE_CCW:
if (MACHINE_IS_VM)
@@ -689,8 +677,6 @@ static int __init ipl_init(void)
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group);
break;
default:
- rc = sysfs_create_group(&ipl_kset->kobj,
- &ipl_unknown_attr_group);
break;
}
out:
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 6d9276c096a6..12a2bd4fc88c 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
+#include <asm/pfault.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/smp.h>
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 2df94d32140c..8d207b82d9fe 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -188,7 +188,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
- ptr = (void *)ipl_cert_list_addr;
+ ptr = __va(ipl_cert_list_addr);
end = ptr + ipl_cert_list_size;
ncerts = 0;
while (ptr < end) {
@@ -200,7 +200,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
addr = data->memsz + data->report->size;
addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
- ptr = (void *)ipl_cert_list_addr;
+ ptr = __va(ipl_cert_list_addr);
while (ptr < end) {
len = *(unsigned int *)ptr;
ptr += sizeof(len);
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index dbece2803c50..ae4d4fd9afcd 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -9,15 +9,20 @@
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
-#include <asm/export.h>
+#define STACK_FRAME_SIZE_PTREGS (STACK_FRAME_OVERHEAD + __PT_SIZE)
+#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
+#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
+#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
+
+#define STACK_FRAME_SIZE_FREGS (STACK_FRAME_OVERHEAD + __FTRACE_REGS_SIZE)
+#define STACK_FREGS (STACK_FRAME_OVERHEAD)
+#define STACK_FREGS_PTREGS (STACK_FRAME_OVERHEAD + __FTRACE_REGS_PT_REGS)
+#define STACK_FREGS_PTREGS_GPRS (STACK_FREGS_PTREGS + __PT_GPRS)
+#define STACK_FREGS_PTREGS_PSW (STACK_FREGS_PTREGS + __PT_PSW)
+#define STACK_FREGS_PTREGS_ORIG_GPR2 (STACK_FREGS_PTREGS + __PT_ORIG_GPR2)
+#define STACK_FREGS_PTREGS_FLAGS (STACK_FREGS_PTREGS + __PT_FLAGS)
-#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
-#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
-#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
-#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
-#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
-#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
/* packed stack: allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
@@ -53,23 +58,23 @@ SYM_CODE_END(ftrace_stub_direct_tramp)
stg %r1,__SF_BACKCHAIN(%r15)
stg %r0,(__SF_GPRS+8*8)(%r15)
stg %r15,(__SF_GPRS+9*8)(%r15)
- # allocate pt_regs and stack frame for ftrace_trace_function
- aghi %r15,-STACK_FRAME_SIZE
- stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
- xc STACK_PTREGS_ORIG_GPR2(8,%r15),STACK_PTREGS_ORIG_GPR2(%r15)
+ # allocate ftrace_regs and stack frame for ftrace_trace_function
+ aghi %r15,-STACK_FRAME_SIZE_FREGS
+ stg %r1,(STACK_FREGS_PTREGS_GPRS+15*8)(%r15)
+ xc STACK_FREGS_PTREGS_ORIG_GPR2(8,%r15),STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
.if \allregs == 1
- stg %r14,(STACK_PTREGS_PSW)(%r15)
- mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
+ stg %r14,(STACK_FREGS_PTREGS_PSW)(%r15)
+ mvghi STACK_FREGS_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
.else
- xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
+ xc STACK_FREGS_PTREGS_FLAGS(8,%r15),STACK_FREGS_PTREGS_FLAGS(%r15)
.endif
lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
aghi %r1,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
- stg %r0,(STACK_PTREGS_PSW+8)(%r15)
- stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
+ stg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
+ stmg %r2,%r14,(STACK_FREGS_PTREGS_GPRS+2*8)(%r15)
.endm
SYM_CODE_START(ftrace_regs_caller)
@@ -96,30 +101,30 @@ SYM_CODE_START(ftrace_common)
lg %r1,0(%r1)
#endif
lgr %r3,%r14
- la %r5,STACK_PTREGS(%r15)
+ la %r5,STACK_FREGS(%r15)
BASR_EX %r14,%r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
j .Lftrace_graph_caller_end
- lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
- lg %r4,(STACK_PTREGS_PSW+8)(%r15)
+ lmg %r2,%r3,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
+ lg %r4,(STACK_FREGS_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
- stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
+ stg %r2,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
.Lftrace_graph_caller_end:
#endif
- lg %r0,(STACK_PTREGS_PSW+8)(%r15)
+ lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- ltg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
+ ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
locgrz %r1,%r0
#else
- lg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
+ lg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
ltgr %r1,%r1
jnz 0f
lgr %r1,%r0
#endif
-0: lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+0: lmg %r2,%r15,(STACK_FREGS_PTREGS_GPRS+2*8)(%r15)
BR_EX %r1
SYM_CODE_END(ftrace_common)
@@ -128,10 +133,14 @@ SYM_CODE_END(ftrace_common)
SYM_FUNC_START(return_to_handler)
stmg %r2,%r5,32(%r15)
lgr %r1,%r15
- aghi %r15,-STACK_FRAME_OVERHEAD
+ aghi %r15,-(STACK_FRAME_OVERHEAD+__FGRAPH_RET_SIZE)
stg %r1,__SF_BACKCHAIN(%r15)
+ la %r3,STACK_FRAME_OVERHEAD(%r15)
+ stg %r1,__FGRAPH_RET_FP(%r3)
+ stg %r2,__FGRAPH_RET_GPR2(%r3)
+ lgr %r2,%r3
brasl %r14,ftrace_return_to_handler
- aghi %r15,STACK_FRAME_OVERHEAD
+ aghi %r15,STACK_FRAME_OVERHEAD+__FGRAPH_RET_SIZE
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
BR_EX %r14
@@ -160,11 +169,11 @@ SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
SYM_CODE_START(arch_rethook_trampoline)
stg %r14,(__SF_GPRS+8*8)(%r15)
- lay %r15,-STACK_FRAME_SIZE(%r15)
+ lay %r15,-STACK_FRAME_SIZE_PTREGS(%r15)
stmg %r0,%r14,STACK_PTREGS_GPRS(%r15)
# store original stack pointer in backchain and pt_regs
- lay %r7,STACK_FRAME_SIZE(%r15)
+ lay %r7,STACK_FRAME_SIZE_PTREGS(%r15)
stg %r7,__SF_BACKCHAIN(%r15)
stg %r7,STACK_PTREGS_GPRS+(15*8)(%r15)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 00d76448319d..c744104e4a9c 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -146,6 +146,7 @@ static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
int __bootdata(noexec_disabled);
+unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
struct physmem_info __bootdata(physmem_info);
@@ -874,7 +875,7 @@ static void __init log_component_list(void)
pr_info("Linux is running with Secure-IPL enabled\n");
else
pr_info("Linux is running with Secure-IPL disabled\n");
- ptr = (void *) early_ipl_comp_list_addr;
+ ptr = __va(early_ipl_comp_list_addr);
end = (void *) ptr + early_ipl_comp_list_size;
pr_info("The IPL report contains the following components:\n");
while (ptr < end) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f9a2b755f510..a4edb7ea66ea 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -37,6 +37,7 @@
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
+#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
@@ -252,8 +253,9 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
- struct lowcore *lc = lowcore_ptr[cpu];
+ struct lowcore *lc, *abs_lc;
+ lc = lowcore_ptr[cpu];
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
lc->cpu_nr = cpu;
@@ -266,7 +268,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
lc->machine_flags = S390_lowcore.machine_flags;
lc->user_timer = lc->system_timer =
lc->steal_timer = lc->avg_steal_timer = 0;
- __ctl_store(lc->cregs_save_area, 0, 15);
+ abs_lc = get_abs_lowcore();
+ memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
+ put_abs_lowcore(abs_lc);
lc->cregs_save_area[1] = lc->kernel_asce;
lc->cregs_save_area[7] = lc->user_asce;
save_access_regs((unsigned int *) lc->access_regs_save_area);
@@ -606,8 +610,8 @@ void smp_ctl_set_clear_bit(int cr, int bit, bool set)
ctlreg = (ctlreg & parms.andval) | parms.orval;
abs_lc->cregs_save_area[cr] = ctlreg;
put_abs_lowcore(abs_lc);
- spin_unlock(&ctl_lock);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
+ spin_unlock(&ctl_lock);
}
EXPORT_SYMBOL(smp_ctl_set_clear_bit);
@@ -927,12 +931,18 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
rc = pcpu_alloc_lowcore(pcpu, cpu);
if (rc)
return rc;
+ /*
+ * Make sure global control register contents do not change
+ * until new CPU has initialized control registers.
+ */
+ spin_lock(&ctl_lock);
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
+ spin_unlock(&ctl_lock);
return 0;
}
diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
index 2ea7f208f0e7..30bb20461db4 100644
--- a/arch/s390/kernel/sthyi.c
+++ b/arch/s390/kernel/sthyi.c
@@ -317,7 +317,9 @@ static void fill_diag(struct sthyi_sctns *sctns)
if (pages <= 0)
return;
- diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
+ diag204_buf = __vmalloc_node(array_size(pages, PAGE_SIZE),
+ PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
+ __builtin_return_address(0));
if (!diag204_buf)
return;
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index a6935af2235c..0122cc156952 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -454,3 +454,4 @@
449 common futex_waitv sys_futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 66f0eb1c872b..b771f1b4cdd1 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -88,7 +88,7 @@ fail:
* Requests the Ultravisor to pin the page in the shared state. This will
* cause an intercept when the guest attempts to unshare the pinned page.
*/
-static int uv_pin_shared(unsigned long paddr)
+int uv_pin_shared(unsigned long paddr)
{
struct uv_cb_cfs uvcb = {
.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
@@ -100,6 +100,7 @@ static int uv_pin_shared(unsigned long paddr)
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL_GPL(uv_pin_shared);
/*
* Requests the Ultravisor to destroy a guest page and make it
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 0261d42c7d01..a7ea80cfa445 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -270,18 +270,6 @@ static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
return vcpu->arch.pv.handle;
}
-static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
-{
- lockdep_assert_held(&kvm->lock);
- return !!kvm_s390_pv_get_handle(kvm);
-}
-
-static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
-{
- lockdep_assert_held(&vcpu->mutex);
- return !!kvm_s390_pv_cpu_get_handle(vcpu);
-}
-
/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index bf1fdc7bf89e..8d3f39a8a11e 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -18,6 +18,20 @@
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
+bool kvm_s390_pv_is_protected(struct kvm *kvm)
+{
+ lockdep_assert_held(&kvm->lock);
+ return !!kvm_s390_pv_get_handle(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);
+
+bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
+{
+ lockdep_assert_held(&vcpu->mutex);
+ return !!kvm_s390_pv_cpu_get_handle(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
+
/**
* struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
* be destroyed
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 5a9a55de2e10..08f60a42b9a6 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -5,8 +5,8 @@
* Copyright IBM Corp. 2012
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
#include <asm/nospec-insn.h>
GEN_BR_THUNK %r14
diff --git a/arch/s390/lib/tishift.S b/arch/s390/lib/tishift.S
index de33cf02cfd2..96214f51f49b 100644
--- a/arch/s390/lib/tishift.S
+++ b/arch/s390/lib/tishift.S
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
-#include <asm/export.h>
.section .noinstr.text, "ax"
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index d90db06a8af5..352ff520fd94 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
+obj-$(CONFIG_PFAULT) += pfault.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 5300c6867d5e..f47515313226 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
} else
free_page((unsigned long) npa);
}
- diag10_range(virt_to_pfn(addr), 1);
+ diag10_range(virt_to_pfn((void *)addr), 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index ba5f80268878..afa5db750d92 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -297,7 +297,7 @@ static int pt_dump_init(void)
address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
- address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + PAGE_SIZE;
+ address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 1bc42ce26599..e41869f5cc95 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -640,10 +640,13 @@ void segment_warning(int rc, char *seg_name)
pr_err("There is not enough memory to load or query "
"DCSS %s\n", seg_name);
break;
- case -ERANGE:
- pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
- "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
+ case -ERANGE: {
+ struct range mhp_range = arch_get_mappable_range();
+
+ pr_err("DCSS %s exceeds the kernel mapping range (%llu) "
+ "and cannot be loaded\n", seg_name, mhp_range.end + 1);
break;
+ }
default:
break;
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2f123429a291..b5e1bea9194c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -43,8 +43,6 @@
#include "../kernel/entry.h"
#define __FAIL_ADDR_MASK -4096L
-#define __SUBCODE_MASK 0x0600
-#define __PF_RES_FIELD 0x8000000000000000ULL
/*
* Allocate private vm_fault_reason from top. Please make sure it won't
@@ -583,232 +581,6 @@ void do_dat_exception(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_dat_exception);
-#ifdef CONFIG_PFAULT
-/*
- * 'pfault' pseudo page faults routines.
- */
-static int pfault_disable;
-
-static int __init nopfault(char *str)
-{
- pfault_disable = 1;
- return 1;
-}
-
-__setup("nopfault", nopfault);
-
-struct pfault_refbk {
- u16 refdiagc;
- u16 reffcode;
- u16 refdwlen;
- u16 refversn;
- u64 refgaddr;
- u64 refselmk;
- u64 refcmpmk;
- u64 reserved;
-} __attribute__ ((packed, aligned(8)));
-
-static struct pfault_refbk pfault_init_refbk = {
- .refdiagc = 0x258,
- .reffcode = 0,
- .refdwlen = 5,
- .refversn = 2,
- .refgaddr = __LC_LPP,
- .refselmk = 1ULL << 48,
- .refcmpmk = 1ULL << 48,
- .reserved = __PF_RES_FIELD
-};
-
-int pfault_init(void)
-{
- int rc;
-
- if (pfault_disable)
- return -1;
- diag_stat_inc(DIAG_STAT_X258);
- asm volatile(
- " diag %1,%0,0x258\n"
- "0: j 2f\n"
- "1: la %0,8\n"
- "2:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc)
- : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
- return rc;
-}
-
-static struct pfault_refbk pfault_fini_refbk = {
- .refdiagc = 0x258,
- .reffcode = 1,
- .refdwlen = 5,
- .refversn = 2,
-};
-
-void pfault_fini(void)
-{
-
- if (pfault_disable)
- return;
- diag_stat_inc(DIAG_STAT_X258);
- asm volatile(
- " diag %0,0,0x258\n"
- "0: nopr %%r7\n"
- EX_TABLE(0b,0b)
- : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
-}
-
-static DEFINE_SPINLOCK(pfault_lock);
-static LIST_HEAD(pfault_list);
-
-#define PF_COMPLETE 0x0080
-
-/*
- * The mechanism of our pfault code: if Linux is running as guest, runs a user
- * space process and the user space process accesses a page that the host has
- * paged out we get a pfault interrupt.
- *
- * This allows us, within the guest, to schedule a different process. Without
- * this mechanism the host would have to suspend the whole virtual cpu until
- * the page has been paged in.
- *
- * So when we get such an interrupt then we set the state of the current task
- * to uninterruptible and also set the need_resched flag. Both happens within
- * interrupt context(!). If we later on want to return to user space we
- * recognize the need_resched flag and then call schedule(). It's not very
- * obvious how this works...
- *
- * Of course we have a lot of additional fun with the completion interrupt (->
- * host signals that a page of a process has been paged in and the process can
- * continue to run). This interrupt can arrive on any cpu and, since we have
- * virtual cpus, actually appear before the interrupt that signals that a page
- * is missing.
- */
-static void pfault_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct task_struct *tsk;
- __u16 subcode;
- pid_t pid;
-
- /*
- * Get the external interruption subcode & pfault initial/completion
- * signal bit. VM stores this in the 'cpu address' field associated
- * with the external interrupt.
- */
- subcode = ext_code.subcode;
- if ((subcode & 0xff00) != __SUBCODE_MASK)
- return;
- inc_irq_stat(IRQEXT_PFL);
- /* Get the token (= pid of the affected task). */
- pid = param64 & LPP_PID_MASK;
- rcu_read_lock();
- tsk = find_task_by_pid_ns(pid, &init_pid_ns);
- if (tsk)
- get_task_struct(tsk);
- rcu_read_unlock();
- if (!tsk)
- return;
- spin_lock(&pfault_lock);
- if (subcode & PF_COMPLETE) {
- /* signal bit is set -> a page has been swapped in by VM */
- if (tsk->thread.pfault_wait == 1) {
- /* Initial interrupt was faster than the completion
- * interrupt. pfault_wait is valid. Set pfault_wait
- * back to zero and wake up the process. This can
- * safely be done because the task is still sleeping
- * and can't produce new pfaults. */
- tsk->thread.pfault_wait = 0;
- list_del(&tsk->thread.list);
- wake_up_process(tsk);
- put_task_struct(tsk);
- } else {
- /* Completion interrupt was faster than initial
- * interrupt. Set pfault_wait to -1 so the initial
- * interrupt doesn't put the task to sleep.
- * If the task is not running, ignore the completion
- * interrupt since it must be a leftover of a PFAULT
- * CANCEL operation which didn't remove all pending
- * completion interrupts. */
- if (task_is_running(tsk))
- tsk->thread.pfault_wait = -1;
- }
- } else {
- /* signal bit not set -> a real page is missing. */
- if (WARN_ON_ONCE(tsk != current))
- goto out;
- if (tsk->thread.pfault_wait == 1) {
- /* Already on the list with a reference: put to sleep */
- goto block;
- } else if (tsk->thread.pfault_wait == -1) {
- /* Completion interrupt was faster than the initial
- * interrupt (pfault_wait == -1). Set pfault_wait
- * back to zero and exit. */
- tsk->thread.pfault_wait = 0;
- } else {
- /* Initial interrupt arrived before completion
- * interrupt. Let the task sleep.
- * An extra task reference is needed since a different
- * cpu may set the task state to TASK_RUNNING again
- * before the scheduler is reached. */
- get_task_struct(tsk);
- tsk->thread.pfault_wait = 1;
- list_add(&tsk->thread.list, &pfault_list);
-block:
- /* Since this must be a userspace fault, there
- * is no kernel task state to trample. Rely on the
- * return to userspace schedule() to block. */
- __set_current_state(TASK_UNINTERRUPTIBLE);
- set_tsk_need_resched(tsk);
- set_preempt_need_resched();
- }
- }
-out:
- spin_unlock(&pfault_lock);
- put_task_struct(tsk);
-}
-
-static int pfault_cpu_dead(unsigned int cpu)
-{
- struct thread_struct *thread, *next;
- struct task_struct *tsk;
-
- spin_lock_irq(&pfault_lock);
- list_for_each_entry_safe(thread, next, &pfault_list, list) {
- thread->pfault_wait = 0;
- list_del(&thread->list);
- tsk = container_of(thread, struct task_struct, thread);
- wake_up_process(tsk);
- put_task_struct(tsk);
- }
- spin_unlock_irq(&pfault_lock);
- return 0;
-}
-
-static int __init pfault_irq_init(void)
-{
- int rc;
-
- rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
- if (rc)
- goto out_extint;
- rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
- if (rc)
- goto out_pfault;
- irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
- NULL, pfault_cpu_dead);
- return 0;
-
-out_pfault:
- unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
-out_extint:
- pfault_disable = 1;
- return rc;
-}
-early_initcall(pfault_irq_init);
-
-#endif /* CONFIG_PFAULT */
-
#if IS_ENABLED(CONFIG_PGSTE)
void do_secure_storage_access(struct pt_regs *regs)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 9c8af31be970..906a7bfc2a78 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2514,6 +2514,7 @@ static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
static const struct mm_walk_ops thp_split_walk_ops = {
.pmd_entry = thp_split_walk_pmd_entry,
+ .walk_lock = PGWALK_WRLOCK_VERIFY,
};
static inline void thp_split_mm(struct mm_struct *mm)
@@ -2565,6 +2566,7 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
static const struct mm_walk_ops zap_zero_walk_ops = {
.pmd_entry = __zap_zero_pages,
+ .walk_lock = PGWALK_WRLOCK,
};
/*
@@ -2655,6 +2657,7 @@ static const struct mm_walk_ops enable_skey_walk_ops = {
.hugetlb_entry = __s390_enable_skey_hugetlb,
.pte_entry = __s390_enable_skey_pte,
.pmd_entry = __s390_enable_skey_pmd,
+ .walk_lock = PGWALK_WRLOCK,
};
int s390_enable_skey(void)
@@ -2692,6 +2695,7 @@ static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
static const struct mm_walk_ops reset_cmma_walk_ops = {
.pte_entry = __s390_reset_cmma,
+ .walk_lock = PGWALK_WRLOCK,
};
void s390_reset_cmma(struct mm_struct *mm)
@@ -2728,6 +2732,7 @@ static int s390_gather_pages(pte_t *ptep, unsigned long addr,
static const struct mm_walk_ops gather_pages_ops = {
.pte_entry = s390_gather_pages,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index cbe1df1e9c18..c805b3e2592b 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -86,11 +86,12 @@ size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
void *chunk;
pte_t pte;
+ BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
while (count) {
- phys = src & PAGE_MASK;
- offset = src & ~PAGE_MASK;
+ phys = src & MEMCPY_REAL_MASK;
+ offset = src & ~MEMCPY_REAL_MASK;
chunk = (void *)(__memcpy_real_area + offset);
- len = min(count, PAGE_SIZE - offset);
+ len = min(count, MEMCPY_REAL_SIZE - offset);
pte = mk_pte_phys(phys, PAGE_KERNEL_RO);
mutex_lock(&memcpy_real_mutex);
diff --git a/arch/s390/mm/pfault.c b/arch/s390/mm/pfault.c
new file mode 100644
index 000000000000..1aac13bb8f53
--- /dev/null
+++ b/arch/s390/mm/pfault.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/sched/task.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <asm/asm-extable.h>
+#include <asm/pfault.h>
+#include <asm/diag.h>
+
+#define __SUBCODE_MASK 0x0600
+#define __PF_RES_FIELD 0x8000000000000000UL
+
+/*
+ * 'pfault' pseudo page faults routines.
+ */
+static int pfault_disable;
+
+static int __init nopfault(char *str)
+{
+ pfault_disable = 1;
+ return 1;
+}
+early_param("nopfault", nopfault);
+
+struct pfault_refbk {
+ u16 refdiagc;
+ u16 reffcode;
+ u16 refdwlen;
+ u16 refversn;
+ u64 refgaddr;
+ u64 refselmk;
+ u64 refcmpmk;
+ u64 reserved;
+};
+
+static struct pfault_refbk pfault_init_refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 0,
+ .refdwlen = 5,
+ .refversn = 2,
+ .refgaddr = __LC_LPP,
+ .refselmk = 1UL << 48,
+ .refcmpmk = 1UL << 48,
+ .reserved = __PF_RES_FIELD
+};
+
+int __pfault_init(void)
+{
+ int rc = -EOPNOTSUPP;
+
+ if (pfault_disable)
+ return rc;
+ diag_stat_inc(DIAG_STAT_X258);
+ asm volatile(
+ " diag %[refbk],%[rc],0x258\n"
+ "0: nopr %%r7\n"
+ EX_TABLE(0b, 0b)
+ : [rc] "+d" (rc)
+ : [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
+ : "cc");
+ return rc;
+}
+
+static struct pfault_refbk pfault_fini_refbk = {
+ .refdiagc = 0x258,
+ .reffcode = 1,
+ .refdwlen = 5,
+ .refversn = 2,
+};
+
+void __pfault_fini(void)
+{
+ if (pfault_disable)
+ return;
+ diag_stat_inc(DIAG_STAT_X258);
+ asm volatile(
+ " diag %[refbk],0,0x258\n"
+ "0: nopr %%r7\n"
+ EX_TABLE(0b, 0b)
+ :
+ : [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
+ : "cc");
+}
+
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
+#define PF_COMPLETE 0x0080
+
+/*
+ * The mechanism of our pfault code: if Linux is running as a guest, runs a
+ * user space process, and that user space process accesses a page that the
+ * host has paged out, we get a pfault interrupt.
+ *
+ * This allows us, within the guest, to schedule a different process. Without
+ * this mechanism the host would have to suspend the whole virtual cpu until
+ * the page has been paged in.
+ *
+ * So when we get such an interrupt then we set the state of the current task
+ * to uninterruptible and also set the need_resched flag. Both happen within
+ * interrupt context(!). If we later on want to return to user space we
+ * recognize the need_resched flag and then call schedule(). It's not very
+ * obvious how this works...
+ *
+ * Of course we have a lot of additional fun with the completion interrupt (->
+ * host signals that a page of a process has been paged in and the process can
+ * continue to run). This interrupt can arrive on any cpu and, since we have
+ * virtual cpus, actually appear before the interrupt that signals that a page
+ * is missing.
+ */
+static void pfault_interrupt(struct ext_code ext_code,
+ unsigned int param32, unsigned long param64)
+{
+ struct task_struct *tsk;
+ __u16 subcode;
+ pid_t pid;
+
+ /*
+ * Get the external interruption subcode & pfault initial/completion
+ * signal bit. VM stores this in the 'cpu address' field associated
+ * with the external interrupt.
+ */
+ subcode = ext_code.subcode;
+ if ((subcode & 0xff00) != __SUBCODE_MASK)
+ return;
+ inc_irq_stat(IRQEXT_PFL);
+ /* Get the token (= pid of the affected task). */
+ pid = param64 & LPP_PID_MASK;
+ rcu_read_lock();
+ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (tsk)
+ get_task_struct(tsk);
+ rcu_read_unlock();
+ if (!tsk)
+ return;
+ spin_lock(&pfault_lock);
+ if (subcode & PF_COMPLETE) {
+ /* signal bit is set -> a page has been swapped in by VM */
+ if (tsk->thread.pfault_wait == 1) {
+ /*
+ * Initial interrupt was faster than the completion
+ * interrupt. pfault_wait is valid. Set pfault_wait
+ * back to zero and wake up the process. This can
+ * safely be done because the task is still sleeping
+ * and can't produce new pfaults.
+ */
+ tsk->thread.pfault_wait = 0;
+ list_del(&tsk->thread.list);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ } else {
+ /*
+ * Completion interrupt was faster than initial
+ * interrupt. Set pfault_wait to -1 so the initial
+ * interrupt doesn't put the task to sleep.
+ * If the task is not running, ignore the completion
+ * interrupt since it must be a leftover of a PFAULT
+ * CANCEL operation which didn't remove all pending
+ * completion interrupts.
+ */
+ if (task_is_running(tsk))
+ tsk->thread.pfault_wait = -1;
+ }
+ } else {
+ /* signal bit not set -> a real page is missing. */
+ if (WARN_ON_ONCE(tsk != current))
+ goto out;
+ if (tsk->thread.pfault_wait == 1) {
+ /* Already on the list with a reference: put to sleep */
+ goto block;
+ } else if (tsk->thread.pfault_wait == -1) {
+ /*
+ * Completion interrupt was faster than the initial
+ * interrupt (pfault_wait == -1). Set pfault_wait
+ * back to zero and exit.
+ */
+ tsk->thread.pfault_wait = 0;
+ } else {
+ /*
+ * Initial interrupt arrived before completion
+ * interrupt. Let the task sleep.
+ * An extra task reference is needed since a different
+ * cpu may set the task state to TASK_RUNNING again
+ * before the scheduler is reached.
+ */
+ get_task_struct(tsk);
+ tsk->thread.pfault_wait = 1;
+ list_add(&tsk->thread.list, &pfault_list);
+block:
+ /*
+ * Since this must be a userspace fault, there
+ * is no kernel task state to trample. Rely on the
+ * return to userspace schedule() to block.
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ set_tsk_need_resched(tsk);
+ set_preempt_need_resched();
+ }
+ }
+out:
+ spin_unlock(&pfault_lock);
+ put_task_struct(tsk);
+}
+
+static int pfault_cpu_dead(unsigned int cpu)
+{
+ struct thread_struct *thread, *next;
+ struct task_struct *tsk;
+
+ spin_lock_irq(&pfault_lock);
+ list_for_each_entry_safe(thread, next, &pfault_list, list) {
+ thread->pfault_wait = 0;
+ list_del(&thread->list);
+ tsk = container_of(thread, struct task_struct, thread);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ }
+ spin_unlock_irq(&pfault_lock);
+ return 0;
+}
+
+static int __init pfault_irq_init(void)
+{
+ int rc;
+
+ rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+ if (rc)
+ goto out_extint;
+ rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+ if (rc)
+ goto out_pfault;
+ irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+ cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
+ NULL, pfault_cpu_dead);
+ return 0;
+
+out_pfault:
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+out_extint:
+ pfault_disable = 1;
+ return rc;
+}
+early_initcall(pfault_irq_init);
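/*
 * Editor's note -- illustrative model, not part of the patch above: a
 * stand-alone sketch of the pfault_wait handshake described in the comment
 * in pfault_interrupt(), showing why the completion handler must cope with
 * arriving before the initial interrupt. Locking, task references and the
 * scheduler interaction of the real code are deliberately left out.
 */
#include <stdio.h>

/* 0: idle, 1: waiting for completion, -1: completion arrived first */
static int pfault_wait;

static void model_initial_interrupt(void)
{
	if (pfault_wait == -1)
		pfault_wait = 0;	/* completion already seen: do not sleep */
	else
		pfault_wait = 1;	/* normal order: the task goes to sleep */
}

static void model_completion_interrupt(void)
{
	if (pfault_wait == 1)
		pfault_wait = 0;	/* normal order: wake the task up */
	else
		pfault_wait = -1;	/* completion overtook the initial interrupt */
}

int main(void)
{
	/* Out-of-order delivery: completion first, then the initial interrupt. */
	model_completion_interrupt();
	model_initial_interrupt();
	printf("pfault_wait after out-of-order delivery: %d\n", pfault_wait);
	return 0;
}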
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 24a66670f5c3..e44243b9c0a4 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -36,7 +36,7 @@ static void vmem_free_pages(unsigned long addr, int order)
{
/* We don't expect boot memory to be removed ever. */
if (!slab_is_available() ||
- WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
+ WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
return;
free_pages(addr, order);
}
@@ -531,7 +531,7 @@ struct range arch_get_mappable_range(void)
struct range mhp_range;
mhp_range.start = 0;
- mhp_range.end = VMEM_MAX_PHYS - 1;
+ mhp_range.end = max_mappable - 1;
return mhp_range;
}
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index ee367798e388..ee90a91ed888 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -666,9 +666,4 @@ static struct miscdevice clp_misc_device = {
.fops = &clp_misc_fops,
};
-static int __init clp_misc_init(void)
-{
- return misc_register(&clp_misc_device);
-}
-
-device_initcall(clp_misc_init);
+builtin_misc_device(clp_misc_device);
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 97377e8c5025..e90d585c4d3e 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -454,3 +454,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index faa835f3c54a..4ed06c71c43f 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -497,3 +497,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 5026e7b9adfe..ff4bda95b9c7 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -554,7 +554,7 @@ struct mconsole_output {
static DEFINE_SPINLOCK(client_lock);
static LIST_HEAD(clients);
-static char console_buf[MCONSOLE_MAX_DATA];
+static char console_buf[MCONSOLE_MAX_DATA] __nonstring;
static void console_write(struct console *console, const char *string,
unsigned int len)
@@ -567,7 +567,7 @@ static void console_write(struct console *console, const char *string,
while (len > 0) {
n = min((size_t) len, ARRAY_SIZE(console_buf));
- strncpy(console_buf, string, n);
+ memcpy(console_buf, string, n);
string += n;
len -= n;
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index c650e428432b..c719e1ec4645 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -141,7 +141,7 @@ static int create_tap_fd(char *iface)
}
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
- strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
+ strscpy(ifr.ifr_name, iface, sizeof(ifr.ifr_name));
err = ioctl(fd, TUNSETIFF, (void *) &ifr);
if (err != 0) {
@@ -171,7 +171,7 @@ static int create_raw_fd(char *iface, int flags, int proto)
goto raw_fd_cleanup;
}
memset(&ifr, 0, sizeof(ifr));
- strncpy((char *)&ifr.ifr_name, iface, sizeof(ifr.ifr_name) - 1);
+ strscpy(ifr.ifr_name, iface, sizeof(ifr.ifr_name));
if (ioctl(fd, SIOCGIFINDEX, (void *) &ifr) < 0) {
err = -errno;
goto raw_fd_cleanup;
diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
index 0347a190429c..981e11d8e025 100644
--- a/arch/um/include/shared/user.h
+++ b/arch/um/include/shared/user.h
@@ -50,7 +50,6 @@ static inline int printk(const char *fmt, ...)
#endif
extern int in_aton(char *str);
-extern size_t strlcpy(char *, const char *, size_t);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strscpy(char *, const char *, size_t);
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index 7a1abb829930..288c422bfa96 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -40,7 +40,7 @@ static int __init make_uml_dir(void)
__func__);
goto err;
}
- strlcpy(dir, home, sizeof(dir));
+ strscpy(dir, home, sizeof(dir));
uml_dir++;
}
strlcat(dir, uml_dir, sizeof(dir));
@@ -243,7 +243,7 @@ int __init set_umid(char *name)
if (strlen(name) > UMID_LEN - 1)
return -E2BIG;
- strlcpy(umid, name, sizeof(umid));
+ strscpy(umid, name, sizeof(umid));
return 0;
}
@@ -262,7 +262,7 @@ static int __init make_umid(void)
make_uml_dir();
if (*umid == '\0') {
- strlcpy(tmp, uml_dir, sizeof(tmp));
+ strscpy(tmp, uml_dir, sizeof(tmp));
strlcat(tmp, "XXXXXX", sizeof(tmp));
fd = mkstemp(tmp);
if (fd < 0) {
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e36261b4ea14..8d9e4b362572 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1308,44 +1308,8 @@ config X86_REBOOTFIXUPS
Say N otherwise.
config MICROCODE
- bool "CPU microcode loading support"
- default y
+ def_bool y
depends on CPU_SUP_AMD || CPU_SUP_INTEL
- help
- If you say Y here, you will be able to update the microcode on
- Intel and AMD processors. The Intel support is for the IA32 family,
- e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
- AMD support is for families 0x10 and later. You will obviously need
- the actual microcode binary data itself which is not shipped with
- the Linux kernel.
-
- The preferred method to load microcode from a detached initrd is described
- in Documentation/arch/x86/microcode.rst. For that you need to enable
- CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
- initrd for microcode blobs.
-
- In addition, you can build the microcode into the kernel. For that you
- need to add the vendor-supplied microcode to the CONFIG_EXTRA_FIRMWARE
- config option.
-
-config MICROCODE_INTEL
- bool "Intel microcode loading support"
- depends on CPU_SUP_INTEL && MICROCODE
- default MICROCODE
- help
- This options enables microcode patch loading support for Intel
- processors.
-
- For the current Intel microcode data package go to
- <https://downloadcenter.intel.com> and search for
- 'Linux Processor Microcode Data File'.
-
-config MICROCODE_AMD
- bool "AMD microcode loading support"
- depends on CPU_SUP_AMD && MICROCODE
- help
- If you select this option, microcode patch loading support for AMD
- processors will be enabled.
config MICROCODE_LATE_LOADING
bool "Late microcode loading (DANGEROUS)"
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 40d2ff503079..71fc531b95b4 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -74,6 +74,11 @@ LDFLAGS_vmlinux += -z noexecstack
ifeq ($(CONFIG_LD_IS_BFD),y)
LDFLAGS_vmlinux += $(call ld-option,--no-warn-rwx-segments)
endif
+ifeq ($(CONFIG_EFI_STUB),y)
+# ensure that the static EFI stub library will be pulled in, even if it is
+# never referenced explicitly from the startup code
+LDFLAGS_vmlinux += -u efi_pe_entry
+endif
LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
index 4ca70bf93dc0..f4e22ef774ab 100644
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ b/arch/x86/boot/compressed/efi_mixed.S
@@ -26,8 +26,8 @@
* When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
* is the first thing that runs after switching to long mode. Depending on
* whether the EFI handover protocol or the compat entry point was used to
- * enter the kernel, it will either branch to the 64-bit EFI handover
- * entrypoint at offset 0x390 in the image, or to the 64-bit EFI PE/COFF
+ * enter the kernel, it will either branch to the common 64-bit EFI stub
+ * entrypoint efi_stub_entry() directly, or via the 64-bit EFI PE/COFF
* entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
* struct bootparams pointer as the third argument, so the presence of such a
* pointer is used to disambiguate.
@@ -37,21 +37,23 @@
* | efi32_pe_entry |---->| | | +-----------+--+
* +------------------+ | | +------+----------------+ |
* | startup_32 |---->| startup_64_mixed_mode | |
- * +------------------+ | | +------+----------------+ V
- * | efi32_stub_entry |---->| | | +------------------+
- * +------------------+ +------------+ +---->| efi64_stub_entry |
- * +-------------+----+
- * +------------+ +----------+ |
- * | startup_64 |<----| efi_main |<--------------+
- * +------------+ +----------+
+ * +------------------+ | | +------+----------------+ |
+ * | efi32_stub_entry |---->| | | |
+ * +------------------+ +------------+ | |
+ * V |
+ * +------------+ +----------------+ |
+ * | startup_64 |<----| efi_stub_entry |<--------+
+ * +------------+ +----------------+
*/
SYM_FUNC_START(startup_64_mixed_mode)
lea efi32_boot_args(%rip), %rdx
mov 0(%rdx), %edi
mov 4(%rdx), %esi
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
mov 8(%rdx), %edx // saved bootparams pointer
test %edx, %edx
- jnz efi64_stub_entry
+ jnz efi_stub_entry
+#endif
/*
* efi_pe_entry uses MS calling convention, which requires 32 bytes of
* shadow space on the stack even if all arguments are passed in
@@ -138,6 +140,28 @@ SYM_FUNC_START(__efi64_thunk)
SYM_FUNC_END(__efi64_thunk)
.code32
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+SYM_FUNC_START(efi32_stub_entry)
+ call 1f
+1: popl %ecx
+
+ /* Clear BSS */
+ xorl %eax, %eax
+ leal (_bss - 1b)(%ecx), %edi
+ leal (_ebss - 1b)(%ecx), %ecx
+ subl %edi, %ecx
+ shrl $2, %ecx
+ cld
+ rep stosl
+
+ add $0x4, %esp /* Discard return address */
+ popl %ecx
+ popl %edx
+ popl %esi
+ jmp efi32_entry
+SYM_FUNC_END(efi32_stub_entry)
+#endif
+
/*
* EFI service pointer must be in %edi.
*
@@ -218,7 +242,7 @@ SYM_FUNC_END(efi_enter32)
* stub may still exit and return to the firmware using the Exit() EFI boot
* service.]
*/
-SYM_FUNC_START(efi32_entry)
+SYM_FUNC_START_LOCAL(efi32_entry)
call 1f
1: pop %ebx
@@ -245,10 +269,6 @@ SYM_FUNC_START(efi32_entry)
jmp startup_32
SYM_FUNC_END(efi32_entry)
-#define ST32_boottime 60 // offsetof(efi_system_table_32_t, boottime)
-#define BS32_handle_protocol 88 // offsetof(efi_boot_services_32_t, handle_protocol)
-#define LI32_image_base 32 // offsetof(efi_loaded_image_32_t, image_base)
-
/*
* efi_status_t efi32_pe_entry(efi_handle_t image_handle,
* efi_system_table_32_t *sys_table)
@@ -256,8 +276,6 @@ SYM_FUNC_END(efi32_entry)
SYM_FUNC_START(efi32_pe_entry)
pushl %ebp
movl %esp, %ebp
- pushl %eax // dummy push to allocate loaded_image
-
pushl %ebx // save callee-save registers
pushl %edi
@@ -266,48 +284,8 @@ SYM_FUNC_START(efi32_pe_entry)
movl $0x80000003, %eax // EFI_UNSUPPORTED
jnz 2f
- call 1f
-1: pop %ebx
-
- /* Get the loaded image protocol pointer from the image handle */
- leal -4(%ebp), %eax
- pushl %eax // &loaded_image
- leal (loaded_image_proto - 1b)(%ebx), %eax
- pushl %eax // pass the GUID address
- pushl 8(%ebp) // pass the image handle
-
- /*
- * Note the alignment of the stack frame.
- * sys_table
- * handle <-- 16-byte aligned on entry by ABI
- * return address
- * frame pointer
- * loaded_image <-- local variable
- * saved %ebx <-- 16-byte aligned here
- * saved %edi
- * &loaded_image
- * &loaded_image_proto
- * handle <-- 16-byte aligned for call to handle_protocol
- */
-
- movl 12(%ebp), %eax // sys_table
- movl ST32_boottime(%eax), %eax // sys_table->boottime
- call *BS32_handle_protocol(%eax) // sys_table->boottime->handle_protocol
- addl $12, %esp // restore argument space
- testl %eax, %eax
- jnz 2f
-
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
- movl -4(%ebp), %esi // loaded_image
- movl LI32_image_base(%esi), %esi // loaded_image->image_base
- leal (startup_32 - 1b)(%ebx), %ebp // runtime address of startup_32
- /*
- * We need to set the image_offset variable here since startup_32() will
- * use it before we get to the 64-bit efi_pe_entry() in C code.
- */
- subl %esi, %ebp // calculate image_offset
- movl %ebp, (image_offset - 1b)(%ebx) // save image_offset
xorl %esi, %esi
jmp efi32_entry // pass %ecx, %edx, %esi
// no other registers remain live
@@ -318,14 +296,13 @@ SYM_FUNC_START(efi32_pe_entry)
RET
SYM_FUNC_END(efi32_pe_entry)
- .section ".rodata"
- /* EFI loaded image protocol GUID */
- .balign 4
-SYM_DATA_START_LOCAL(loaded_image_proto)
- .long 0x5b1b31a1
- .word 0x9562, 0x11d2
- .byte 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
-SYM_DATA_END(loaded_image_proto)
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+ .org efi32_stub_entry + 0x200
+ .code64
+SYM_FUNC_START_NOALIGN(efi64_stub_entry)
+ jmp efi_handover_entry
+SYM_FUNC_END(efi64_stub_entry)
+#endif
.data
.balign 8
diff --git a/arch/x86/boot/compressed/error.c b/arch/x86/boot/compressed/error.c
index 5313c5cb2b80..19a8251de506 100644
--- a/arch/x86/boot/compressed/error.c
+++ b/arch/x86/boot/compressed/error.c
@@ -7,7 +7,7 @@
#include "misc.h"
#include "error.h"
-void warn(char *m)
+void warn(const char *m)
{
error_putstr("\n\n");
error_putstr(m);
diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h
index 86fe33b93715..31f9e080d61a 100644
--- a/arch/x86/boot/compressed/error.h
+++ b/arch/x86/boot/compressed/error.h
@@ -4,7 +4,7 @@
#include <linux/compiler.h>
-void warn(char *m);
+void warn(const char *m);
void error(char *m) __noreturn;
void panic(const char *fmt, ...) __noreturn __cold;
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 987ae727cf9f..1cfe9802a42f 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -84,19 +84,6 @@ SYM_FUNC_START(startup_32)
#ifdef CONFIG_RELOCATABLE
leal startup_32@GOTOFF(%edx), %ebx
-
-#ifdef CONFIG_EFI_STUB
-/*
- * If we were loaded via the EFI LoadImage service, startup_32() will be at an
- * offset to the start of the space allocated for the image. efi_pe_entry() will
- * set up image_offset to tell us where the image actually starts, so that we
- * can use the full available buffer.
- * image_offset = startup_32 - image_base
- * Otherwise image_offset will be zero and has no effect on the calculations.
- */
- subl image_offset@GOTOFF(%edx), %ebx
-#endif
-
movl BP_kernel_alignment(%esi), %eax
decl %eax
addl %eax, %ebx
@@ -150,17 +137,6 @@ SYM_FUNC_START(startup_32)
jmp *%eax
SYM_FUNC_END(startup_32)
-#ifdef CONFIG_EFI_STUB
-SYM_FUNC_START(efi32_stub_entry)
- add $0x4, %esp
- movl 8(%esp), %esi /* save boot_params pointer */
- call efi_main
- /* efi_main returns the possibly relocated address of startup_32 */
- jmp *%eax
-SYM_FUNC_END(efi32_stub_entry)
-SYM_FUNC_ALIAS(efi_stub_entry, efi32_stub_entry)
-#endif
-
.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
@@ -179,13 +155,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
*/
/* push arguments for extract_kernel: */
- pushl output_len@GOTOFF(%ebx) /* decompressed length, end of relocs */
pushl %ebp /* output address */
- pushl input_len@GOTOFF(%ebx) /* input_len */
- leal input_data@GOTOFF(%ebx), %eax
- pushl %eax /* input_data */
- leal boot_heap@GOTOFF(%ebx), %eax
- pushl %eax /* heap area */
pushl %esi /* real mode pointer */
call extract_kernel /* returns kernel entry point in %eax */
addl $24, %esp
@@ -213,8 +183,6 @@ SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
*/
.bss
.balign 4
-boot_heap:
- .fill BOOT_HEAP_SIZE, 1, 0
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 03c4328a88cb..bf4a10a5794f 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -146,19 +146,6 @@ SYM_FUNC_START(startup_32)
#ifdef CONFIG_RELOCATABLE
movl %ebp, %ebx
-
-#ifdef CONFIG_EFI_STUB
-/*
- * If we were loaded via the EFI LoadImage service, startup_32 will be at an
- * offset to the start of the space allocated for the image. efi_pe_entry will
- * set up image_offset to tell us where the image actually starts, so that we
- * can use the full available buffer.
- * image_offset = startup_32 - image_base
- * Otherwise image_offset will be zero and has no effect on the calculations.
- */
- subl rva(image_offset)(%ebp), %ebx
-#endif
-
movl BP_kernel_alignment(%esi), %eax
decl %eax
addl %eax, %ebx
@@ -294,17 +281,6 @@ SYM_FUNC_START(startup_32)
lret
SYM_FUNC_END(startup_32)
-#if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL)
- .org 0x190
-SYM_FUNC_START(efi32_stub_entry)
- add $0x4, %esp /* Discard return address */
- popl %ecx
- popl %edx
- popl %esi
- jmp efi32_entry
-SYM_FUNC_END(efi32_stub_entry)
-#endif
-
.code64
.org 0x200
SYM_CODE_START(startup_64)
@@ -346,20 +322,6 @@ SYM_CODE_START(startup_64)
/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
leaq startup_32(%rip) /* - $startup_32 */, %rbp
-
-#ifdef CONFIG_EFI_STUB
-/*
- * If we were loaded via the EFI LoadImage service, startup_32 will be at an
- * offset to the start of the space allocated for the image. efi_pe_entry will
- * set up image_offset to tell us where the image actually starts, so that we
- * can use the full available buffer.
- * image_offset = startup_32 - image_base
- * Otherwise image_offset will be zero and has no effect on the calculations.
- */
- movl image_offset(%rip), %eax
- subq %rax, %rbp
-#endif
-
movl BP_kernel_alignment(%rsi), %eax
decl %eax
addq %rax, %rbp
@@ -398,10 +360,6 @@ SYM_CODE_START(startup_64)
* For the trampoline, we need the top page table to reside in lower
* memory as we don't have a way to load 64-bit values into CR3 in
* 32-bit mode.
- *
- * We go though the trampoline even if we don't have to: if we're
- * already in a desired paging mode. This way the trampoline code gets
- * tested on every boot.
*/
/* Make sure we have GDT with 32-bit code segment */
@@ -416,10 +374,14 @@ SYM_CODE_START(startup_64)
lretq
.Lon_kernel_cs:
+ /*
+ * RSI holds a pointer to a boot_params structure provided by the
+ * loader, and this needs to be preserved across C function calls. So
+ * move it into a callee saved register.
+ */
+ movq %rsi, %r15
- pushq %rsi
call load_stage1_idt
- popq %rsi
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
@@ -430,63 +392,24 @@ SYM_CODE_START(startup_64)
* CPUID instructions being issued, so go ahead and do that now via
* sev_enable(), which will also handle the rest of the SEV-related
* detection/setup to ensure that has been done in advance of any dependent
- * code.
+ * code. Pass the boot_params pointer as the first argument.
*/
- pushq %rsi
- movq %rsi, %rdi /* real mode address */
+ movq %r15, %rdi
call sev_enable
- popq %rsi
#endif
/*
- * paging_prepare() sets up the trampoline and checks if we need to
- * enable 5-level paging.
+ * configure_5level_paging() updates the number of paging levels using
+ * a trampoline in 32-bit addressable memory if the current number does
+ * not match the desired number.
*
- * paging_prepare() returns a two-quadword structure which lands
- * into RDX:RAX:
- * - Address of the trampoline is returned in RAX.
- * - Non zero RDX means trampoline needs to enable 5-level
- * paging.
- *
- * RSI holds real mode data and needs to be preserved across
- * this function call.
- */
- pushq %rsi
- movq %rsi, %rdi /* real mode address */
- call paging_prepare
- popq %rsi
-
- /* Save the trampoline address in RCX */
- movq %rax, %rcx
-
- /*
- * Load the address of trampoline_return() into RDI.
- * It will be used by the trampoline to return to the main code.
+ * Pass the boot_params pointer as the first argument. The second
+ * argument is the relocated address of the page table to use instead
+ * of the page table in trampoline memory (if required).
*/
- leaq trampoline_return(%rip), %rdi
-
- /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
- pushq $__KERNEL32_CS
- leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
- pushq %rax
- lretq
-trampoline_return:
- /* Restore the stack, the 32-bit trampoline uses its own stack */
- leaq rva(boot_stack_end)(%rbx), %rsp
-
- /*
- * cleanup_trampoline() would restore trampoline memory.
- *
- * RDI is address of the page table to use instead of page table
- * in trampoline memory (if required).
- *
- * RSI holds real mode data and needs to be preserved across
- * this function call.
- */
- pushq %rsi
- leaq rva(top_pgtable)(%rbx), %rdi
- call cleanup_trampoline
- popq %rsi
+ movq %r15, %rdi
+ leaq rva(top_pgtable)(%rbx), %rsi
+ call configure_5level_paging
/* Zero EFLAGS */
pushq $0
@@ -496,7 +419,6 @@ trampoline_return:
* Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
- pushq %rsi
leaq (_bss-8)(%rip), %rsi
leaq rva(_bss-8)(%rbx), %rdi
movl $(_bss - startup_32), %ecx
@@ -504,7 +426,6 @@ trampoline_return:
std
rep movsq
cld
- popq %rsi
/*
* The GDT may get overwritten either during the copy we just did or
@@ -523,21 +444,6 @@ trampoline_return:
jmp *%rax
SYM_CODE_END(startup_64)
-#ifdef CONFIG_EFI_STUB
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
- .org 0x390
-#endif
-SYM_FUNC_START(efi64_stub_entry)
- and $~0xf, %rsp /* realign the stack */
- movq %rdx, %rbx /* save boot_params pointer */
- call efi_main
- movq %rbx,%rsi
- leaq rva(startup_64)(%rax), %rax
- jmp *%rax
-SYM_FUNC_END(efi64_stub_entry)
-SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry)
-#endif
-
.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
@@ -551,128 +457,122 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
shrq $3, %rcx
rep stosq
- pushq %rsi
call load_stage2_idt
/* Pass boot_params to initialize_identity_maps() */
- movq (%rsp), %rdi
+ movq %r15, %rdi
call initialize_identity_maps
- popq %rsi
/*
* Do the extraction, and jump to the new kernel..
*/
- pushq %rsi /* Save the real mode argument */
- movq %rsi, %rdi /* real mode address */
- leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
- leaq input_data(%rip), %rdx /* input_data */
- movl input_len(%rip), %ecx /* input_len */
- movq %rbp, %r8 /* output target address */
- movl output_len(%rip), %r9d /* decompressed length, end of relocs */
+ /* pass struct boot_params pointer and output target address */
+ movq %r15, %rdi
+ movq %rbp, %rsi
call extract_kernel /* returns kernel entry point in %rax */
- popq %rsi
/*
* Jump to the decompressed kernel.
*/
+ movq %r15, %rsi
jmp *%rax
SYM_FUNC_END(.Lrelocated)
- .code32
/*
- * This is the 32-bit trampoline that will be copied over to low memory.
+ * This is the 32-bit trampoline that will be copied over to low memory. It
+ * will be called using the ordinary 64-bit calling convention from code
+ * running in 64-bit mode.
*
- * RDI contains the return address (might be above 4G).
- * ECX contains the base address of the trampoline memory.
- * Non zero RDX means trampoline needs to enable 5-level paging.
+ * Return address is at the top of the stack (might be above 4G).
+ * The first argument (EDI) contains the address of the temporary PGD level
+ * page table in 32-bit addressable memory which will be programmed into
+ * register CR3.
*/
+ .section ".rodata", "a", @progbits
SYM_CODE_START(trampoline_32bit_src)
- /* Set up data and stack segments */
- movl $__KERNEL_DS, %eax
- movl %eax, %ds
- movl %eax, %ss
+ /*
+ * Preserve callee save 64-bit registers on the stack: this is
+ * necessary because the architecture does not guarantee that GPRs will
+ * retain their full 64-bit values across a 32-bit mode switch.
+ */
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbp
+ pushq %rbx
+
+ /* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
+ movq %rsp, %rbx
+ shrq $32, %rbx
+
+ /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+ pushq $__KERNEL32_CS
+ leaq 0f(%rip), %rax
+ pushq %rax
+ lretq
- /* Set up new stack */
- leal TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
+ /*
+ * The 32-bit code below will do a far jump back to long mode and end
+ * up here after reconfiguring the number of paging levels. First, the
+ * stack pointer needs to be restored to its full 64-bit value before
+ * the callee save register contents can be popped from the stack.
+ */
+.Lret:
+ shlq $32, %rbx
+ orq %rbx, %rsp
+
+ /* Restore the preserved 64-bit registers */
+ popq %rbx
+ popq %rbp
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+ .code32
+0:
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
- /* Check what paging mode we want to be in after the trampoline */
- testl %edx, %edx
- jz 1f
-
- /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
- movl %cr4, %eax
- testl $X86_CR4_LA57, %eax
- jnz 3f
- jmp 2f
-1:
- /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
- movl %cr4, %eax
- testl $X86_CR4_LA57, %eax
- jz 3f
-2:
/* Point CR3 to the trampoline's new top level page table */
- leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
- movl %eax, %cr3
-3:
+ movl %edi, %cr3
+
	/* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
- pushl %ecx
- pushl %edx
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
/* Avoid writing EFER if no change was made (for TDX guest) */
jc 1f
wrmsr
-1: popl %edx
- popl %ecx
-
-#ifdef CONFIG_X86_MCE
- /*
- * Preserve CR4.MCE if the kernel will enable #MC support.
- * Clearing MCE may fault in some environments (that also force #MC
- * support). Any machine check that occurs before #MC support is fully
- * configured will crash the system regardless of the CR4.MCE value set
- * here.
- */
- movl %cr4, %eax
- andl $X86_CR4_MCE, %eax
-#else
- movl $0, %eax
-#endif
-
- /* Enable PAE and LA57 (if required) paging modes */
- orl $X86_CR4_PAE, %eax
- testl %edx, %edx
- jz 1f
- orl $X86_CR4_LA57, %eax
1:
+ /* Toggle CR4.LA57 */
+ movl %cr4, %eax
+ btcl $X86_CR4_LA57_BIT, %eax
movl %eax, %cr4
- /* Calculate address of paging_enabled() once we are executing in the trampoline */
- leal .Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
-
- /* Prepare the stack for far return to Long Mode */
- pushl $__KERNEL_CS
- pushl %eax
-
/* Enable paging again. */
movl %cr0, %eax
btsl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
- lret
+ /*
+ * Return to the 64-bit calling code using LJMP rather than LRET, to
+ * avoid the need for a 32-bit addressable stack. The destination
+ * address will be adjusted after the template code is copied into a
+ * 32-bit addressable buffer.
+ */
+.Ljmp: ljmpl $__KERNEL_CS, $(.Lret - trampoline_32bit_src)
SYM_CODE_END(trampoline_32bit_src)
- .code64
-SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
- /* Return from the trampoline */
- jmp *%rdi
-SYM_FUNC_END(.Lpaging_enabled)
+/*
+ * This symbol is placed right after trampoline_32bit_src() so its address can
+ * be used to infer the size of the trampoline code.
+ */
+SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
/*
* The trampoline code has a size limit.
@@ -681,7 +581,7 @@ SYM_FUNC_END(.Lpaging_enabled)
*/
.org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
- .code32
+ .text
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:
@@ -726,8 +626,6 @@ SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
*/
.bss
.balign 4
-SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0)
-
SYM_DATA_START_LOCAL(boot_stack)
.fill BOOT_STACK_SIZE, 1, 0
.balign 16
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 94b7abcf624b..f711f2a85862 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -330,6 +330,33 @@ static size_t parse_elf(void *output)
return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
}
+const unsigned long kernel_total_size = VO__end - VO__text;
+
+static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
+
+extern unsigned char input_data[];
+extern unsigned int input_len, output_len;
+
+unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+ void (*error)(char *x))
+{
+ unsigned long entry;
+
+ if (!free_mem_ptr) {
+ free_mem_ptr = (unsigned long)boot_heap;
+ free_mem_end_ptr = (unsigned long)boot_heap + sizeof(boot_heap);
+ }
+
+ if (__decompress(input_data, input_len, NULL, NULL, outbuf, output_len,
+ NULL, error) < 0)
+ return ULONG_MAX;
+
+ entry = parse_elf(outbuf);
+ handle_relocations(outbuf, output_len, virt_addr);
+
+ return entry;
+}
+
/*
 * The compressed kernel image (ZO) has been moved so that its position
* is against the end of the buffer used to hold the uncompressed kernel
@@ -347,14 +374,10 @@ static size_t parse_elf(void *output)
* |-------uncompressed kernel image---------|
*
*/
-asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
- unsigned char *input_data,
- unsigned long input_len,
- unsigned char *output,
- unsigned long output_len)
+asmlinkage __visible void *extract_kernel(void *rmode, unsigned char *output)
{
- const unsigned long kernel_total_size = VO__end - VO__text;
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ memptr heap = (memptr)boot_heap;
unsigned long needed_size;
size_t entry_offset;
@@ -412,7 +435,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
* entries. This ensures the full mapped area is usable RAM
* and doesn't include any reserved areas.
*/
- needed_size = max(output_len, kernel_total_size);
+ needed_size = max_t(unsigned long, output_len, kernel_total_size);
#ifdef CONFIG_X86_64
needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
#endif
@@ -443,7 +466,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#ifdef CONFIG_X86_64
if (heap > 0x3fffffffffffUL)
error("Destination address too large");
- if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
+ if (virt_addr + needed_size > KERNEL_IMAGE_SIZE)
error("Destination virtual address is beyond the kernel mapping area");
#else
if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
@@ -461,10 +484,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
accept_memory(__pa(output), __pa(output) + needed_size);
}
- __decompress(input_data, input_len, NULL, NULL, output, output_len,
- NULL, error);
- entry_offset = parse_elf(output);
- handle_relocations(output, output_len, virt_addr);
+ entry_offset = decompress_kernel(output, virt_addr, error);
debug_putstr("done.\nBooting the kernel (entry_offset: 0x");
debug_puthex(entry_offset);
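
The misc.c hunk above moves the decompression heap and the input/output symbol references out of the assembly call sites: boot_heap becomes a file-scope static array, decompress_kernel() publishes the bump-allocator bounds lazily, and a failed __decompress() is reported to the caller as ULONG_MAX. A minimal sketch of that shape, with illustrative names (HEAP_SIZE, heap_lo/heap_hi, do_decompress are placeholders, not the kernel's symbols):

#include <limits.h>
#include <stddef.h>
#include <stdint.h>

#define HEAP_SIZE	(64 * 1024)

static uint8_t heap[HEAP_SIZE] __attribute__((aligned(4)));
static uintptr_t heap_lo, heap_hi;	/* bump-allocator bounds */

/* Placeholder for the real decompressor; returns < 0 on failure. */
static int do_decompress(uint8_t *out) { (void)out; return 0; }

static unsigned long extract(uint8_t *out)
{
	if (!heap_lo) {			/* first call: publish the heap bounds */
		heap_lo = (uintptr_t)heap;
		heap_hi = (uintptr_t)heap + sizeof(heap);
	}

	if (do_decompress(out) < 0)
		return ULONG_MAX;	/* error sentinel, as in the hunk above */

	return 0;			/* the real code returns the entry offset */
}
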
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 964fe903a1cd..cc70d3fb9049 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -179,9 +179,7 @@ static inline int count_immovable_mem_regions(void) { return 0; }
#endif
/* ident_map_64.c */
-#ifdef CONFIG_X86_5LEVEL
extern unsigned int __pgtable_l5_enabled, pgdir_shift, ptrs_per_p4d;
-#endif
extern void kernel_add_identity_map(unsigned long start, unsigned long end);
/* Used by PAGE_KERN* macros: */
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index cc9b2529a086..6d595abe06b3 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -3,18 +3,16 @@
#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE)
-#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
-
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE 0x80
-
-#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
+#define TRAMPOLINE_32BIT_CODE_SIZE 0xA0
#ifndef __ASSEMBLER__
extern unsigned long *trampoline_32bit;
-extern void trampoline_32bit_src(void *return_ptr);
+extern void trampoline_32bit_src(void *trampoline, bool enable_5lvl);
+
+extern const u16 trampoline_ljmp_imm_offset;
#endif /* __ASSEMBLER__ */
#endif /* BOOT_COMPRESSED_PAGETABLE_H */
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 2ac12ff4111b..7939eb6e6ce9 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -16,11 +16,6 @@ unsigned int __section(".data") pgdir_shift = 39;
unsigned int __section(".data") ptrs_per_p4d = 1;
#endif
-struct paging_config {
- unsigned long trampoline_start;
- unsigned long l5_required;
-};
-
/* Buffer to preserve trampoline memory */
static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
@@ -29,7 +24,7 @@ static char trampoline_save[TRAMPOLINE_32BIT_SIZE];
* purposes.
*
* Avoid putting the pointer into .bss as it will be cleared between
- * paging_prepare() and extract_kernel().
+ * configure_5level_paging() and extract_kernel().
*/
unsigned long *trampoline_32bit __section(".data");
@@ -106,12 +101,13 @@ static unsigned long find_trampoline_placement(void)
return bios_start - TRAMPOLINE_32BIT_SIZE;
}
-struct paging_config paging_prepare(void *rmode)
+asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable)
{
- struct paging_config paging_config = {};
+ void (*toggle_la57)(void *cr3);
+ bool l5_required = false;
/* Initialize boot_params. Required for cmdline_find_option_bool(). */
- boot_params = rmode;
+ boot_params = bp;
/*
* Check if LA57 is desired and supported.
@@ -129,12 +125,22 @@ struct paging_config paging_prepare(void *rmode)
!cmdline_find_option_bool("no5lvl") &&
native_cpuid_eax(0) >= 7 &&
(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) {
- paging_config.l5_required = 1;
+ l5_required = true;
+
+ /* Initialize variables for 5-level paging */
+ __pgtable_l5_enabled = 1;
+ pgdir_shift = 48;
+ ptrs_per_p4d = 512;
}
- paging_config.trampoline_start = find_trampoline_placement();
+ /*
+ * The trampoline will not be used if the paging mode is already set to
+ * the desired one.
+ */
+ if (l5_required == !!(native_read_cr4() & X86_CR4_LA57))
+ return;
- trampoline_32bit = (unsigned long *)paging_config.trampoline_start;
+ trampoline_32bit = (unsigned long *)find_trampoline_placement();
/* Preserve trampoline memory */
memcpy(trampoline_save, trampoline_32bit, TRAMPOLINE_32BIT_SIZE);
@@ -143,32 +149,32 @@ struct paging_config paging_prepare(void *rmode)
memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE);
/* Copy trampoline code in place */
- memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
+ toggle_la57 = memcpy(trampoline_32bit +
+ TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long),
&trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE);
/*
+ * Avoid the need for a stack in the 32-bit trampoline code, by using
+ * LJMP rather than LRET to return back to long mode. LJMP takes an
+ * immediate absolute address, which needs to be adjusted based on the
+ * placement of the trampoline.
+ */
+ *(u32 *)((u8 *)toggle_la57 + trampoline_ljmp_imm_offset) +=
+ (unsigned long)toggle_la57;
+
+ /*
* The code below prepares page table in trampoline memory.
*
* The new page table will be used by trampoline code for switching
* from 4- to 5-level paging or vice versa.
- *
- * If switching is not required, the page table is unused: trampoline
- * code wouldn't touch CR3.
- */
-
- /*
- * We are not going to use the page table in trampoline memory if we
- * are already in the desired paging mode.
*/
- if (paging_config.l5_required == !!(native_read_cr4() & X86_CR4_LA57))
- goto out;
- if (paging_config.l5_required) {
+ if (l5_required) {
/*
* For 4- to 5-level paging transition, set up current CR3 as
* the first and the only entry in a new top-level page table.
*/
- trampoline_32bit[TRAMPOLINE_32BIT_PGTABLE_OFFSET] = __native_read_cr3() | _PAGE_TABLE_NOENC;
+ *trampoline_32bit = __native_read_cr3() | _PAGE_TABLE_NOENC;
} else {
unsigned long src;
@@ -181,38 +187,17 @@ struct paging_config paging_prepare(void *rmode)
* may be above 4G.
*/
src = *(unsigned long *)__native_read_cr3() & PAGE_MASK;
- memcpy(trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long),
- (void *)src, PAGE_SIZE);
+ memcpy(trampoline_32bit, (void *)src, PAGE_SIZE);
}
-out:
- return paging_config;
-}
-
-void cleanup_trampoline(void *pgtable)
-{
- void *trampoline_pgtable;
-
- trampoline_pgtable = trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long);
+ toggle_la57(trampoline_32bit);
/*
- * Move the top level page table out of trampoline memory,
- * if it's there.
+ * Move the top level page table out of trampoline memory.
*/
- if ((void *)__native_read_cr3() == trampoline_pgtable) {
- memcpy(pgtable, trampoline_pgtable, PAGE_SIZE);
- native_write_cr3((unsigned long)pgtable);
- }
+ memcpy(pgtable, trampoline_32bit, PAGE_SIZE);
+ native_write_cr3((unsigned long)pgtable);
/* Restore trampoline memory */
memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE);
-
- /* Initialize variables for 5-level paging */
-#ifdef CONFIG_X86_5LEVEL
- if (__read_cr4() & X86_CR4_LA57) {
- __pgtable_l5_enabled = 1;
- pgdir_shift = 48;
- ptrs_per_p4d = 512;
- }
-#endif
}
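
The most subtle part of configure_5level_paging() above is the LJMP fix-up: the trampoline template ends in a far jump whose 32-bit absolute operand is emitted as an offset from the template start, so once the template has been memcpy()'d into 32-bit addressable memory the operand must be advanced by the copy address. A generic sketch of that relocation (names here are placeholders, not the kernel's):

#include <stdint.h>
#include <string.h>

/*
 * Copy a position-dependent code template and patch the 32-bit absolute
 * immediate at imm_offset so it points into the copy rather than encoding
 * only the distance from the template start.
 */
static void *copy_and_fixup(void *dst, const void *tmpl, size_t size,
			    size_t imm_offset)
{
	uint8_t *code = memcpy(dst, tmpl, size);

	*(uint32_t *)(code + imm_offset) += (uint32_t)(uintptr_t)code;

	return code;
}
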
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index c3e343bd4760..dc8c876fbd8f 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -365,22 +365,27 @@ static void enforce_vmpl0(void)
* by the guest kernel. As and when a new feature is implemented in the
* guest kernel, a corresponding bit should be added to the mask.
*/
-#define SNP_FEATURES_PRESENT (0)
+#define SNP_FEATURES_PRESENT MSR_AMD64_SNP_DEBUG_SWAP
+
+u64 snp_get_unsupported_features(u64 status)
+{
+ if (!(status & MSR_AMD64_SEV_SNP_ENABLED))
+ return 0;
+
+ return status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
+}
void snp_check_features(void)
{
u64 unsupported;
- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
- return;
-
/*
* Terminate the boot if hypervisor has enabled any feature lacking
* guest side implementation. Pass on the unsupported features mask through
* EXIT_INFO_2 of the GHCB protocol so that those features can be reported
* as part of the guest boot failure.
*/
- unsupported = sev_status & SNP_FEATURES_IMPL_REQ & ~SNP_FEATURES_PRESENT;
+ unsupported = snp_get_unsupported_features(sev_status);
if (unsupported) {
if (ghcb_version < 2 || (!boot_ghcb && !early_setup_ghcb()))
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
@@ -390,35 +395,22 @@ void snp_check_features(void)
}
}
-void sev_enable(struct boot_params *bp)
+/*
+ * sev_check_cpu_support - Check for SEV support in the CPU capabilities
+ *
+ * Returns < 0 if SEV is not supported, otherwise the position of the
+ * encryption bit in the page table descriptors.
+ */
+static int sev_check_cpu_support(void)
{
unsigned int eax, ebx, ecx, edx;
- struct msr m;
- bool snp;
-
- /*
- * bp->cc_blob_address should only be set by boot/compressed kernel.
- * Initialize it to 0 to ensure that uninitialized values from
- * buggy bootloaders aren't propagated.
- */
- if (bp)
- bp->cc_blob_address = 0;
-
- /*
- * Do an initial SEV capability check before snp_init() which
- * loads the CPUID page and the same checks afterwards are done
- * without the hypervisor and are trustworthy.
- *
- * If the HV fakes SEV support, the guest will crash'n'burn
- * which is good enough.
- */
/* Check for the SME/SEV support leaf */
eax = 0x80000000;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
if (eax < 0x8000001f)
- return;
+ return -ENODEV;
/*
* Check for the SME/SEV feature:
@@ -433,6 +425,35 @@ void sev_enable(struct boot_params *bp)
native_cpuid(&eax, &ebx, &ecx, &edx);
/* Check whether SEV is supported */
if (!(eax & BIT(1)))
+ return -ENODEV;
+
+ return ebx & 0x3f;
+}
+
+void sev_enable(struct boot_params *bp)
+{
+ struct msr m;
+ int bitpos;
+ bool snp;
+
+ /*
+ * bp->cc_blob_address should only be set by boot/compressed kernel.
+ * Initialize it to 0 to ensure that uninitialized values from
+ * buggy bootloaders aren't propagated.
+ */
+ if (bp)
+ bp->cc_blob_address = 0;
+
+ /*
+ * Do an initial SEV capability check before snp_init() which
+ * loads the CPUID page and the same checks afterwards are done
+ * without the hypervisor and are trustworthy.
+ *
+ * If the HV fakes SEV support, the guest will crash'n'burn
+ * which is good enough.
+ */
+
+ if (sev_check_cpu_support() < 0)
return;
/*
@@ -443,26 +464,8 @@ void sev_enable(struct boot_params *bp)
/* Now repeat the checks with the SNP CPUID table. */
- /* Recheck the SME/SEV support leaf */
- eax = 0x80000000;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- if (eax < 0x8000001f)
- return;
-
- /*
- * Recheck for the SME/SEV feature:
- * CPUID Fn8000_001F[EAX]
- * - Bit 0 - Secure Memory Encryption support
- * - Bit 1 - Secure Encrypted Virtualization support
- * CPUID Fn8000_001F[EBX]
- * - Bits 5:0 - Pagetable bit position used to indicate encryption
- */
- eax = 0x8000001f;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- /* Check whether SEV is supported */
- if (!(eax & BIT(1))) {
+ bitpos = sev_check_cpu_support();
+ if (bitpos < 0) {
if (snp)
error("SEV-SNP support indicated by CC blob, but not CPUID.");
return;
@@ -494,7 +497,24 @@ void sev_enable(struct boot_params *bp)
if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
error("SEV-SNP supported indicated by CC blob, but not SEV status MSR.");
- sme_me_mask = BIT_ULL(ebx & 0x3f);
+ sme_me_mask = BIT_ULL(bitpos);
+}
+
+/*
+ * sev_get_status - Retrieve the SEV status mask
+ *
+ * Returns 0 if the CPU is not SEV capable, otherwise the value of the
+ * AMD64_SEV MSR.
+ */
+u64 sev_get_status(void)
+{
+ struct msr m;
+
+ if (sev_check_cpu_support() < 0)
+ return 0;
+
+ boot_rdmsr(MSR_AMD64_SEV, &m);
+ return m.q;
}
/* Search for Confidential Computing blob in the EFI config table. */
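
The sev.c refactor above folds the duplicated CPUID probing into sev_check_cpu_support(), which returns the page-table encryption-bit position or a negative value. A rough userspace approximation of that probe, written against GCC/Clang's <cpuid.h> helper instead of the boot stub's native_cpuid(), so treat it as a sketch rather than the kernel helper:

#include <cpuid.h>

static int sev_cbit_position(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Highest extended CPUID leaf must cover 0x8000001f. */
	if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
	    eax < 0x8000001f)
		return -1;

	/* Fn8000_001F[EAX] bit 1: SEV supported; [EBX] bits 5:0: C-bit. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx) ||
	    !(eax & (1u << 1)))
		return -1;

	return (int)(ebx & 0x3f);
}
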
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 75a343f10e58..1b411bbf3cb0 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -33,7 +33,6 @@ CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_NR_CPUS=8
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_X86_CHECK_BIOS_CORRUPTION=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 0902518e9b93..409e9182bd29 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -31,7 +31,6 @@ CONFIG_SMP=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_MICROCODE_AMD=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_NUMA=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index a5b0cb3efeba..39d6a62ac627 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -229,10 +229,9 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
-static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
+static int aes_set_key_common(struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
- struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
int err;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
@@ -253,7 +252,8 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
- return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
+ return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key,
+ key_len);
}
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -285,8 +285,7 @@ static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
- return aes_set_key_common(crypto_skcipher_tfm(tfm),
- crypto_skcipher_ctx(tfm), key, len);
+ return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
}
static int ecb_encrypt(struct skcipher_request *req)
@@ -627,8 +626,7 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
- return aes_set_key_common(crypto_aead_tfm(aead),
- &ctx->aes_key_expanded, key, key_len) ?:
+ return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
@@ -893,14 +891,13 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
keylen /= 2;
/* first half of xts-key is for crypt */
- err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
- key, keylen);
+ err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
- return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
- key + keylen, keylen);
+ return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen,
+ keylen);
}
static int xts_crypt(struct skcipher_request *req, bool encrypt)
@@ -1150,8 +1147,7 @@ static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
{
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
- return aes_set_key_common(crypto_aead_tfm(aead),
- &ctx->aes_key_expanded, key, key_len) ?:
+ return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
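
The aesni hunks above drop the unused crypto_tfm argument from aes_set_key_common() and make every caller hand over a context pointer that has already been rounded up by aes_ctx(). The alignment step itself is the usual round-up idiom; a standalone sketch with an illustrative alignment value (the driver derives its own):

#include <stdint.h>

#define CTX_ALIGNMENT 16	/* illustrative; not the driver's actual value */

static inline void *aes_ctx_aligned(void *raw_ctx)
{
	uintptr_t addr = (uintptr_t)raw_ctx;

	return (void *)((addr + CTX_ALIGNMENT - 1) &
			~(uintptr_t)(CTX_ALIGNMENT - 1));
}
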
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index bc0a3c941b35..2d0b1bd866ea 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -456,3 +456,4 @@
449 i386 futex_waitv sys_futex_waitv
450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node
451 i386 cachestat sys_cachestat
+452 i386 fchmodat2 sys_fchmodat2
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 227538b0ce80..814768249eae 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -373,6 +373,7 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
#
# Due to a historical design error, certain syscalls are numbered differently
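
The two table entries above wire up syscall number 452 (fchmodat2) for both i386 and x86_64. A hedged usage sketch via syscall(2), since a libc wrapper may not be available yet; the (dirfd, pathname, mode, flags) prototype is assumed here, not taken from this diff:

#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_fchmodat2
#define __NR_fchmodat2 452	/* number added to the x86 tables above */
#endif

int main(void)
{
	/* Assumed prototype: (dirfd, pathname, mode, flags). */
	long ret = syscall(__NR_fchmodat2, AT_FDCWD, "example.txt", 0644, 0);

	if (ret < 0)
		perror("fchmodat2");
	return ret < 0 ? 1 : 0;
}
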
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 371014802191..6911c5399d02 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -156,8 +156,8 @@ perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
* count to the generic event atomically:
*/
prev_raw_count = local64_read(&hwc->prev_count);
- if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count)
+ if (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count))
return 0;
/*
@@ -247,11 +247,33 @@ int forward_event_to_ibs(struct perf_event *event)
return -ENOENT;
}
+/*
+ * Grouping of IBS events is not possible since IBS can have only
+ * one event active at any point in time.
+ */
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling;
+
+ if (event->group_leader == event)
+ return 0;
+
+ if (event->group_leader->pmu == event->pmu)
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu == event->pmu)
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs;
u64 max_cnt, config;
+ int ret;
perf_ibs = get_ibs_pmu(event->attr.type);
if (!perf_ibs)
@@ -265,6 +287,10 @@ static int perf_ibs_init(struct perf_event *event)
if (config & ~perf_ibs->config_mask)
return -EINVAL;
+ ret = validate_group(event);
+ if (ret)
+ return ret;
+
if (hwc->sample_period) {
if (config & perf_ibs->cnt_mask)
/* raw max_cnt may not be set */
@@ -702,38 +728,63 @@ static u8 perf_ibs_data_src(union ibs_op_data2 *op_data2)
return op_data2->data_src_lo;
}
-static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
- union ibs_op_data3 *op_data3,
- struct perf_sample_data *data)
+#define L(x) (PERF_MEM_S(LVL, x) | PERF_MEM_S(LVL, HIT))
+#define LN(x) PERF_MEM_S(LVLNUM, x)
+#define REM PERF_MEM_S(REMOTE, REMOTE)
+#define HOPS(x) PERF_MEM_S(HOPS, x)
+
+static u64 g_data_src[8] = {
+ [IBS_DATA_SRC_LOC_CACHE] = L(L3) | L(REM_CCE1) | LN(ANY_CACHE) | HOPS(0),
+ [IBS_DATA_SRC_DRAM] = L(LOC_RAM) | LN(RAM),
+ [IBS_DATA_SRC_REM_CACHE] = L(REM_CCE2) | LN(ANY_CACHE) | REM | HOPS(1),
+ [IBS_DATA_SRC_IO] = L(IO) | LN(IO),
+};
+
+#define RMT_NODE_BITS (1 << IBS_DATA_SRC_DRAM)
+#define RMT_NODE_APPLICABLE(x) (RMT_NODE_BITS & (1 << x))
+
+static u64 g_zen4_data_src[32] = {
+ [IBS_DATA_SRC_EXT_LOC_CACHE] = L(L3) | LN(L3),
+ [IBS_DATA_SRC_EXT_NEAR_CCX_CACHE] = L(REM_CCE1) | LN(ANY_CACHE) | REM | HOPS(0),
+ [IBS_DATA_SRC_EXT_DRAM] = L(LOC_RAM) | LN(RAM),
+ [IBS_DATA_SRC_EXT_FAR_CCX_CACHE] = L(REM_CCE2) | LN(ANY_CACHE) | REM | HOPS(1),
+ [IBS_DATA_SRC_EXT_PMEM] = LN(PMEM),
+ [IBS_DATA_SRC_EXT_IO] = L(IO) | LN(IO),
+ [IBS_DATA_SRC_EXT_EXT_MEM] = LN(CXL),
+};
+
+#define ZEN4_RMT_NODE_BITS ((1 << IBS_DATA_SRC_EXT_DRAM) | \
+ (1 << IBS_DATA_SRC_EXT_PMEM) | \
+ (1 << IBS_DATA_SRC_EXT_EXT_MEM))
+#define ZEN4_RMT_NODE_APPLICABLE(x) (ZEN4_RMT_NODE_BITS & (1 << x))
+
+static __u64 perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
+ union ibs_op_data3 *op_data3,
+ struct perf_sample_data *data)
{
union perf_mem_data_src *data_src = &data->data_src;
u8 ibs_data_src = perf_ibs_data_src(op_data2);
data_src->mem_lvl = 0;
+ data_src->mem_lvl_num = 0;
/*
* DcMiss, L2Miss, DataSrc, DcMissLat etc. are all invalid for Uncached
* memory accesses. So, check DcUcMemAcc bit early.
*/
- if (op_data3->dc_uc_mem_acc && ibs_data_src != IBS_DATA_SRC_EXT_IO) {
- data_src->mem_lvl = PERF_MEM_LVL_UNC | PERF_MEM_LVL_HIT;
- return;
- }
+ if (op_data3->dc_uc_mem_acc && ibs_data_src != IBS_DATA_SRC_EXT_IO)
+ return L(UNC) | LN(UNC);
/* L1 Hit */
- if (op_data3->dc_miss == 0) {
- data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
- return;
- }
+ if (op_data3->dc_miss == 0)
+ return L(L1) | LN(L1);
/* L2 Hit */
if (op_data3->l2_miss == 0) {
/* Erratum #1293 */
if (boot_cpu_data.x86 != 0x19 || boot_cpu_data.x86_model > 0xF ||
- !(op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
- data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
- return;
- }
+ !(op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc))
+ return L(L2) | LN(L2);
}
/*
@@ -743,82 +794,36 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
if (data_src->mem_op != PERF_MEM_OP_LOAD)
goto check_mab;
- /* L3 Hit */
if (ibs_caps & IBS_CAPS_ZEN4) {
- if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE) {
- data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
- return;
- }
- } else {
- if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
- data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_REM_CCE1 |
- PERF_MEM_LVL_HIT;
- return;
- }
- }
+ u64 val = g_zen4_data_src[ibs_data_src];
- /* A peer cache in a near CCX */
- if (ibs_caps & IBS_CAPS_ZEN4 &&
- ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE) {
- data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
- return;
- }
+ if (!val)
+ goto check_mab;
- /* A peer cache in a far CCX */
- if (ibs_caps & IBS_CAPS_ZEN4) {
- if (ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE) {
- data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
- return;
+ /* HOPS_1 because IBS doesn't provide remote socket detail */
+ if (op_data2->rmt_node && ZEN4_RMT_NODE_APPLICABLE(ibs_data_src)) {
+ if (ibs_data_src == IBS_DATA_SRC_EXT_DRAM)
+ val = L(REM_RAM1) | LN(RAM) | REM | HOPS(1);
+ else
+ val |= REM | HOPS(1);
}
- } else {
- if (ibs_data_src == IBS_DATA_SRC_REM_CACHE) {
- data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
- return;
- }
- }
- /* DRAM */
- if (ibs_data_src == IBS_DATA_SRC_EXT_DRAM) {
- if (op_data2->rmt_node == 0)
- data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
- else
- data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
- return;
- }
+ return val;
+ } else {
+ u64 val = g_data_src[ibs_data_src];
- /* PMEM */
- if (ibs_caps & IBS_CAPS_ZEN4 && ibs_data_src == IBS_DATA_SRC_EXT_PMEM) {
- data_src->mem_lvl_num = PERF_MEM_LVLNUM_PMEM;
- if (op_data2->rmt_node) {
- data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
- /* IBS doesn't provide Remote socket detail */
- data_src->mem_hops = PERF_MEM_HOPS_1;
- }
- return;
- }
+ if (!val)
+ goto check_mab;
- /* Extension Memory */
- if (ibs_caps & IBS_CAPS_ZEN4 &&
- ibs_data_src == IBS_DATA_SRC_EXT_EXT_MEM) {
- data_src->mem_lvl_num = PERF_MEM_LVLNUM_CXL;
- if (op_data2->rmt_node) {
- data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
- /* IBS doesn't provide Remote socket detail */
- data_src->mem_hops = PERF_MEM_HOPS_1;
+ /* HOPS_1 because IBS doesn't provide remote socket detail */
+ if (op_data2->rmt_node && RMT_NODE_APPLICABLE(ibs_data_src)) {
+ if (ibs_data_src == IBS_DATA_SRC_DRAM)
+ val = L(REM_RAM1) | LN(RAM) | REM | HOPS(1);
+ else
+ val |= REM | HOPS(1);
}
- return;
- }
- /* IO */
- if (ibs_data_src == IBS_DATA_SRC_EXT_IO) {
- data_src->mem_lvl = PERF_MEM_LVL_IO;
- data_src->mem_lvl_num = PERF_MEM_LVLNUM_IO;
- if (op_data2->rmt_node) {
- data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
- /* IBS doesn't provide Remote socket detail */
- data_src->mem_hops = PERF_MEM_HOPS_1;
- }
- return;
+ return val;
}
check_mab:
@@ -829,12 +834,11 @@ check_mab:
* DataSrc simultaneously. Prioritize DataSrc over MAB, i.e. set
* MAB only when IBS fails to provide DataSrc.
*/
- if (op_data3->dc_miss_no_mab_alloc) {
- data_src->mem_lvl = PERF_MEM_LVL_LFB | PERF_MEM_LVL_HIT;
- return;
- }
+ if (op_data3->dc_miss_no_mab_alloc)
+ return L(LFB) | LN(LFB);
- data_src->mem_lvl = PERF_MEM_LVL_NA;
+ /* Don't set HIT with NA */
+ return PERF_MEM_S(LVL, NA) | LN(NA);
}
static bool perf_ibs_cache_hit_st_valid(void)
@@ -924,7 +928,9 @@ static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
union ibs_op_data2 *op_data2,
union ibs_op_data3 *op_data3)
{
- perf_ibs_get_mem_lvl(op_data2, op_data3, data);
+ union perf_mem_data_src *data_src = &data->data_src;
+
+ data_src->val |= perf_ibs_get_mem_lvl(op_data2, op_data3, data);
perf_ibs_get_mem_snoop(op_data2, data);
perf_ibs_get_tlb_lvl(op_data3, data);
perf_ibs_get_mem_lock(op_data3, data);
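
The large ibs.c hunk above replaces a chain of per-source if/else blocks with lookup tables (g_data_src / g_zen4_data_src) indexed by the hardware DataSrc code, plus a bitmask that says which sources can additionally carry the remote-node qualifier. A stripped-down sketch of that structure, with made-up source codes and encodings:

#include <stdint.h>

/* Illustrative source codes and encodings; not the real IBS values. */
enum { SRC_NONE, SRC_LOCAL_CACHE, SRC_DRAM, SRC_IO, NR_SRC };

#define HIT_LOCAL	0x001
#define HIT_RAM		0x002
#define HIT_IO		0x004
#define REMOTE_HOP1	0x100

static const uint64_t src_encoding[NR_SRC] = {
	[SRC_LOCAL_CACHE]	= HIT_LOCAL,
	[SRC_DRAM]		= HIT_RAM,
	[SRC_IO]		= HIT_IO,
};

/* Which sources can additionally carry the "remote node" qualifier. */
#define RMT_APPLICABLE	(1u << SRC_DRAM)

static uint64_t classify(unsigned int src, int rmt_node)
{
	uint64_t val = (src < NR_SRC) ? src_encoding[src] : 0;

	if (val && rmt_node && (RMT_APPLICABLE & (1u << src)))
		val |= REMOTE_HOP1;

	return val;	/* 0 means "fall back to the MAB check" */
}
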
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 9d248703cbdd..185f902e5f28 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -129,13 +129,11 @@ u64 x86_perf_event_update(struct perf_event *event)
* exchange a new raw count - then add that new-prev delta
* count to the generic event atomically:
*/
-again:
prev_raw_count = local64_read(&hwc->prev_count);
- rdpmcl(hwc->event_base_rdpmc, new_raw_count);
-
- if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count)
- goto again;
+ do {
+ rdpmcl(hwc->event_base_rdpmc, new_raw_count);
+ } while (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count));
/*
* Now we have the new raw value and have updated the prev
@@ -2168,7 +2166,6 @@ static int __init init_hw_perf_events(void)
hybrid_pmu->pmu = pmu;
hybrid_pmu->pmu.type = -1;
hybrid_pmu->pmu.attr_update = x86_pmu.attr_update;
- hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE;
err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name,
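
Several hunks in this series (events/core.c above, plus the cstate, msr and IBS drivers) replace the local64_cmpxchg()-and-goto retry loop with local64_try_cmpxchg(), which reloads the expected value on failure. The same loop shape, sketched with C11 atomics rather than the kernel's local_t API:

#include <stdatomic.h>
#include <stdint.h>

static void counter_update(_Atomic uint64_t *prev_count,
			   _Atomic uint64_t *total,
			   uint64_t (*read_counter)(void))
{
	uint64_t prev = atomic_load(prev_count);
	uint64_t now;

	do {
		now = read_counter();
		/* On failure, "prev" is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(prev_count, &prev, now));

	atomic_fetch_add(total, now - prev);
}
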
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2a284ba951b7..fa355d3658a6 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2129,6 +2129,17 @@ static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+EVENT_ATTR_STR(topdown-retiring, td_retiring_cmt, "event=0x72,umask=0x0");
+EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_cmt, "event=0x73,umask=0x0");
+
+static struct attribute *cmt_events_attrs[] = {
+ EVENT_PTR(td_fe_bound_tnt),
+ EVENT_PTR(td_retiring_cmt),
+ EVENT_PTR(td_bad_spec_cmt),
+ EVENT_PTR(td_be_bound_tnt),
+ NULL
+};
+
static struct extra_reg intel_cmt_extra_regs[] __read_mostly = {
/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff3ffffffffffull, RSP_0),
@@ -4847,6 +4858,8 @@ PMU_FORMAT_ATTR(ldlat, "config1:0-15");
PMU_FORMAT_ATTR(frontend, "config1:0-23");
+PMU_FORMAT_ATTR(snoop_rsp, "config1:0-63");
+
static struct attribute *intel_arch3_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -4877,6 +4890,13 @@ static struct attribute *slm_format_attr[] = {
NULL
};
+static struct attribute *cmt_format_attr[] = {
+ &format_attr_offcore_rsp.attr,
+ &format_attr_ldlat.attr,
+ &format_attr_snoop_rsp.attr,
+ NULL
+};
+
static struct attribute *skl_format_attr[] = {
&format_attr_frontend.attr,
NULL,
@@ -5656,7 +5676,6 @@ static struct attribute *adl_hybrid_extra_attr[] = {
NULL
};
-PMU_FORMAT_ATTR_SHOW(snoop_rsp, "config1:0-63");
FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small);
static struct attribute *mtl_hybrid_extra_attr_rtm[] = {
@@ -6174,7 +6193,7 @@ __init int intel_pmu_init(void)
name = "Tremont";
break;
- case INTEL_FAM6_ALDERLAKE_N:
+ case INTEL_FAM6_ATOM_GRACEMONT:
x86_pmu.mid_ack = true;
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6204,6 +6223,37 @@ __init int intel_pmu_init(void)
name = "gracemont";
break;
+ case INTEL_FAM6_ATOM_CRESTMONT:
+ case INTEL_FAM6_ATOM_CRESTMONT_X:
+ x86_pmu.mid_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_cmt_extra_regs;
+
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.pebs_block = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+
+ intel_pmu_pebs_data_source_cmt();
+ x86_pmu.pebs_latency_data = mtl_latency_data_small;
+ x86_pmu.get_event_constraints = cmt_get_event_constraints;
+ x86_pmu.limit_period = spr_limit_period;
+ td_attr = cmt_events_attrs;
+ mem_attr = grt_mem_attrs;
+ extra_attr = cmt_format_attr;
+ pr_cont("Crestmont events, ");
+ name = "crestmont";
+ break;
+
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 835862c548cc..96fffb2d521d 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -365,13 +365,11 @@ static void cstate_pmu_event_update(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
u64 prev_raw_count, new_raw_count;
-again:
prev_raw_count = local64_read(&hwc->prev_count);
- new_raw_count = cstate_pmu_read_counter(event);
-
- if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count)
- goto again;
+ do {
+ new_raw_count = cstate_pmu_read_counter(event);
+ } while (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count));
local64_add(new_raw_count - prev_raw_count, &event->count);
}
@@ -671,6 +669,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
@@ -686,7 +685,6 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_cstates),
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index df88576d6b2a..eb8dd8b8a1e8 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -144,7 +144,7 @@ void __init intel_pmu_pebs_data_source_adl(void)
__intel_pmu_pebs_data_source_grt(data_source);
}
-static void __init intel_pmu_pebs_data_source_cmt(u64 *data_source)
+static void __init __intel_pmu_pebs_data_source_cmt(u64 *data_source)
{
data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
@@ -164,7 +164,12 @@ void __init intel_pmu_pebs_data_source_mtl(void)
data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
- intel_pmu_pebs_data_source_cmt(data_source);
+ __intel_pmu_pebs_data_source_cmt(data_source);
+}
+
+void __init intel_pmu_pebs_data_source_cmt(void)
+{
+ __intel_pmu_pebs_data_source_cmt(pebs_data_source);
}
static u64 precise_store_data(u64 status)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index bc226603ef3e..69043e02e8a7 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1858,7 +1858,6 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_uncore_init),
@@ -1867,6 +1866,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init),
{},
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index d49e90dc04a4..4d349986f76a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1502,7 +1502,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
pci_dev_put(ubox_dev);
- return err ? pcibios_err_to_errno(err) : 0;
+ return pcibios_err_to_errno(err);
}
int snbep_uncore_pci_init(void)
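
The one-line uncore_snbep.c change above works because pcibios_err_to_errno() already passes a success code of 0 through unchanged, so wrapping it in "err ? ... : 0" adds nothing. A generic sketch of an error translator with that shape (illustrative mapping, not the PCI one):

#include <errno.h>

static int translate_err(int raw)
{
	switch (raw) {
	case 0:
		return 0;	/* success passes through unchanged */
	case 1:
		return -EIO;	/* illustrative mapping for a device error */
	default:
		return -EINVAL;
	}
}

/* So "return raw ? translate_err(raw) : 0;" simplifies to: */
static int probe_result(int raw)
{
	return translate_err(raw);
}
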
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 0feaaa571303..9e237b30f017 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -106,7 +106,7 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_ROCKETLAKE:
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ALDERLAKE_N:
+ case INTEL_FAM6_ATOM_GRACEMONT:
case INTEL_FAM6_RAPTORLAKE:
case INTEL_FAM6_RAPTORLAKE_P:
case INTEL_FAM6_RAPTORLAKE_S:
@@ -244,12 +244,10 @@ static void msr_event_update(struct perf_event *event)
s64 delta;
/* Careful, an NMI might modify the previous event value: */
-again:
prev = local64_read(&event->hw.prev_count);
- now = msr_read_counter(event);
-
- if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
- goto again;
+ do {
+ now = msr_read_counter(event);
+ } while (!local64_try_cmpxchg(&event->hw.prev_count, &prev, now));
delta = now - prev;
if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index d6de4487348c..c8ba2be7585d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1606,6 +1606,8 @@ void intel_pmu_pebs_data_source_grt(void);
void intel_pmu_pebs_data_source_mtl(void);
+void intel_pmu_pebs_data_source_cmt(void);
+
int intel_pmu_setup_lbr_filter(struct perf_event *event);
void intel_pt_interrupt(void);
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 52e6e7ed4f78..1579429846cc 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -804,7 +804,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &model_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &model_spr),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &model_skl),
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 2888c0ee4df0..c8a7fc23f63c 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -6,7 +6,7 @@
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
*/
-#include <acpi/pdc_intel.h>
+#include <acpi/proc_cap_intel.h>
#include <asm/numa.h>
#include <asm/fixmap.h>
@@ -102,23 +102,31 @@ static inline bool arch_has_acpi_pdc(void)
c->x86_vendor == X86_VENDOR_CENTAUR);
}
-static inline void arch_acpi_set_pdc_bits(u32 *buf)
+static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
{
struct cpuinfo_x86 *c = &cpu_data(0);
- buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
+ *cap |= ACPI_PROC_CAP_C_CAPABILITY_SMP;
+
+ /* Enable coordination with firmware's _TSD info */
+ *cap |= ACPI_PROC_CAP_SMP_T_SWCOORD;
if (cpu_has(c, X86_FEATURE_EST))
- buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+ *cap |= ACPI_PROC_CAP_EST_CAPABILITY_SWSMP;
if (cpu_has(c, X86_FEATURE_ACPI))
- buf[2] |= ACPI_PDC_T_FFH;
+ *cap |= ACPI_PROC_CAP_T_FFH;
+
+ if (cpu_has(c, X86_FEATURE_HWP))
+ *cap |= ACPI_PROC_CAP_COLLAB_PROC_PERF;
/*
- * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
+ * If mwait/monitor is unsupported, C_C1_FFH and
+ * C2/C3_FFH will be disabled.
*/
- if (!cpu_has(c, X86_FEATURE_MWAIT))
- buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
+ if (!cpu_has(c, X86_FEATURE_MWAIT) ||
+ boot_option_idle_override == IDLE_NOMWAIT)
+ *cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
}
static inline bool acpi_has_cpu_in_madt(void)
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 9191280d9ea3..4ae14339cb8c 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -62,4 +62,12 @@
# define BOOT_STACK_SIZE 0x1000
#endif
+#ifndef __ASSEMBLY__
+extern unsigned int output_len;
+extern const unsigned long kernel_total_size;
+
+unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
+ void (*error)(char *x));
+#endif
+
#endif /* _ASM_X86_BOOT_H */
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index b8f1dc0761e4..9931e4c7d73f 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -71,6 +71,12 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
}
#define mul_u32_u32 mul_u32_u32
+/*
+ * __div64_32() is never called on x86, so prevent the
+ * generic definition from getting built.
+ */
+#define __div64_32
+
#else
# include <asm-generic/div64.h>
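
The new "#define __div64_32" above works because the generic divide helper in the library code is wrapped in "#ifndef __div64_32", so any definition of that name, even an empty one, makes the preprocessor skip building the fallback. A generic sketch of the pattern with illustrative names:

#include <stdint.h>

/* arch header: claim the helper so the generic one is not built */
#define arch_has_fast_div64	/* an empty definition satisfies the #ifndef */

/* library code: only provide the fallback when the arch has not claimed it */
#ifndef arch_has_fast_div64
uint32_t div64_fallback(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}
#endif
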
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 8b4be7cecdb8..b0994ae3bc23 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -90,6 +90,8 @@ static inline void efi_fpu_end(void)
}
#ifdef CONFIG_X86_32
+#define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1)
+
#define arch_efi_call_virt_setup() \
({ \
efi_fpu_begin(); \
@@ -103,8 +105,7 @@ static inline void efi_fpu_end(void)
})
#else /* !CONFIG_X86_32 */
-
-#define EFI_LOADER_SIGNATURE "EL64"
+#define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT
extern asmlinkage u64 __efi_call(void *fp, ...);
@@ -218,6 +219,8 @@ efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
#ifdef CONFIG_EFI_MIXED
+#define EFI_ALLOC_LIMIT (efi_is_64bit() ? ULONG_MAX : U32_MAX)
+
#define ARCH_HAS_EFISTUB_WRAPPERS
static inline bool efi_is_64bit(void)
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index b3af2d45bbbb..5fcd85fd64fd 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -98,8 +98,6 @@
#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */
#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */
-#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */
-
#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */
#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
@@ -112,21 +110,24 @@
#define INTEL_FAM6_GRANITERAPIDS_X 0xAD
#define INTEL_FAM6_GRANITERAPIDS_D 0xAE
+/* "Hybrid" Processors (P-Core/E-Core) */
+
+#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */
+
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
-#define INTEL_FAM6_ALDERLAKE_N 0xBE
-#define INTEL_FAM6_RAPTORLAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE 0xB7 /* Raptor Cove / Enhanced Gracemont */
#define INTEL_FAM6_RAPTORLAKE_P 0xBA
#define INTEL_FAM6_RAPTORLAKE_S 0xBF
#define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_FAM6_METEORLAKE_L 0xAA
-#define INTEL_FAM6_LUNARLAKE_M 0xBD
-
#define INTEL_FAM6_ARROWLAKE 0xC6
+#define INTEL_FAM6_LUNARLAKE_M 0xBD
+
/* "Small Core" Processors (Atom/E-Core) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
@@ -154,9 +155,10 @@
#define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */
#define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */
-#define INTEL_FAM6_SIERRAFOREST_X 0xAF
+#define INTEL_FAM6_ATOM_GRACEMONT 0xBE /* Alderlake N */
-#define INTEL_FAM6_GRANDRIDGE 0xB6
+#define INTEL_FAM6_ATOM_CRESTMONT_X 0xAF /* Sierra Forest */
+#define INTEL_FAM6_ATOM_CRESTMONT 0xB6 /* Grand Ridge */
/* Xeon Phi */
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 56d4ef604b91..635132a12778 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -127,8 +127,8 @@ static inline long local_cmpxchg(local_t *l, long old, long new)
static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
{
- typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
- return try_cmpxchg_local(&l->a.counter, __old, new);
+ return try_cmpxchg_local(&l->a.counter,
+ (typeof(l->a.counter) *) old, new);
}
/* Always has a lock prefix */
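The local_try_cmpxchg() change above only folds the temporary cast into the call; the contract stays the same: on success the new value is stored, on failure the value actually observed is written back through *old. A minimal userspace sketch of that contract using C11 atomics (illustration only -- the kernel helper is arch-specific and operates on local_t):

#include <stdatomic.h>
#include <stdio.h>

/*
 * Same contract as try_cmpxchg()/local_try_cmpxchg(): returns true and
 * stores 'new' if *v equals *old; otherwise copies the observed value
 * into *old and returns false.
 */
static _Bool demo_try_cmpxchg(atomic_long *v, long *old, long new)
{
	return atomic_compare_exchange_strong(v, old, new);
}

int main(void)
{
	atomic_long counter = 5;
	long expected = 5;

	if (demo_try_cmpxchg(&counter, &expected, 7))
		printf("swapped, counter = %ld\n", atomic_load(&counter));

	expected = 5;	/* stale expectation: counter is 7 by now */
	if (!demo_try_cmpxchg(&counter, &expected, 9))
		printf("failed, observed %ld\n", expected);

	return 0;
}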
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 7f97a8a97e24..473b16d73b47 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -50,8 +50,8 @@ void __init sme_enable(struct boot_params *bp);
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
- bool enc);
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr,
+ unsigned long size, bool enc);
void __init mem_encrypt_free_decrypted_mem(void);
@@ -85,7 +85,7 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
static inline void __init
-early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
+early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc) {}
static inline void mem_encrypt_free_decrypted_mem(void) { }
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 66dbba181bd9..bbbe9d744977 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -2,138 +2,77 @@
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
-#include <asm/cpu.h>
-#include <linux/earlycpio.h>
-#include <linux/initrd.h>
-#include <asm/microcode_amd.h>
-
-struct ucode_patch {
- struct list_head plist;
- void *data; /* Intel uses only this one */
- unsigned int size;
- u32 patch_id;
- u16 equiv_cpu;
-};
-
-extern struct list_head microcode_cache;
-
struct cpu_signature {
unsigned int sig;
unsigned int pf;
unsigned int rev;
};
-struct device;
-
-enum ucode_state {
- UCODE_OK = 0,
- UCODE_NEW,
- UCODE_UPDATED,
- UCODE_NFOUND,
- UCODE_ERROR,
-};
-
-struct microcode_ops {
- enum ucode_state (*request_microcode_fw) (int cpu, struct device *);
-
- void (*microcode_fini_cpu) (int cpu);
-
- /*
- * The generic 'microcode_core' part guarantees that
- * the callbacks below run on a target cpu when they
- * are being called.
- * See also the "Synchronization" section in microcode_core.c.
- */
- enum ucode_state (*apply_microcode) (int cpu);
- int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
-};
-
struct ucode_cpu_info {
struct cpu_signature cpu_sig;
void *mc;
};
-extern struct ucode_cpu_info ucode_cpu_info[];
-struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
-
-#ifdef CONFIG_MICROCODE_INTEL
-extern struct microcode_ops * __init init_intel_microcode(void);
-#else
-static inline struct microcode_ops * __init init_intel_microcode(void)
-{
- return NULL;
-}
-#endif /* CONFIG_MICROCODE_INTEL */
-#ifdef CONFIG_MICROCODE_AMD
-extern struct microcode_ops * __init init_amd_microcode(void);
-extern void __exit exit_amd_microcode(void);
+#ifdef CONFIG_MICROCODE
+void load_ucode_bsp(void);
+void load_ucode_ap(void);
+void microcode_bsp_resume(void);
#else
-static inline struct microcode_ops * __init init_amd_microcode(void)
-{
- return NULL;
-}
-static inline void __exit exit_amd_microcode(void) {}
+static inline void load_ucode_bsp(void) { }
+static inline void load_ucode_ap(void) { }
+static inline void microcode_bsp_resume(void) { }
#endif
-#define MAX_UCODE_COUNT 128
+#ifdef CONFIG_CPU_SUP_INTEL
+/* Intel specific microcode defines. Public for IFS */
+struct microcode_header_intel {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int metasize;
+ unsigned int reserved[2];
+};
-#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
-#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
-#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
-#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
-#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
-#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
-#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
+struct microcode_intel {
+ struct microcode_header_intel hdr;
+ unsigned int bits[];
+};
-#define CPUID_IS(a, b, c, ebx, ecx, edx) \
- (!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
+#define DEFAULT_UCODE_DATASIZE (2000)
+#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
+#define MC_HEADER_TYPE_MICROCODE 1
+#define MC_HEADER_TYPE_IFS 2
-/*
- * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
- * x86_cpuid_vendor() gets vendor id for BSP.
- *
- * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
- * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
- *
- * x86_cpuid_vendor() gets vendor information directly from CPUID.
- */
-static inline int x86_cpuid_vendor(void)
+static inline int intel_microcode_get_datasize(struct microcode_header_intel *hdr)
{
- u32 eax = 0x00000000;
- u32 ebx, ecx = 0, edx;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
- return X86_VENDOR_INTEL;
-
- if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
- return X86_VENDOR_AMD;
-
- return X86_VENDOR_UNKNOWN;
+ return hdr->datasize ? : DEFAULT_UCODE_DATASIZE;
}
-static inline unsigned int x86_cpuid_family(void)
+static inline u32 intel_get_microcode_revision(void)
{
- u32 eax = 0x00000001;
- u32 ebx, ecx = 0, edx;
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_IA32_UCODE_REV, 0);
- native_cpuid(&eax, &ebx, &ecx, &edx);
+ /* As documented in the SDM: Do a CPUID 1 here */
+ native_cpuid_eax(1);
- return x86_family(eax);
+ /* get the current revision from MSR 0x8B */
+ native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+ return rev;
}
-#ifdef CONFIG_MICROCODE
-extern void __init load_ucode_bsp(void);
-extern void load_ucode_ap(void);
-void reload_early_microcode(unsigned int cpu);
-extern bool initrd_gone;
-void microcode_bsp_resume(void);
-#else
-static inline void __init load_ucode_bsp(void) { }
-static inline void load_ucode_ap(void) { }
-static inline void reload_early_microcode(unsigned int cpu) { }
-static inline void microcode_bsp_resume(void) { }
-#endif
+void show_ucode_info_early(void);
+
+#else /* CONFIG_CPU_SUP_INTEL */
+static inline void show_ucode_info_early(void) { }
+#endif /* !CONFIG_CPU_SUP_INTEL */
#endif /* _ASM_X86_MICROCODE_H */
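The intel_microcode_get_datasize() helper consolidated above encodes the legacy convention that a zero datasize field means a 2000-byte payload. A self-contained sketch of that fallback, with the header struct reduced to the two fields involved (the totalsize values below are invented for illustration):

#include <stdio.h>

#define DEFAULT_UCODE_DATASIZE	2000	/* legacy Intel payload size */

/* Reduced to the two fields the helper looks at. */
struct demo_ucode_header {
	unsigned int datasize;		/* 0 means "legacy default" */
	unsigned int totalsize;
};

static unsigned int demo_get_datasize(const struct demo_ucode_header *hdr)
{
	return hdr->datasize ? hdr->datasize : DEFAULT_UCODE_DATASIZE;
}

int main(void)
{
	struct demo_ucode_header legacy = { .datasize = 0,    .totalsize = 2048 };
	struct demo_ucode_header modern = { .datasize = 4000, .totalsize = 6096 };

	printf("legacy payload: %u bytes\n", demo_get_datasize(&legacy));
	printf("modern payload: %u bytes\n", demo_get_datasize(&modern));
	return 0;
}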
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
deleted file mode 100644
index 9675c621c1ca..000000000000
--- a/arch/x86/include/asm/microcode_amd.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_MICROCODE_AMD_H
-#define _ASM_X86_MICROCODE_AMD_H
-
-#include <asm/microcode.h>
-
-#define UCODE_MAGIC 0x00414d44
-#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
-#define UCODE_UCODE_TYPE 0x00000001
-
-#define SECTION_HDR_SIZE 8
-#define CONTAINER_HDR_SZ 12
-
-struct equiv_cpu_entry {
- u32 installed_cpu;
- u32 fixed_errata_mask;
- u32 fixed_errata_compare;
- u16 equiv_cpu;
- u16 res;
-} __attribute__((packed));
-
-struct microcode_header_amd {
- u32 data_code;
- u32 patch_id;
- u16 mc_patch_data_id;
- u8 mc_patch_data_len;
- u8 init_flag;
- u32 mc_patch_data_checksum;
- u32 nb_dev_id;
- u32 sb_dev_id;
- u16 processor_rev_id;
- u8 nb_rev_id;
- u8 sb_rev_id;
- u8 bios_api_rev;
- u8 reserved1[3];
- u32 match_reg[8];
-} __attribute__((packed));
-
-struct microcode_amd {
- struct microcode_header_amd hdr;
- unsigned int mpb[];
-};
-
-#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
-
-#ifdef CONFIG_MICROCODE_AMD
-extern void __init load_ucode_amd_bsp(unsigned int family);
-extern void load_ucode_amd_ap(unsigned int family);
-extern int __init save_microcode_in_initrd_amd(unsigned int family);
-void reload_ucode_amd(unsigned int cpu);
-extern void amd_check_microcode(void);
-#else
-static inline void __init load_ucode_amd_bsp(unsigned int family) {}
-static inline void load_ucode_amd_ap(unsigned int family) {}
-static inline int __init
-save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
-static inline void reload_ucode_amd(unsigned int cpu) {}
-static inline void amd_check_microcode(void) {}
-#endif
-#endif /* _ASM_X86_MICROCODE_AMD_H */
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
deleted file mode 100644
index f1fa979e05bf..000000000000
--- a/arch/x86/include/asm/microcode_intel.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_MICROCODE_INTEL_H
-#define _ASM_X86_MICROCODE_INTEL_H
-
-#include <asm/microcode.h>
-
-struct microcode_header_intel {
- unsigned int hdrver;
- unsigned int rev;
- unsigned int date;
- unsigned int sig;
- unsigned int cksum;
- unsigned int ldrver;
- unsigned int pf;
- unsigned int datasize;
- unsigned int totalsize;
- unsigned int metasize;
- unsigned int reserved[2];
-};
-
-struct microcode_intel {
- struct microcode_header_intel hdr;
- unsigned int bits[];
-};
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
- unsigned int sig;
- unsigned int pf;
- unsigned int cksum;
-};
-
-struct extended_sigtable {
- unsigned int count;
- unsigned int cksum;
- unsigned int reserved[3];
- struct extended_signature sigs[];
-};
-
-#define DEFAULT_UCODE_DATASIZE (2000)
-#define MC_HEADER_SIZE (sizeof(struct microcode_header_intel))
-#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
-#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
-#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
-#define MC_HEADER_TYPE_MICROCODE 1
-#define MC_HEADER_TYPE_IFS 2
-
-#define get_totalsize(mc) \
- (((struct microcode_intel *)mc)->hdr.datasize ? \
- ((struct microcode_intel *)mc)->hdr.totalsize : \
- DEFAULT_UCODE_TOTALSIZE)
-
-#define get_datasize(mc) \
- (((struct microcode_intel *)mc)->hdr.datasize ? \
- ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
-
-#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
-
-static inline u32 intel_get_microcode_revision(void)
-{
- u32 rev, dummy;
-
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- native_cpuid_eax(1);
-
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
-
- return rev;
-}
-
-#ifdef CONFIG_MICROCODE_INTEL
-extern void __init load_ucode_intel_bsp(void);
-extern void load_ucode_intel_ap(void);
-extern void show_ucode_info_early(void);
-extern int __init save_microcode_in_initrd_intel(void);
-void reload_ucode_intel(void);
-#else
-static inline __init void load_ucode_intel_bsp(void) {}
-static inline void load_ucode_intel_ap(void) {}
-static inline void show_ucode_info_early(void) {}
-static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
-static inline void reload_ucode_intel(void) {}
-#endif
-
-#endif /* _ASM_X86_MICROCODE_INTEL_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b49778664d2b..6c8ff12140ae 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -739,6 +739,7 @@ static __always_inline unsigned long arch_local_irq_save(void)
".popsection")
extern void default_banner(void);
+void native_pv_lock_init(void) __init;
#else /* __ASSEMBLY__ */
@@ -778,6 +779,12 @@ extern void default_banner(void);
#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
+
+#ifndef __ASSEMBLY__
+static inline void native_pv_lock_init(void)
+{
+}
+#endif
#endif /* !CONFIG_PARAVIRT */
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fd750247ca89..861e53e201e9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -678,11 +678,13 @@ extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
+extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
static inline void amd_clear_divider(void) { }
+static inline void amd_check_microcode(void) { }
#endif
extern unsigned long arch_align_stack(unsigned long sp);
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index d87451df480b..cde8357bb226 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -74,8 +74,6 @@ static inline bool vcpu_is_preempted(long cpu)
*/
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
-void native_pv_lock_init(void) __init;
-
/*
* Shortcut for the queued_spin_lock_slowpath() function that allows
* virt to hijack it.
@@ -103,10 +101,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
return true;
}
-#else
-static inline void native_pv_lock_init(void)
-{
-}
+
#endif /* CONFIG_PARAVIRT */
#include <asm-generic/qspinlock.h>
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 42b17cf10b10..85b6e3609cb9 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -4,6 +4,8 @@
#include <asm/ibt.h>
+void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
+
/*
* For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
* registers. For i386, however, only 1 32-bit register needs to be saved
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 66c806784c52..5b4a1ce3d368 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -164,6 +164,7 @@ static __always_inline void sev_es_nmi_complete(void)
__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
+extern void sev_enable(struct boot_params *bp);
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
@@ -210,12 +211,15 @@ bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
+u64 snp_get_unsupported_features(u64 status);
+u64 sev_get_status(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
+static inline void sev_enable(struct boot_params *bp) { }
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
static inline void setup_ghcb(void) { }
@@ -235,6 +239,8 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
}
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
+static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
+static inline u64 sev_get_status(void) { return 0; }
#endif
#endif
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index caf41c4869a0..3235ba1e5b06 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -136,10 +136,11 @@ static inline int topology_max_smt_threads(void)
return __max_smt_threads;
}
+#include <linux/cpu_smt.h>
+
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
int topology_update_die_map(unsigned int dieid, unsigned int cpu);
int topology_phys_to_logical_pkg(unsigned int pkg);
-bool topology_smt_supported(void);
extern struct cpumask __cpu_primary_thread_mask;
#define cpu_primary_thread_mask ((const struct cpumask *)&__cpu_primary_thread_mask)
@@ -162,7 +163,6 @@ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_die_per_package(void) { return 1; }
static inline int topology_max_smt_threads(void) { return 1; }
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
-static inline bool topology_smt_supported(void) { return false; }
#endif /* !CONFIG_SMP */
static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 1b6455f881f9..6989b824fd32 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -10,6 +10,7 @@
* Copyright (c) Russ Anderson <rja@sgi.com>
*/
+#include <linux/efi.h>
#include <linux/rtc.h>
/*
@@ -115,7 +116,8 @@ struct uv_arch_type_entry {
struct uv_systab {
char signature[4]; /* must be UV_SYSTAB_SIG */
u32 revision; /* distinguish different firmware revs */
- u64 function; /* BIOS runtime callback function ptr */
+ u64 (__efiapi *function)(enum uv_bios_cmd, ...);
+ /* BIOS runtime callback function ptr */
u32 size; /* systab size (starting with _VERSION_UV4) */
struct {
u32 type:8; /* type of entry */
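The uv_systab change replaces a bare u64 cookie with a typed callback pointer so the BIOS entry point can be invoked without casting. A hedged sketch of the same pattern in plain C -- the __efiapi calling convention, the variadic prototype and enum uv_bios_cmd are kernel specifics and are replaced by stand-ins here:

#include <stdio.h>
#include <stdint.h>

enum demo_cmd { DEMO_GET_TIME, DEMO_GET_SN };

struct demo_systab {
	char signature[4];
	uint32_t revision;
	/* Typed callback instead of a bare u64 the caller has to cast. */
	uint64_t (*function)(enum demo_cmd cmd, uint64_t arg);
};

static uint64_t demo_bios_call(enum demo_cmd cmd, uint64_t arg)
{
	return cmd == DEMO_GET_TIME ? 1700000000ULL : arg + 1;
}

int main(void)
{
	struct demo_systab tab = { { 'D', 'E', 'M', 'O' }, 1, demo_bios_call };

	/* No cast needed; argument and return types are checked. */
	printf("time: %llu\n",
	       (unsigned long long)tab.function(DEMO_GET_TIME, 0));
	return 0;
}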
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index fa9ec20783fa..85e63d58c074 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -295,7 +295,10 @@ static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
+static inline unsigned long virt_to_pfn(const void *v)
+{
+ return PFN_DOWN(__pa(v));
+}
#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
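Turning virt_to_pfn() into a static inline taking const void * gives the argument a checked type instead of whatever a macro would accept. A small sketch of that difference; the __pa() virtual-to-physical translation is kernel-specific and omitted here, so the shift acts on the virtual address purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12

/* Macro version: accepts any expression, including a plain integer. */
#define demo_virt_to_pfn_macro(v)	((uintptr_t)(v) >> DEMO_PAGE_SHIFT)

/* Inline version: the compiler now insists on a pointer argument. */
static inline unsigned long demo_virt_to_pfn(const void *v)
{
	return (uintptr_t)v >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	static char buf[64];

	printf("macro:  %lu\n", (unsigned long)demo_virt_to_pfn_macro(buf));
	printf("inline: %lu\n", demo_virt_to_pfn(buf));
	return 0;
}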
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 099d58d02a26..a5ead6a6d233 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1527,6 +1527,7 @@ static noinline void __init int3_selftest(void)
static __initdata int __alt_reloc_selftest_addr;
+extern void __init __alt_reloc_selftest(void *arg);
__visible noinline void __init __alt_reloc_selftest(void *arg)
{
WARN_ON(arg != &__alt_reloc_selftest_addr);
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 035a3db5330b..356de955e78d 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -24,6 +24,8 @@
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT 0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
#define PCI_DEVICE_ID_AMD_MI200_ROOT 0x14bb
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
@@ -39,6 +41,7 @@
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4
#define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4
/* Protect the PCI config register pairs used for SMN. */
@@ -56,6 +59,8 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
{}
};
@@ -85,6 +90,8 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
{}
};
@@ -106,6 +113,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
{}
};
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 2a6509e8c840..9bfd6e397384 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -301,6 +301,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
local_irq_restore(flags);
}
+#ifdef CONFIG_SMP
/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
@@ -329,3 +330,4 @@ int safe_smp_processor_id(void)
return cpuid >= 0 ? cpuid : 0;
}
#endif
+#endif
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d9384d5b4b8e..b524dee1cbbb 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -294,8 +294,7 @@ static void __init early_get_apic_socketid_shift(void)
static void __init uv_stringify(int len, char *to, char *from)
{
- /* Relies on 'to' being NULL chars so result will be NULL terminated */
- strncpy(to, from, len-1);
+ strscpy(to, from, len);
/* Trim trailing spaces */
(void)strim(to);
@@ -1013,7 +1012,7 @@ static void __init calc_mmioh_map(enum mmioh_arch index,
/* One (UV2) mapping */
if (index == UV2_MMIOH) {
- strncpy(id, "MMIOH", sizeof(id));
+ strscpy(id, "MMIOH", sizeof(id));
max_io = max_pnode;
mapped = 0;
goto map_exit;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e3a65e9fc750..41b573f34a10 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -59,7 +59,6 @@
#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
-#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
@@ -2300,8 +2299,7 @@ void store_cpu_caps(struct cpuinfo_x86 *curr_info)
* @prev_info: CPU capabilities stored before an update.
*
* The microcode loader calls this upon late microcode load to recheck features,
- * only when microcode has been updated. Caller holds microcode_mutex and CPU
- * hotplug lock.
+ * only when microcode has been updated. Caller holds the CPU hotplug lock.
*
* Return: None
*/
@@ -2343,7 +2341,7 @@ void __init arch_cpu_finalize_init(void)
* identify_boot_cpu() initialized SMT support information, let the
* core code know.
*/
- cpu_smt_check_topology();
+ cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1c4639588ff9..be4045628fd3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -20,7 +20,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
-#include <asm/microcode_intel.h>
+#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
@@ -184,180 +184,6 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
return false;
}
-int intel_cpu_collect_info(struct ucode_cpu_info *uci)
-{
- unsigned int val[2];
- unsigned int family, model;
- struct cpu_signature csig = { 0 };
- unsigned int eax, ebx, ecx, edx;
-
- memset(uci, 0, sizeof(*uci));
-
- eax = 0x00000001;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- csig.sig = eax;
-
- family = x86_family(eax);
- model = x86_model(eax);
-
- if (model >= 5 || family > 6) {
- /* get processor flags from MSR 0x17 */
- native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig.pf = 1 << ((val[1] >> 18) & 7);
- }
-
- csig.rev = intel_get_microcode_revision();
-
- uci->cpu_sig = csig;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
-
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int intel_find_matching_signature(void *mc, unsigned int csig, int cpf)
-{
- struct microcode_header_intel *mc_hdr = mc;
- struct extended_sigtable *ext_hdr;
- struct extended_signature *ext_sig;
- int i;
-
- if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
- return 1;
-
- /* Look for ext. headers: */
- if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
- return 0;
-
- ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
- ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
-
- for (i = 0; i < ext_hdr->count; i++) {
- if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
- return 1;
- ext_sig++;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_find_matching_signature);
-
-/**
- * intel_microcode_sanity_check() - Sanity check microcode file.
- * @mc: Pointer to the microcode file contents.
- * @print_err: Display failure reason if true, silent if false.
- * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
- * Validate if the microcode header type matches with the type
- * specified here.
- *
- * Validate certain header fields and verify if computed checksum matches
- * with the one specified in the header.
- *
- * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
- * fail.
- */
-int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
-{
- unsigned long total_size, data_size, ext_table_size;
- struct microcode_header_intel *mc_header = mc;
- struct extended_sigtable *ext_header = NULL;
- u32 sum, orig_sum, ext_sigcount = 0, i;
- struct extended_signature *ext_sig;
-
- total_size = get_totalsize(mc_header);
- data_size = get_datasize(mc_header);
-
- if (data_size + MC_HEADER_SIZE > total_size) {
- if (print_err)
- pr_err("Error: bad microcode data file size.\n");
- return -EINVAL;
- }
-
- if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
- if (print_err)
- pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
- mc_header->hdrver);
- return -EINVAL;
- }
-
- ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
- if (ext_table_size) {
- u32 ext_table_sum = 0;
- u32 *ext_tablep;
-
- if (ext_table_size < EXT_HEADER_SIZE ||
- ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
- if (print_err)
- pr_err("Error: truncated extended signature table.\n");
- return -EINVAL;
- }
-
- ext_header = mc + MC_HEADER_SIZE + data_size;
- if (ext_table_size != exttable_size(ext_header)) {
- if (print_err)
- pr_err("Error: extended signature table size mismatch.\n");
- return -EFAULT;
- }
-
- ext_sigcount = ext_header->count;
-
- /*
- * Check extended table checksum: the sum of all dwords that
- * comprise a valid table must be 0.
- */
- ext_tablep = (u32 *)ext_header;
-
- i = ext_table_size / sizeof(u32);
- while (i--)
- ext_table_sum += ext_tablep[i];
-
- if (ext_table_sum) {
- if (print_err)
- pr_warn("Bad extended signature table checksum, aborting.\n");
- return -EINVAL;
- }
- }
-
- /*
- * Calculate the checksum of update data and header. The checksum of
- * valid update data and header including the extended signature table
- * must be 0.
- */
- orig_sum = 0;
- i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
- while (i--)
- orig_sum += ((u32 *)mc)[i];
-
- if (orig_sum) {
- if (print_err)
- pr_err("Bad microcode data checksum, aborting.\n");
- return -EINVAL;
- }
-
- if (!ext_table_size)
- return 0;
-
- /*
- * Check extended signature checksum: 0 => valid.
- */
- for (i = 0; i < ext_sigcount; i++) {
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
- EXT_SIGNATURE_SIZE * i;
-
- sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
- (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
- if (sum) {
- if (print_err)
- pr_err("Bad extended signature checksum, aborting.\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
-
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index 3b8476158236..e4c3ba91321c 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -206,7 +206,7 @@ static int intel_epb_offline(unsigned int cpu)
static const struct x86_cpu_id intel_epb_normal[] = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,
ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,
ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,
ENERGY_PERF_BIAS_NORMAL_POWERSAVE),
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 89e2aab5d34d..6f35f724cc14 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -843,6 +843,26 @@ static noinstr bool quirk_skylake_repmov(void)
}
/*
+ * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption
+ * errors. This means mce_gather_info() will not save the "ip" and "cs" registers.
+ *
+ * However, the context is still valid, so save the "cs" register for later use.
+ *
+ * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV.
+ *
+ * The Instruction Fetch Unit is at MCA bank 1 for all affected systems.
+ */
+static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs)
+{
+ if (bank != 1)
+ return;
+ if (!(m->status & MCI_STATUS_POISON))
+ return;
+
+ m->cs = regs->cs;
+}
+
+/*
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
@@ -861,6 +881,9 @@ static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned lo
if (mce_flags.snb_ifu_quirk)
quirk_sandybridge_ifu(i, m, regs);
+ if (mce_flags.zen_ifu_quirk)
+ quirk_zen_ifu(i, m, regs);
+
m->bank = i;
if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) {
mce_read_aux(m, i);
@@ -1608,6 +1631,13 @@ static void __start_timer(struct timer_list *t, unsigned long interval)
local_irq_restore(flags);
}
+static void mc_poll_banks_default(void)
+{
+ machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
+}
+
+void (*mc_poll_banks)(void) = mc_poll_banks_default;
+
static void mce_timer_fn(struct timer_list *t)
{
struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
@@ -1618,7 +1648,7 @@ static void mce_timer_fn(struct timer_list *t)
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
- machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
+ mc_poll_banks();
if (mce_intel_cmci_poll()) {
iv = mce_adjust_timer(iv);
@@ -1842,6 +1872,9 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
if (c->x86 == 0x15 && c->x86_model <= 0xf)
mce_flags.overflow_recov = 1;
+ if (c->x86 >= 0x17 && c->x86 <= 0x1A)
+ mce_flags.zen_ifu_quirk = 1;
+
}
if (c->x86_vendor == X86_VENDOR_INTEL) {
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 95275a5e57e0..f5323551c1a9 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -56,6 +56,13 @@ static DEFINE_PER_CPU(int, cmci_backoff_cnt);
*/
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+/*
+ * On systems that do support CMCI but it's disabled, polling for MCEs can
+ * cause the same event to be reported multiple times because IA32_MCi_STATUS
+ * is shared by all CPUs in the same package.
+ */
+static DEFINE_SPINLOCK(cmci_poll_lock);
+
#define CMCI_THRESHOLD 1
#define CMCI_POLL_INTERVAL (30 * HZ)
#define CMCI_STORM_INTERVAL (HZ)
@@ -426,12 +433,22 @@ void cmci_disable_bank(int bank)
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
+/* Bank polling function when CMCI is disabled. */
+static void cmci_mc_poll_banks(void)
+{
+ spin_lock(&cmci_poll_lock);
+ machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
+ spin_unlock(&cmci_poll_lock);
+}
+
void intel_init_cmci(void)
{
int banks;
- if (!cmci_supported(&banks))
+ if (!cmci_supported(&banks)) {
+ mc_poll_banks = cmci_mc_poll_banks;
return;
+ }
mce_threshold_vector = intel_threshold_interrupt;
cmci_discover(banks);
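With this change, intel_init_cmci() leaves polling on the default per-CPU path when CMCI works, and otherwise points the global mc_poll_banks hook at a serialized poller so shared IA32_MCi_STATUS banks are not reported twice. A userspace sketch of that default-plus-override pattern, with a pthread mutex standing in for the spinlock and a stub in place of machine_check_poll():

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cmci_poll_lock = PTHREAD_MUTEX_INITIALIZER;

static void machine_check_poll_stub(void)
{
	puts("polling MCE banks");
}

/* Default: each CPU polls its own banks, no serialization needed. */
static void mc_poll_banks_default(void)
{
	machine_check_poll_stub();
}

/* Override for shared status banks: serialize the pollers. */
static void cmci_mc_poll_banks(void)
{
	pthread_mutex_lock(&cmci_poll_lock);
	machine_check_poll_stub();
	pthread_mutex_unlock(&cmci_poll_lock);
}

static void (*mc_poll_banks)(void) = mc_poll_banks_default;

int main(void)
{
	bool cmci_supported = false;	/* pretend CMCI is absent/disabled */

	if (!cmci_supported)
		mc_poll_banks = cmci_mc_poll_banks;

	mc_poll_banks();	/* the timer callback calls through the pointer */
	return 0;
}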
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index d2412ce2d312..bcf1b3c66c9c 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -157,6 +157,9 @@ struct mce_vendor_flags {
*/
smca : 1,
+ /* Zen IFU quirk */
+ zen_ifu_quirk : 1,
+
/* AMD-style error thresholding banks present. */
amd_threshold : 1,
@@ -172,7 +175,7 @@ struct mce_vendor_flags {
/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
skx_repmov_quirk : 1,
- __reserved_0 : 56;
+ __reserved_0 : 55;
};
extern struct mce_vendor_flags mce_flags;
@@ -274,4 +277,5 @@ static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
return 0;
}
+extern void (*mc_poll_banks)(void);
#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
index 34098d48c48f..193d98b33a0a 100644
--- a/arch/x86/kernel/cpu/microcode/Makefile
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
microcode-y := core.o
obj-$(CONFIG_MICROCODE) += microcode.o
-microcode-$(CONFIG_MICROCODE_INTEL) += intel.o
-microcode-$(CONFIG_MICROCODE_AMD) += amd.o
+microcode-$(CONFIG_CPU_SUP_INTEL) += intel.o
+microcode-$(CONFIG_CPU_SUP_AMD) += amd.o
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 87208e46f7ed..bbd1dc38ea03 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -29,13 +29,53 @@
#include <linux/kernel.h>
#include <linux/pci.h>
-#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
+#include "internal.h"
+
+#define UCODE_MAGIC 0x00414d44
+#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000
+#define UCODE_UCODE_TYPE 0x00000001
+
+#define SECTION_HDR_SIZE 8
+#define CONTAINER_HDR_SZ 12
+
+struct equiv_cpu_entry {
+ u32 installed_cpu;
+ u32 fixed_errata_mask;
+ u32 fixed_errata_compare;
+ u16 equiv_cpu;
+ u16 res;
+} __packed;
+
+struct microcode_header_amd {
+ u32 data_code;
+ u32 patch_id;
+ u16 mc_patch_data_id;
+ u8 mc_patch_data_len;
+ u8 init_flag;
+ u32 mc_patch_data_checksum;
+ u32 nb_dev_id;
+ u32 sb_dev_id;
+ u16 processor_rev_id;
+ u8 nb_rev_id;
+ u8 sb_rev_id;
+ u8 bios_api_rev;
+ u8 reserved1[3];
+ u32 match_reg[8];
+} __packed;
+
+struct microcode_amd {
+ struct microcode_header_amd hdr;
+ unsigned int mpb[];
+};
+
+#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
+
static struct equiv_cpu_table {
unsigned int num_entries;
struct equiv_cpu_entry *entry;
@@ -56,9 +96,6 @@ struct cont_desc {
static u32 ucode_new_rev;
-/* One blob per node. */
-static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
-
/*
* Microcode patch container file is prepended to the initrd in cpio
* format. See Documentation/arch/x86/microcode.rst
@@ -415,20 +452,17 @@ static int __apply_microcode_amd(struct microcode_amd *mc)
*
* Returns true if container found (sets @desc), false otherwise.
*/
-static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
+static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
- u8 (*patch)[PATCH_MAX_SIZE];
struct microcode_amd *mc;
u32 rev, dummy, *new_rev;
bool ret = false;
#ifdef CONFIG_X86_32
new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
- patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
new_rev = &ucode_new_rev;
- patch = &amd_ucode_patch[0];
#endif
desc.cpuid_1_eax = cpuid_1_eax;
@@ -452,9 +486,6 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size, boo
if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
ret = true;
-
- if (save_patch)
- memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
}
return ret;
@@ -507,7 +538,7 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data
*ret = cp;
}
-void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
+static void apply_ucode_from_containers(unsigned int cpuid_1_eax)
{
struct cpio_data cp = { };
@@ -515,42 +546,12 @@ void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
if (!(cp.data && cp.size))
return;
- early_apply_microcode(cpuid_1_eax, cp.data, cp.size, true);
+ early_apply_microcode(cpuid_1_eax, cp.data, cp.size);
}
-void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+void load_ucode_amd_early(unsigned int cpuid_1_eax)
{
- struct microcode_amd *mc;
- struct cpio_data cp;
- u32 *new_rev, rev, dummy;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
- new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
- } else {
- mc = (struct microcode_amd *)amd_ucode_patch;
- new_rev = &ucode_new_rev;
- }
-
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
- /*
- * Check whether a new patch has been saved already. Also, allow application of
- * the same revision in order to pick up SMT-thread-specific configuration even
- * if the sibling SMT thread already has an up-to-date revision.
- */
- if (*new_rev && rev <= mc->hdr.patch_id) {
- if (!__apply_microcode_amd(mc)) {
- *new_rev = mc->hdr.patch_id;
- return;
- }
- }
-
- find_blobs_in_containers(cpuid_1_eax, &cp);
- if (!(cp.data && cp.size))
- return;
-
- early_apply_microcode(cpuid_1_eax, cp.data, cp.size, false);
+ return apply_ucode_from_containers(cpuid_1_eax);
}
static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
@@ -578,23 +579,6 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return 0;
}
-void reload_ucode_amd(unsigned int cpu)
-{
- u32 rev, dummy __always_unused;
- struct microcode_amd *mc;
-
- mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
-
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-
- if (rev < mc->hdr.patch_id) {
- if (!__apply_microcode_amd(mc)) {
- ucode_new_rev = mc->hdr.patch_id;
- pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
- }
- }
-}
-
/*
* a small, trivial cache of per-family ucode patches
*/
@@ -655,6 +639,28 @@ static struct ucode_patch *find_patch(unsigned int cpu)
return cache_find_patch(equiv_id);
}
+void reload_ucode_amd(unsigned int cpu)
+{
+ u32 rev, dummy __always_unused;
+ struct microcode_amd *mc;
+ struct ucode_patch *p;
+
+ p = find_patch(cpu);
+ if (!p)
+ return;
+
+ mc = p->data;
+
+ rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
+ if (rev < mc->hdr.patch_id) {
+ if (!__apply_microcode_amd(mc)) {
+ ucode_new_rev = mc->hdr.patch_id;
+ pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
+ }
+ }
+}
+
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -875,9 +881,6 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
continue;
ret = UCODE_NEW;
-
- memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
- memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
}
return ret;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 3afcf3de0dd4..6cc7a2c181da 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -31,15 +31,14 @@
#include <linux/fs.h>
#include <linux/mm.h>
-#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
-#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
-#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
+#include "internal.h"
+
#define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops;
@@ -54,15 +53,12 @@ LIST_HEAD(microcode_cache);
*
* All non cpu-hotplug-callback call sites use:
*
- * - microcode_mutex to synchronize with each other;
* - cpus_read_lock/unlock() to synchronize with
* the cpu-hotplug-callback call sites.
*
* We guarantee that only a single cpu is being
* updated at any particular moment of time.
*/
-static DEFINE_MUTEX(microcode_mutex);
-
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct cpu_info_ctx {
@@ -172,7 +168,7 @@ void __init load_ucode_bsp(void)
if (intel)
load_ucode_intel_bsp();
else
- load_ucode_amd_bsp(cpuid_1_eax);
+ load_ucode_amd_early(cpuid_1_eax);
}
static bool check_loader_disabled_ap(void)
@@ -200,7 +196,7 @@ void load_ucode_ap(void)
break;
case X86_VENDOR_AMD:
if (x86_family(cpuid_1_eax) >= 0x10)
- load_ucode_amd_ap(cpuid_1_eax);
+ load_ucode_amd_early(cpuid_1_eax);
break;
default:
break;
@@ -298,7 +294,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
#endif
}
-void reload_early_microcode(unsigned int cpu)
+static void reload_early_microcode(unsigned int cpu)
{
int vendor, family;
@@ -488,10 +484,7 @@ static ssize_t reload_store(struct device *dev,
if (tmp_ret != UCODE_NEW)
goto put;
- mutex_lock(&microcode_mutex);
ret = microcode_reload_late();
- mutex_unlock(&microcode_mutex);
-
put:
cpus_read_unlock();
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 467cf37ea90a..94dd6af9c963 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -10,15 +10,7 @@
* Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
* H Peter Anvin" <hpa@zytor.com>
*/
-
-/*
- * This needs to be before all headers so that pr_debug in printk.h doesn't turn
- * printk calls into no_printk().
- *
- *#define DEBUG
- */
#define pr_fmt(fmt) "microcode: " fmt
-
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
@@ -30,13 +22,14 @@
#include <linux/uio.h>
#include <linux/mm.h>
-#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/msr.h>
+#include "internal.h"
+
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
/* Current microcode patch used in early patching on the APs. */
@@ -45,6 +38,208 @@ static struct microcode_intel *intel_ucode_patch;
/* last level cache size per core */
static int llc_size_per_core;
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+};
+
+struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[];
+};
+
+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
+#define EXT_HEADER_SIZE (sizeof(struct extended_sigtable))
+#define EXT_SIGNATURE_SIZE (sizeof(struct extended_signature))
+
+static inline unsigned int get_totalsize(struct microcode_header_intel *hdr)
+{
+ return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE;
+}
+
+static inline unsigned int exttable_size(struct extended_sigtable *et)
+{
+ return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
+}
+
+int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+{
+ unsigned int val[2];
+ unsigned int family, model;
+ struct cpu_signature csig = { 0 };
+ unsigned int eax, ebx, ecx, edx;
+
+ memset(uci, 0, sizeof(*uci));
+
+ eax = 0x00000001;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ csig.sig = eax;
+
+ family = x86_family(eax);
+ model = x86_model(eax);
+
+ if (model >= 5 || family > 6) {
+ /* get processor flags from MSR 0x17 */
+ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+ csig.pf = 1 << ((val[1] >> 18) & 7);
+ }
+
+ csig.rev = intel_get_microcode_revision();
+
+ uci->cpu_sig = csig;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
+
+/*
+ * Returns 1 if update has been found, 0 otherwise.
+ */
+int intel_find_matching_signature(void *mc, unsigned int csig, int cpf)
+{
+ struct microcode_header_intel *mc_hdr = mc;
+ struct extended_sigtable *ext_hdr;
+ struct extended_signature *ext_sig;
+ int i;
+
+ if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
+ return 1;
+
+ /* Look for ext. headers: */
+ if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
+ return 0;
+
+ ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
+ ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
+
+ for (i = 0; i < ext_hdr->count; i++) {
+ if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
+ return 1;
+ ext_sig++;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_find_matching_signature);
+
+/**
+ * intel_microcode_sanity_check() - Sanity check microcode file.
+ * @mc: Pointer to the microcode file contents.
+ * @print_err: Display failure reason if true, silent if false.
+ * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
+ * Validate if the microcode header type matches with the type
+ * specified here.
+ *
+ * Validate certain header fields and verify if computed checksum matches
+ * with the one specified in the header.
+ *
+ * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
+ * fail.
+ */
+int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
+{
+ unsigned long total_size, data_size, ext_table_size;
+ struct microcode_header_intel *mc_header = mc;
+ struct extended_sigtable *ext_header = NULL;
+ u32 sum, orig_sum, ext_sigcount = 0, i;
+ struct extended_signature *ext_sig;
+
+ total_size = get_totalsize(mc_header);
+ data_size = intel_microcode_get_datasize(mc_header);
+
+ if (data_size + MC_HEADER_SIZE > total_size) {
+ if (print_err)
+ pr_err("Error: bad microcode data file size.\n");
+ return -EINVAL;
+ }
+
+ if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
+ if (print_err)
+ pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
+ mc_header->hdrver);
+ return -EINVAL;
+ }
+
+ ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
+ if (ext_table_size) {
+ u32 ext_table_sum = 0;
+ u32 *ext_tablep;
+
+ if (ext_table_size < EXT_HEADER_SIZE ||
+ ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
+ if (print_err)
+ pr_err("Error: truncated extended signature table.\n");
+ return -EINVAL;
+ }
+
+ ext_header = mc + MC_HEADER_SIZE + data_size;
+ if (ext_table_size != exttable_size(ext_header)) {
+ if (print_err)
+ pr_err("Error: extended signature table size mismatch.\n");
+ return -EFAULT;
+ }
+
+ ext_sigcount = ext_header->count;
+
+ /*
+ * Check extended table checksum: the sum of all dwords that
+ * comprise a valid table must be 0.
+ */
+ ext_tablep = (u32 *)ext_header;
+
+ i = ext_table_size / sizeof(u32);
+ while (i--)
+ ext_table_sum += ext_tablep[i];
+
+ if (ext_table_sum) {
+ if (print_err)
+ pr_warn("Bad extended signature table checksum, aborting.\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Calculate the checksum of update data and header. The checksum of
+ * valid update data and header including the extended signature table
+ * must be 0.
+ */
+ orig_sum = 0;
+ i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
+ while (i--)
+ orig_sum += ((u32 *)mc)[i];
+
+ if (orig_sum) {
+ if (print_err)
+ pr_err("Bad microcode data checksum, aborting.\n");
+ return -EINVAL;
+ }
+
+ if (!ext_table_size)
+ return 0;
+
+ /*
+ * Check extended signature checksum: 0 => valid.
+ */
+ for (i = 0; i < ext_sigcount; i++) {
+ ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
+ EXT_SIGNATURE_SIZE * i;
+
+ sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+ if (sum) {
+ if (print_err)
+ pr_err("Bad extended signature checksum, aborting.\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
+
/*
* Returns 1 if update has been found, 0 otherwise.
*/
@@ -202,86 +397,6 @@ next:
return patch;
}
-static void show_saved_mc(void)
-{
-#ifdef DEBUG
- int i = 0, j;
- unsigned int sig, pf, rev, total_size, data_size, date;
- struct ucode_cpu_info uci;
- struct ucode_patch *p;
-
- if (list_empty(&microcode_cache)) {
- pr_debug("no microcode data saved.\n");
- return;
- }
-
- intel_cpu_collect_info(&uci);
-
- sig = uci.cpu_sig.sig;
- pf = uci.cpu_sig.pf;
- rev = uci.cpu_sig.rev;
- pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
-
- list_for_each_entry(p, &microcode_cache, plist) {
- struct microcode_header_intel *mc_saved_header;
- struct extended_sigtable *ext_header;
- struct extended_signature *ext_sig;
- int ext_sigcount;
-
- mc_saved_header = (struct microcode_header_intel *)p->data;
-
- sig = mc_saved_header->sig;
- pf = mc_saved_header->pf;
- rev = mc_saved_header->rev;
- date = mc_saved_header->date;
-
- total_size = get_totalsize(mc_saved_header);
- data_size = get_datasize(mc_saved_header);
-
- pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
- i++, sig, pf, rev, total_size,
- date & 0xffff,
- date >> 24,
- (date >> 16) & 0xff);
-
- /* Look for ext. headers: */
- if (total_size <= data_size + MC_HEADER_SIZE)
- continue;
-
- ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
- ext_sigcount = ext_header->count;
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
-
- for (j = 0; j < ext_sigcount; j++) {
- sig = ext_sig->sig;
- pf = ext_sig->pf;
-
- pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
- j, sig, pf);
-
- ext_sig++;
- }
- }
-#endif
-}
-
-/*
- * Save this microcode patch. It will be loaded early when a CPU is
- * hot-added or resumes.
- */
-static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
-{
- /* Synchronization during CPU hotplug. */
- static DEFINE_MUTEX(x86_cpu_microcode_mutex);
-
- mutex_lock(&x86_cpu_microcode_mutex);
-
- save_microcode_patch(uci, mc, size);
- show_saved_mc();
-
- mutex_unlock(&x86_cpu_microcode_mutex);
-}
-
static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
unsigned int eax = 1, ebx, ecx = 0, edx;
@@ -428,9 +543,6 @@ int __init save_microcode_in_initrd_intel(void)
intel_cpu_collect_info(&uci);
scan_microcode(cp.data, cp.size, &uci, true);
-
- show_saved_mc();
-
return 0;
}
@@ -701,12 +813,8 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
- /*
- * If early loading microcode is supported, save this mc into
- * permanent memory. So it will be loaded early when a CPU is hot added
- * or resumes.
- */
- save_mc_for_early(uci, new_mc, new_mc_size);
+ /* Save for CPU hotplug */
+ save_microcode_patch(uci, new_mc, new_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
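The sanity-check code moved into this file validates an Intel microcode image by summing every 32-bit word of header plus data; the header's cksum field is chosen so a valid image sums to zero. A tiny standalone sketch of that rule over an invented four-dword image:

#include <stdio.h>
#include <stdint.h>

/* Sum every 32-bit word; a valid image sums to 0 modulo 2^32. */
static int dword_checksum_ok(const uint32_t *mc, unsigned int dwords)
{
	uint32_t sum = 0;

	while (dwords--)
		sum += mc[dwords];

	return sum == 0;
}

int main(void)
{
	uint32_t image[4] = { 0x11111111, 0x22222222, 0x00000003, 0 };

	/* The last word plays the role of the header cksum field. */
	image[3] = -(image[0] + image[1] + image[2]);

	printf("checksum %s\n", dword_checksum_ok(image, 4) ? "ok" : "bad");
	return 0;
}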
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
new file mode 100644
index 000000000000..bf883aa71233
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X86_MICROCODE_INTERNAL_H
+#define _X86_MICROCODE_INTERNAL_H
+
+#include <linux/earlycpio.h>
+#include <linux/initrd.h>
+
+#include <asm/cpu.h>
+#include <asm/microcode.h>
+
+struct ucode_patch {
+ struct list_head plist;
+ void *data; /* Intel uses only this one */
+ unsigned int size;
+ u32 patch_id;
+ u16 equiv_cpu;
+};
+
+extern struct list_head microcode_cache;
+
+struct device;
+
+enum ucode_state {
+ UCODE_OK = 0,
+ UCODE_NEW,
+ UCODE_UPDATED,
+ UCODE_NFOUND,
+ UCODE_ERROR,
+};
+
+struct microcode_ops {
+ enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
+
+ void (*microcode_fini_cpu)(int cpu);
+
+ /*
+ * The generic 'microcode_core' part guarantees that
+ * the callbacks below run on a target cpu when they
+ * are being called.
+ * See also the "Synchronization" section in microcode_core.c.
+ */
+ enum ucode_state (*apply_microcode)(int cpu);
+ int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+};
+
+extern struct ucode_cpu_info ucode_cpu_info[];
+struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
+
+#define MAX_UCODE_COUNT 128
+
+#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
+#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
+#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
+#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
+#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
+#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
+#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
+
+#define CPUID_IS(a, b, c, ebx, ecx, edx) \
+ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))
+
+/*
+ * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
+ * x86_cpuid_vendor() gets vendor id for BSP.
+ *
+ * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify
+ * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
+ *
+ * x86_cpuid_vendor() gets vendor information directly from CPUID.
+ */
+static inline int x86_cpuid_vendor(void)
+{
+ u32 eax = 0x00000000;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
+ return X86_VENDOR_INTEL;
+
+ if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
+ return X86_VENDOR_AMD;
+
+ return X86_VENDOR_UNKNOWN;
+}
+
+static inline unsigned int x86_cpuid_family(void)
+{
+ u32 eax = 0x00000001;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ return x86_family(eax);
+}
+
+extern bool initrd_gone;
+
+#ifdef CONFIG_CPU_SUP_AMD
+void load_ucode_amd_bsp(unsigned int family);
+void load_ucode_amd_ap(unsigned int family);
+void load_ucode_amd_early(unsigned int cpuid_1_eax);
+int save_microcode_in_initrd_amd(unsigned int family);
+void reload_ucode_amd(unsigned int cpu);
+struct microcode_ops *init_amd_microcode(void);
+void exit_amd_microcode(void);
+#else /* CONFIG_CPU_SUP_AMD */
+static inline void load_ucode_amd_bsp(unsigned int family) { }
+static inline void load_ucode_amd_ap(unsigned int family) { }
+static inline void load_ucode_amd_early(unsigned int family) { }
+static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+static inline void reload_ucode_amd(unsigned int cpu) { }
+static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
+static inline void exit_amd_microcode(void) { }
+#endif /* !CONFIG_CPU_SUP_AMD */
+
+#ifdef CONFIG_CPU_SUP_INTEL
+void load_ucode_intel_bsp(void);
+void load_ucode_intel_ap(void);
+int save_microcode_in_initrd_intel(void);
+void reload_ucode_intel(void);
+struct microcode_ops *init_intel_microcode(void);
+#else /* CONFIG_CPU_SUP_INTEL */
+static inline void load_ucode_intel_bsp(void) { }
+static inline void load_ucode_intel_ap(void) { }
+static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; }
+static inline void reload_ucode_intel(void) { }
+static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
+#endif /* !CONFIG_CPU_SUP_INTEL */
+
+#endif /* _X86_MICROCODE_INTERNAL_H */
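The new internal.h keeps the early-boot vendor probe that packs four ASCII characters per 32-bit register (QCHAR) and compares CPUID leaf 0's EBX/EDX/ECX against "Genu"/"ineI"/"ntel". A userspace sketch of the packing and comparison, feeding the registers from the vendor string via memcpy instead of executing CPUID (little-endian layout assumed, as on x86):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define QCHAR(a, b, c, d)	((uint32_t)(a) + ((uint32_t)(b) << 8) + \
				 ((uint32_t)(c) << 16) + ((uint32_t)(d) << 24))

#define CPUID_INTEL1	QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2	QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3	QCHAR('n', 't', 'e', 'l')

/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX -- in that order. */
#define CPUID_IS(a, b, c, ebx, ecx, edx) \
	(!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))

int main(void)
{
	const char vendor[] = "GenuineIntel";	/* what CPUID would report */
	uint32_t ebx, edx, ecx;

	memcpy(&ebx, vendor + 0, 4);	/* "Genu" */
	memcpy(&edx, vendor + 4, 4);	/* "ineI" */
	memcpy(&ecx, vendor + 8, 4);	/* "ntel" */

	printf("GenuineIntel: %s\n",
	       CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx) ?
	       "yes" : "no");
	return 0;
}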
diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h
index af5cbdd9bd29..f6d856bd50bc 100644
--- a/arch/x86/kernel/fpu/context.h
+++ b/arch/x86/kernel/fpu/context.h
@@ -19,8 +19,7 @@
* FPU state for a task MUST let the rest of the kernel know that the
* FPU registers are no longer valid for this task.
*
- * Either one of these invalidation functions is enough. Invalidate
- * a resource you control: CPU if using the CPU for something else
+ * Invalidate a resource you control: CPU if using the CPU for something else
* (with preemption disabled), FPU for the current task, or a task that
* is prevented from running by the current task.
*/
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1015af1ae562..98e507cc7d34 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void)
struct fpu *fpu = &current->thread.fpu;
fpregs_lock();
- fpu__drop(fpu);
+ __fpu_invalidate_fpregs_state(fpu);
/*
* This does not change the actual hardware registers. It just
* resets the memory image and sets TIF_NEED_FPU_LOAD so a
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 0bab497c9436..1afbc4866b10 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
goto out_disable;
}
+ /*
+ * CPU capabilities initialization runs before FPU init. So
+ * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
+ * functional, set the feature bit so depending code works.
+ */
+ setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+
print_xstate_offset_size();
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
fpu_kernel_cfg.max_features,
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index c5b9289837dc..ea6995920b7a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -51,7 +51,9 @@ SYM_CODE_START_NOALIGN(startup_64)
* for us. These identity mapped page tables map all of the
* kernel pages and possibly all of memory.
*
- * %rsi holds a physical pointer to real_mode_data.
+ * %RSI holds the physical address of the boot_params structure
+ * provided by the bootloader. Preserve it in %R15 so C function calls
+ * will not clobber it.
*
* We come here either directly from a 64bit bootloader, or from
* arch/x86/boot/compressed/head_64.S.
@@ -62,6 +64,7 @@ SYM_CODE_START_NOALIGN(startup_64)
* compiled to run at we first fixup the physical addresses in our page
* tables and then reload them.
*/
+ mov %rsi, %r15
/* Set up the stack for verify_cpu() */
leaq (__end_init_task - PTREGS_SIZE)(%rip), %rsp
@@ -75,9 +78,7 @@ SYM_CODE_START_NOALIGN(startup_64)
shrq $32, %rdx
wrmsr
- pushq %rsi
call startup_64_setup_env
- popq %rsi
/* Now switch to __KERNEL_CS so IRET works reliably */
pushq $__KERNEL_CS
@@ -93,12 +94,10 @@ SYM_CODE_START_NOALIGN(startup_64)
* Activate SEV/SME memory encryption if supported/enabled. This needs to
* be done now, since this also includes setup of the SEV-SNP CPUID table,
* which needs to be done before any CPUID instructions are executed in
- * subsequent code.
+ * subsequent code. Pass the boot_params pointer as the first argument.
*/
- movq %rsi, %rdi
- pushq %rsi
+ movq %r15, %rdi
call sme_enable
- popq %rsi
#endif
/* Sanitize CPU configuration */
@@ -111,9 +110,8 @@ SYM_CODE_START_NOALIGN(startup_64)
* programmed into CR3.
*/
leaq _text(%rip), %rdi
- pushq %rsi
+ movq %r15, %rsi
call __startup_64
- popq %rsi
/* Form the CR3 value being sure to include the CR3 modifier */
addq $(early_top_pgt - __START_KERNEL_map), %rax
@@ -127,8 +125,6 @@ SYM_CODE_START(secondary_startup_64)
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
* and someone has loaded a mapped page table.
*
- * %rsi holds a physical pointer to real_mode_data.
- *
* We come here either from startup_64 (using physical addresses)
* or from trampoline.S (using virtual addresses).
*
@@ -153,6 +149,9 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
UNWIND_HINT_END_OF_STACK
ANNOTATE_NOENDBR
+ /* Clear %R15 which holds the boot_params pointer on the boot CPU */
+ xorq %r15, %r15
+
/*
* Retrieve the modifier (SME encryption mask if SME is active) to be
* added to the initial pgdir entry that will be programmed into CR3.
@@ -199,13 +198,9 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
* hypervisor could lie about the C-bit position to perform a ROP
* attack on the guest by writing to the unencrypted stack and wait for
* the next RET instruction.
- * %rsi carries pointer to realmode data and is callee-clobbered. Save
- * and restore it.
*/
- pushq %rsi
movq %rax, %rdi
call sev_verify_cbit
- popq %rsi
/*
* Switch to new page-table
@@ -365,9 +360,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
wrmsr
/* Setup and Load IDT */
- pushq %rsi
call early_setup_idt
- popq %rsi
/* Check if nx is implemented */
movl $0x80000001, %eax
@@ -403,9 +396,8 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
pushq $0
popfq
- /* rsi is pointer to real mode structure with interesting info.
- pass it to C */
- movq %rsi, %rdi
+ /* Pass the boot_params pointer as the first argument */
+ movq %r15, %rdi
.Ljump_to_C_code:
/*
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index c8eb1ac5125a..1648aa0204d9 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -421,7 +421,7 @@ static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
* the IO_APIC has been initialized.
*/
hc->cpu = boot_cpu_data.cpu_index;
- strncpy(hc->name, "hpet", sizeof(hc->name));
+ strscpy(hc->name, "hpet", sizeof(hc->name));
hpet_init_clockevent(hc, 50);
hc->evt.tick_resume = hpet_clkevt_legacy_resume;
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1cceac5984da..526d4da3dcd4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -966,10 +966,8 @@ static void __init kvm_init_platform(void)
* Ensure that _bss_decrypted section is marked as decrypted in the
* shared pages list.
*/
- nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
- PAGE_SIZE);
early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
- nr_pages, 0);
+ __end_bss_decrypted - __start_bss_decrypted, 0);
/*
* If not booted using EFI, enable Live migration support.
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ac10b46c5832..975f98d5eee5 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -75,10 +75,16 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
void __init native_pv_lock_init(void)
{
- if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
+static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ tlb_remove_page(tlb, table);
+}
+
unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
unsigned int len)
{
@@ -295,8 +301,7 @@ struct paravirt_patch_template pv_ops = {
.mmu.flush_tlb_kernel = native_flush_tlb_global,
.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
.mmu.flush_tlb_multi = native_flush_tlb_multi,
- .mmu.tlb_remove_table =
- (void (*)(struct mmu_gather *, void *))tlb_remove_page,
+ .mmu.tlb_remove_table = native_tlb_remove_table,
.mmu.exit_mmap = paravirt_nop,
.mmu.notify_page_enc_status_changed = paravirt_nop,
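The native_tlb_remove_table() change above replaces a call through a cast function pointer with a small typed wrapper: calling a function through a pointer of an incompatible type is undefined behaviour in C and trips kernel control-flow-integrity checks, even when the argument types happen to be ABI-compatible. A generic stand-alone sketch of the same wrapper pattern follows; all names here are illustrative, not kernel symbols.

#include <stdio.h>

struct gather { int dummy; };		/* stand-in for struct mmu_gather */
struct page_ref { int dummy; };		/* stand-in for struct page */

/* Callback type the ops table expects. */
typedef void (*remove_table_fn)(struct gather *tlb, void *table);

/* Existing helper with a more specific prototype. */
static void remove_page(struct gather *tlb, struct page_ref *page)
{
	(void)tlb;
	printf("freeing page %p\n", (void *)page);
}

/*
 * Casting remove_page to remove_table_fn and calling through the cast is
 * undefined behaviour and is rejected by CFI. The fix is a trivial wrapper
 * with exactly the expected signature.
 */
static void remove_table(struct gather *tlb, void *table)
{
	remove_page(tlb, table);	/* void * converts implicitly */
}

int main(void)
{
	remove_table_fn op = remove_table;
	struct gather tlb = { 0 };
	struct page_ref page = { 0 };

	op(&tlb, &page);
	return 0;
}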
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 1ee7bed453de..d380c9399480 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -1575,6 +1575,9 @@ static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
long val, *reg = vc_insn_get_rm(ctxt);
enum es_result ret;
+ if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+ return ES_VMM_ERROR;
+
if (!reg)
return ES_DECODE_FAILED;
@@ -1612,6 +1615,9 @@ static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
long *reg = vc_insn_get_rm(ctxt);
+ if (sev_status & MSR_AMD64_SNP_DEBUG_SWAP)
+ return ES_VMM_ERROR;
+
if (!reg)
return ES_DECODE_FAILED;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index e1aa2cd7734b..d40ed3a7dc23 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -327,14 +327,6 @@ static void notrace start_secondary(void *unused)
}
/**
- * topology_smt_supported - Check whether SMT is supported by the CPUs
- */
-bool topology_smt_supported(void)
-{
- return smp_num_siblings > 1;
-}
-
-/**
* topology_phys_to_logical_pkg - Map a physical package id to a logical
* @phys_pkg: The physical package id to map
*
@@ -632,14 +624,9 @@ static void __init build_sched_topology(void)
};
#endif
#ifdef CONFIG_SCHED_CLUSTER
- /*
- * For now, skip the cluster domain on Hybrid.
- */
- if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
- x86_topology[i++] = (struct sched_domain_topology_level){
- cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS)
- };
- }
+ x86_topology[i++] = (struct sched_domain_topology_level){
+ cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS)
+ };
#endif
#ifdef CONFIG_SCHED_MC
x86_topology[i++] = (struct sched_domain_topology_level){
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3425c6a943e4..15f97c0abc9d 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1258,7 +1258,7 @@ static void __init check_system_tsc_reliable(void)
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
- nr_online_nodes <= 2)
+ nr_online_nodes <= 4)
tsc_disable_clocksource_watchdog();
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8192452d1d2d..ffa25e962343 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -20,7 +20,6 @@
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h> /* for MAX_DMA_PFN */
-#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
@@ -273,7 +272,7 @@ static void __init probe_page_size_mask(void)
static const struct x86_cpu_id invlpg_miss_ids[] = {
INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
- INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
+ INTEL_MATCH(INTEL_FAM6_ATOM_GRACEMONT ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 54bbd5163e8d..6faea41e99b6 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -288,11 +288,10 @@ static bool amd_enc_cache_flush_required(void)
return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
}
-static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+static void enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
{
#ifdef CONFIG_PARAVIRT
- unsigned long sz = npages << PAGE_SHIFT;
- unsigned long vaddr_end = vaddr + sz;
+ unsigned long vaddr_end = vaddr + size;
while (vaddr < vaddr_end) {
int psize, pmask, level;
@@ -342,7 +341,7 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e
snp_set_memory_private(vaddr, npages);
if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
- enc_dec_hypercall(vaddr, npages, enc);
+ enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc);
return true;
}
@@ -466,7 +465,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
ret = 0;
- early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+ early_set_mem_enc_dec_hypercall(start, size, enc);
out:
__flush_tlb_all();
return ret;
@@ -482,9 +481,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
return early_set_memory_enc_dec(vaddr, size, true);
}
-void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool enc)
{
- enc_dec_hypercall(vaddr, npages, enc);
+ enc_dec_hypercall(vaddr, size, enc);
}
void __init sme_early_init(void)
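The kvm.c and mem_encrypt_amd.c hunks above change the hypercall helpers from a page count to a byte length, so callers no longer round up to pages only to have the callee multiply back. A tiny sketch of the two conversions involved, assuming the usual 4 KiB x86 page size purely for illustration:

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	size_t len = 5 * PAGE_SIZE + 123;			/* arbitrary region length */
	size_t npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* DIV_ROUND_UP(len, PAGE_SIZE) */

	/* Converting a page count back to bytes rounds the length up to a page multiple. */
	assert(npages == 6);
	assert((npages << PAGE_SHIFT) == 6 * PAGE_SIZE);

	/* Passing the byte length straight through avoids the round trip entirely. */
	assert(len <= (npages << PAGE_SHIFT));
	return 0;
}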
diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
index c69f8471e6d0..4ef20b49eb5e 100644
--- a/arch/x86/platform/efi/memmap.c
+++ b/arch/x86/platform/efi/memmap.c
@@ -82,7 +82,7 @@ int __init efi_memmap_alloc(unsigned int num_entries,
/**
* efi_memmap_install - Install a new EFI memory map in efi.memmap
- * @ctx: map allocation parameters (address, size, flags)
+ * @data: efi memmap installation parameters
*
* Unlike efi_memmap_init_*(), this function does not allow the caller
* to switch from early to late mappings. It simply uses the existing
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index a60af0230e27..a6ab43f69b7d 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -202,21 +202,17 @@ static int param_set_action(const char *val, const struct kernel_param *kp)
{
int i;
int n = ARRAY_SIZE(valid_acts);
- char arg[ACTION_LEN], *p;
+ char arg[ACTION_LEN];
/* (remove possible '\n') */
- strncpy(arg, val, ACTION_LEN - 1);
- arg[ACTION_LEN - 1] = '\0';
- p = strchr(arg, '\n');
- if (p)
- *p = '\0';
+ strscpy(arg, val, strnchrnul(val, sizeof(arg)-1, '\n') - val + 1);
for (i = 0; i < n; i++)
if (!strcmp(arg, valid_acts[i].action))
break;
if (i < n) {
- strcpy(uv_nmi_action, arg);
+ strscpy(uv_nmi_action, arg, sizeof(uv_nmi_action));
pr_info("UV: New NMI action:%s\n", uv_nmi_action);
return 0;
}
@@ -959,7 +955,7 @@ static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
/* Unexpected return, revert action to "dump" */
if (master)
- strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
+ strscpy(uv_nmi_action, "dump", sizeof(uv_nmi_action));
}
/* Pause as all CPU's enter the NMI handler */
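The param_set_action() rewrite above folds the copy-and-trim into one call: strnchrnul() finds the first '\n' within the bounded source (or the end of the scan), and passing that offset plus one as the strscpy() size copies exactly the text before the newline while still NUL-terminating. A rough user-space approximation, with bounded stand-ins for the two kernel string helpers:

#include <stdio.h>
#include <string.h>

/* Bounded stand-in for the kernel's strnchrnul(): return a pointer to the first
 * occurrence of @c in the first @count bytes of @s, else to the byte that stopped
 * the scan (the NUL terminator or @s + @count). */
static const char *strnchrnul_stub(const char *s, size_t count, int c)
{
	while (count-- && *s && *s != (char)c)
		s++;
	return s;
}

int main(void)
{
	char arg[16];
	const char *val = "kdump\n";	/* module parameter value with a trailing newline */
	size_t n = (size_t)(strnchrnul_stub(val, sizeof(arg) - 1, '\n') - val);

	/* Like strscpy(arg, val, n + 1): copy at most n characters, always NUL-terminate. */
	snprintf(arg, n + 1, "%s", val);
	printf("'%s'\n", arg);		/* prints 'kdump' with the newline stripped */
	return 0;
}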
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 7558139920f8..aea47e793963 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -14,6 +14,7 @@
#include <crypto/sha2.h>
#include <asm/purgatory.h>
+#include "../boot/compressed/error.h"
#include "../boot/string.h"
u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory");
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 93b658248d01..27fc170838e9 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -79,7 +79,7 @@
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
-#include <acpi/pdc_intel.h>
+#include <acpi/proc_cap_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif
@@ -288,17 +288,17 @@ static bool __init xen_check_mwait(void)
native_cpuid(&ax, &bx, &cx, &dx);
- /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
+ /* Ask the Hypervisor whether to clear ACPI_PROC_CAP_C_C2C3_FFH. If so,
* don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
*/
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
- buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);
+ buf[2] = (ACPI_PROC_CAP_C_CAPABILITY_SMP | ACPI_PROC_CAP_EST_CAPABILITY_SWSMP);
set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
if ((HYPERVISOR_platform_op(&op) == 0) &&
- (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
+ (buf[2] & (ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH))) {
cpuid_leaf5_ecx_val = cx;
cpuid_leaf5_edx_val = dx;
}
@@ -523,7 +523,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
BUG_ON(size > PAGE_SIZE);
BUG_ON(va & ~PAGE_MASK);
- pfn = virt_to_pfn(va);
+ pfn = virt_to_pfn((void *)va);
mfn = pfn_to_mfn(pfn);
pte = pfn_pte(pfn, PAGE_KERNEL_RO);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e0a975165de7..eb3bac0b22a1 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2202,13 +2202,13 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
mcs = __xen_mc_entry(0);
if (in_frames)
- in_frames[i] = virt_to_mfn(vaddr);
+ in_frames[i] = virt_to_mfn((void *)vaddr);
MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
- __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+ __set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
if (out_frames)
- out_frames[i] = virt_to_pfn(vaddr);
+ out_frames[i] = virt_to_pfn((void *)vaddr);
}
xen_mc_issue(0);
}
@@ -2250,7 +2250,7 @@ static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
MULTI_update_va_mapping(mcs.mc, vaddr,
mfn_pte(mfn, PAGE_KERNEL), flags);
- set_phys_to_machine(virt_to_pfn(vaddr), mfn);
+ set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn);
}
xen_mc_issue(0);
@@ -2310,12 +2310,6 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
int success;
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
- /*
- * Currently an auto-translated guest will not perform I/O, nor will
- * it require PAE page directories below 4GB. Therefore any calls to
- * this function are redundant and can be ignored.
- */
-
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
@@ -2327,7 +2321,7 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
xen_zap_pfn_range(vstart, order, in_frames, NULL);
/* 2. Get a new contiguous memory extent. */
- out_frame = virt_to_pfn(vstart);
+ out_frame = virt_to_pfn((void *)vstart);
success = xen_exchange_memory(1UL << order, 0, in_frames,
1, order, &out_frame,
address_bits);
@@ -2360,7 +2354,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
spin_lock_irqsave(&xen_reservation_lock, flags);
/* 1. Find start MFN of contiguous extent. */
- in_frame = virt_to_mfn(vstart);
+ in_frame = virt_to_mfn((void *)vstart);
/* 2. Zap current PTEs. */
xen_zap_pfn_range(vstart, order, NULL, out_frames);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8b5cf7bb1f47..50c998b844fb 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -340,7 +340,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
WARN_ON(size == 0);
- mfn_save = virt_to_mfn(buf);
+ mfn_save = virt_to_mfn((void *)buf);
for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
ident_pfn_iter < ident_end_pfn;
@@ -503,7 +503,7 @@ void __init xen_remap_memory(void)
unsigned long pfn_s = ~0UL;
unsigned long len = 0;
- mfn_save = virt_to_mfn(buf);
+ mfn_save = virt_to_mfn((void *)buf);
while (xen_remap_mfn != INVALID_P2M_ENTRY) {
/* Map the remap information */
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 2b69c3c035b6..fc1a4f3c81d9 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -422,3 +422,4 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/block/bdev.c b/block/bdev.c
index 979e28a46b98..f3b13aa1b7d4 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -206,23 +206,6 @@ int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
}
EXPORT_SYMBOL(sync_blockdev_range);
-/*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
- struct super_block *sb = get_super(bdev);
- if (sb) {
- int res = sync_filesystem(sb);
- drop_super(sb);
- return res;
- }
- return sync_blockdev(bdev);
-}
-EXPORT_SYMBOL(fsync_bdev);
-
/**
* freeze_bdev - lock a filesystem and force it into a consistent state
* @bdev: blockdevice to lock
@@ -248,9 +231,9 @@ int freeze_bdev(struct block_device *bdev)
if (!sb)
goto sync;
if (sb->s_op->freeze_super)
- error = sb->s_op->freeze_super(sb);
+ error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
else
- error = freeze_super(sb);
+ error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
deactivate_super(sb);
if (error) {
@@ -291,9 +274,9 @@ int thaw_bdev(struct block_device *bdev)
goto out;
if (sb->s_op->thaw_super)
- error = sb->s_op->thaw_super(sb);
+ error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
else
- error = thaw_super(sb);
+ error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
if (error)
bdev->bd_fsfreeze_count++;
else
@@ -960,26 +943,38 @@ out_path_put:
}
EXPORT_SYMBOL(lookup_bdev);
-int __invalidate_device(struct block_device *bdev, bool kill_dirty)
+/**
+ * bdev_mark_dead - mark a block device as dead
+ * @bdev: block device to operate on
+ * @surprise: indicate a surprise removal
+ *
+ * Tell the file system that this device or media is dead. If @surprise is set
+ * to %true the device or media is already gone, if not we are preparing for an
+ * orderly removal.
+ *
+ * This calls into the file system, which then typically syncs out all dirty data
+ * and writes back inodes and then invalidates any cached data in the inodes on
+ * the file system. In addition we also invalidate the block device mapping.
+ */
+void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
- struct super_block *sb = get_super(bdev);
- int res = 0;
+ mutex_lock(&bdev->bd_holder_lock);
+ if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
+ bdev->bd_holder_ops->mark_dead(bdev, surprise);
+ else
+ sync_blockdev(bdev);
+ mutex_unlock(&bdev->bd_holder_lock);
- if (sb) {
- /*
- * no need to lock the super, get_super holds the
- * read mutex so the filesystem cannot go away
- * under us (->put_super runs with the write lock
- * hold).
- */
- shrink_dcache_sb(sb);
- res = invalidate_inodes(sb, kill_dirty);
- drop_super(sb);
- }
invalidate_bdev(bdev);
- return res;
}
-EXPORT_SYMBOL(__invalidate_device);
+#ifdef CONFIG_DASD_MODULE
+/*
+ * Drivers should not use this directly, but the DASD driver historically had
+ * a shutdown-to-offline mode that does not actually remove the gendisk, yet
+ * otherwise looks a lot like a safe device removal.
+ */
+EXPORT_SYMBOL_GPL(bdev_mark_dead);
+#endif
void sync_bdevs(bool wait)
{
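bdev_mark_dead() above replaces the old fsync_bdev()/__invalidate_device() pair with a holder-ops dispatch: a registered holder gets its mark_dead callback so the filesystem can shut itself down, otherwise the device is merely synced, and the page cache is invalidated either way. A stripped-down sketch of that dispatch shape, using simplified stand-in structures rather than the kernel's real block-layer types:

#include <stdbool.h>
#include <stdio.h>

struct bdev_stub;

struct holder_ops_stub {
	void (*mark_dead)(struct bdev_stub *bdev, bool surprise);
};

struct bdev_stub {
	const struct holder_ops_stub *holder_ops;	/* NULL when nobody claimed the device */
};

static void sync_blockdev_stub(struct bdev_stub *bdev)
{
	(void)bdev;
	puts("sync block device");
}

static void invalidate_bdev_stub(struct bdev_stub *bdev)
{
	(void)bdev;
	puts("invalidate page cache");
}

/* Same dispatch shape as bdev_mark_dead(): prefer the holder callback, else just sync. */
static void mark_dead_stub(struct bdev_stub *bdev, bool surprise)
{
	if (bdev->holder_ops && bdev->holder_ops->mark_dead)
		bdev->holder_ops->mark_dead(bdev, surprise);
	else
		sync_blockdev_stub(bdev);
	invalidate_bdev_stub(bdev);
}

static void fs_mark_dead(struct bdev_stub *bdev, bool surprise)
{
	(void)bdev;
	printf("filesystem shutdown (%s removal)\n", surprise ? "surprise" : "orderly");
}

int main(void)
{
	const struct holder_ops_stub fs_ops = { .mark_dead = fs_mark_dead };
	struct bdev_stub mounted = { .holder_ops = &fs_ops };
	struct bdev_stub raw = { .holder_ops = NULL };

	mark_dead_stub(&mounted, true);
	mark_dead_stub(&raw, false);
	return 0;
}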
diff --git a/block/disk-events.c b/block/disk-events.c
index 0cfac464e6d1..422db8292d09 100644
--- a/block/disk-events.c
+++ b/block/disk-events.c
@@ -281,9 +281,7 @@ bool disk_check_media_change(struct gendisk *disk)
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return false;
- if (__invalidate_device(disk->part0, true))
- pr_warn("VFS: busy inodes on changed media %s\n",
- disk->disk_name);
+ bdev_mark_dead(disk->part0, true);
set_bit(GD_NEED_PART_SCAN, &disk->state);
return true;
}
@@ -294,25 +292,16 @@ EXPORT_SYMBOL(disk_check_media_change);
* @disk: the disk which will raise the event
* @events: the events to raise
*
- * Generate uevents for the disk. If DISK_EVENT_MEDIA_CHANGE is present,
- * attempt to free all dentries and inodes and invalidates all block
+ * Should be called when the media changes for @disk. Generates a uevent
+ * and attempts to free all dentries and inodes and invalidates all block
* device page cache entries in that case.
- *
- * Returns %true if DISK_EVENT_MEDIA_CHANGE was raised, or %false if not.
*/
-bool disk_force_media_change(struct gendisk *disk, unsigned int events)
+void disk_force_media_change(struct gendisk *disk)
{
- disk_event_uevent(disk, events);
-
- if (!(events & DISK_EVENT_MEDIA_CHANGE))
- return false;
-
+ disk_event_uevent(disk, DISK_EVENT_MEDIA_CHANGE);
inc_diskseq(disk);
- if (__invalidate_device(disk->part0, true))
- pr_warn("VFS: busy inodes on changed media %s\n",
- disk->disk_name);
+ bdev_mark_dead(disk->part0, true);
set_bit(GD_NEED_PART_SCAN, &disk->state);
- return true;
}
EXPORT_SYMBOL_GPL(disk_force_media_change);
diff --git a/block/genhd.c b/block/genhd.c
index 3d287b32d50d..cc32a0c704eb 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -554,7 +554,7 @@ out_exit_elevator:
}
EXPORT_SYMBOL(device_add_disk);
-static void blk_report_disk_dead(struct gendisk *disk)
+static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
{
struct block_device *bdev;
unsigned long idx;
@@ -565,10 +565,7 @@ static void blk_report_disk_dead(struct gendisk *disk)
continue;
rcu_read_unlock();
- mutex_lock(&bdev->bd_holder_lock);
- if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
- bdev->bd_holder_ops->mark_dead(bdev);
- mutex_unlock(&bdev->bd_holder_lock);
+ bdev_mark_dead(bdev, surprise);
put_device(&bdev->bd_device);
rcu_read_lock();
@@ -576,14 +573,7 @@ static void blk_report_disk_dead(struct gendisk *disk)
rcu_read_unlock();
}
-/**
- * blk_mark_disk_dead - mark a disk as dead
- * @disk: disk to mark as dead
- *
- * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
- * to this disk.
- */
-void blk_mark_disk_dead(struct gendisk *disk)
+static void __blk_mark_disk_dead(struct gendisk *disk)
{
/*
* Fail any new I/O.
@@ -603,8 +593,19 @@ void blk_mark_disk_dead(struct gendisk *disk)
* Prevent new I/O from crossing bio_queue_enter().
*/
blk_queue_start_drain(disk->queue);
+}
- blk_report_disk_dead(disk);
+/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark a disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+ __blk_mark_disk_dead(disk);
+ blk_report_disk_dead(disk, true);
}
EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
@@ -641,18 +642,20 @@ void del_gendisk(struct gendisk *disk)
disk_del_events(disk);
/*
- * Prevent new openers by unlinked the bdev inode, and write out
- * dirty data before marking the disk dead and stopping all I/O.
+ * Prevent new openers by unlinking the bdev inode.
*/
mutex_lock(&disk->open_mutex);
- xa_for_each(&disk->part_tbl, idx, part) {
+ xa_for_each(&disk->part_tbl, idx, part)
remove_inode_hash(part->bd_inode);
- fsync_bdev(part);
- __invalidate_device(part, true);
- }
mutex_unlock(&disk->open_mutex);
- blk_mark_disk_dead(disk);
+ /*
+ * Tell the file system to write back all dirty data and shut down if
+ * it hasn't been notified earlier.
+ */
+ if (!test_bit(GD_DEAD, &disk->state))
+ blk_report_disk_dead(disk, false);
+ __blk_mark_disk_dead(disk);
/*
* Drop all partitions now that the disk is marked dead.
diff --git a/block/ioctl.c b/block/ioctl.c
index 3be11941fb2d..648670ddb164 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -364,7 +364,14 @@ static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
{
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- fsync_bdev(bdev);
+
+ mutex_lock(&bdev->bd_holder_lock);
+ if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
+ bdev->bd_holder_ops->sync(bdev);
+ else
+ sync_blockdev(bdev);
+ mutex_unlock(&bdev->bd_holder_lock);
+
invalidate_bdev(bdev);
return 0;
}
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 13a7341299a9..e137a87f4db0 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -281,10 +281,7 @@ static void delete_partition(struct block_device *part)
* looked up any more even when openers still hold references.
*/
remove_inode_hash(part->bd_inode);
-
- fsync_bdev(part);
- __invalidate_device(part, true);
-
+ bdev_mark_dead(part, false);
drop_partition(part);
}
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index a7a49b17ceb1..9de610bf1f4b 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -51,6 +51,26 @@ int restrict_link_by_builtin_trusted(struct key *dest_keyring,
builtin_trusted_keys);
}
+/**
+ * restrict_link_by_digsig_builtin - Restrict digitalSignature key additions by the built-in keyring
+ * @dest_keyring: Keyring being linked to.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ * @restriction_key: A ring of keys that can be used to vouch for the new cert.
+ *
+ * Restrict the addition of keys into a keyring based on the key-to-be-added
+ * being vouched for by a key in the built-in system keyring. The new key
+ * must have the digitalSignature usage field set.
+ */
+int restrict_link_by_digsig_builtin(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key)
+{
+ return restrict_link_by_digsig(dest_keyring, type, payload,
+ builtin_trusted_keys);
+}
+
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
/**
* restrict_link_by_builtin_and_secondary_trusted - Restrict keyring
@@ -83,6 +103,35 @@ int restrict_link_by_builtin_and_secondary_trusted(
secondary_trusted_keys);
}
+/**
+ * restrict_link_by_digsig_builtin_and_secondary - Restrict by digitalSignature.
+ * @dest_keyring: Keyring being linked to.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ * @restrict_key: A ring of keys that can be used to vouch for the new cert.
+ *
+ * Restrict the addition of keys into a keyring based on the key-to-be-added
+ * being vouched for by a key in either the built-in or the secondary system
+ * keyrings. The new key must have the digitalSignature usage field set.
+ */
+int restrict_link_by_digsig_builtin_and_secondary(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restrict_key)
+{
+ /* If we have a secondary trusted keyring, then that contains a link
+ * through to the builtin keyring and the search will follow that link.
+ */
+ if (type == &key_type_keyring &&
+ dest_keyring == secondary_trusted_keys &&
+ payload == &builtin_trusted_keys->payload)
+ /* Allow the builtin keyring to be added to the secondary */
+ return 0;
+
+ return restrict_link_by_digsig(dest_keyring, type, payload,
+ secondary_trusted_keys);
+}
+
/*
* Allocate a struct key_restriction for the "builtin and secondary trust"
* keyring. Only for use in system_trusted_keyring_init().
@@ -103,6 +152,36 @@ static __init struct key_restriction *get_builtin_and_secondary_restriction(void
return restriction;
}
+
+/**
+ * add_to_secondary_keyring - Add to secondary keyring.
+ * @source: Source of key
+ * @data: The blob holding the key
+ * @len: The length of the data blob
+ *
+ * Add a key to the secondary keyring. The key must be vouched for by a key in the builtin,
+ * machine or secondary keyring itself.
+ */
+void __init add_to_secondary_keyring(const char *source, const void *data, size_t len)
+{
+ key_ref_t key;
+ key_perm_t perm;
+
+ perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW;
+
+ key = key_create_or_update(make_key_ref(secondary_trusted_keys, 1),
+ "asymmetric",
+ NULL, data, len, perm,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(key)) {
+ pr_err("Problem loading X.509 certificate from %s to secondary keyring %ld\n",
+ source, PTR_ERR(key));
+ return;
+ }
+
+ pr_notice("Loaded X.509 cert '%s'\n", key_ref_to_ptr(key)->description);
+ key_ref_put(key);
+}
#endif
#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
void __init set_machine_trusted_keys(struct key *keyring)
@@ -251,6 +330,12 @@ int verify_pkcs7_message_sig(const void *data, size_t len,
if (ret < 0)
goto error;
+ ret = is_key_on_revocation_list(pkcs7);
+ if (ret != -ENOKEY) {
+ pr_devel("PKCS#7 key is on revocation list\n");
+ goto error;
+ }
+
if (!trusted_keys) {
trusted_keys = builtin_trusted_keys;
} else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
@@ -270,12 +355,6 @@ int verify_pkcs7_message_sig(const void *data, size_t len,
pr_devel("PKCS#7 platform keyring is not available\n");
goto error;
}
-
- ret = is_key_on_revocation_list(pkcs7);
- if (ret != -ENOKEY) {
- pr_devel("PKCS#7 platform key is on revocation list\n");
- goto error;
- }
}
ret = pkcs7_validate_trust(pkcs7, trusted_keys);
if (ret < 0) {
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 10efb56d8b48..ea6fb8e89d06 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -320,18 +320,21 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
if (IS_ERR(ret)) {
up_read(&key->sem);
+ key_put(key);
return PTR_ERR(ret);
}
key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
if (!key_data) {
up_read(&key->sem);
+ key_put(key);
return -ENOMEM;
}
memcpy(key_data, ret, key_datalen);
up_read(&key->sem);
+ key_put(key);
err = type->setkey(ask->private, key_data, key_datalen);
@@ -1192,6 +1195,7 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
areq->areqlen = areqlen;
areq->sk = sk;
+ areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
areq->tsgl = NULL;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5e7cd603d489..4fe95c448047 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -17,6 +17,7 @@
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "internal.h"
@@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst)
inst->alg.cra_type->free(inst);
}
-static void crypto_destroy_instance(struct crypto_alg *alg)
+static void crypto_destroy_instance_workfn(struct work_struct *w)
{
- struct crypto_instance *inst = (void *)alg;
+ struct crypto_instance *inst = container_of(w, struct crypto_instance,
+ free_work);
struct crypto_template *tmpl = inst->tmpl;
crypto_free_instance(inst);
crypto_tmpl_put(tmpl);
}
+static void crypto_destroy_instance(struct crypto_alg *alg)
+{
+ struct crypto_instance *inst = container_of(alg,
+ struct crypto_instance,
+ alg);
+
+ INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
+ schedule_work(&inst->free_work);
+}
+
/*
* This function adds a spawn to the list secondary_spawns which
* will be used at the end of crypto_remove_spawns to unregister
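crypto_destroy_instance() above now only queues work; the actual teardown runs later from crypto_destroy_instance_workfn() in process context instead of in whatever context dropped the last reference. The generic shape of that deferred-destruction pattern looks roughly like the following kernel-style fragment, where "widget" is a made-up example object:

#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative object whose teardown must not run in the caller's context. */
struct widget {
	struct work_struct free_work;
	/* ... other fields ... */
};

static void widget_free_workfn(struct work_struct *w)
{
	struct widget *wd = container_of(w, struct widget, free_work);

	/* Heavy teardown now runs in process context. */
	kfree(wd);
}

/* Last-reference drop: defer the real work instead of doing it inline. */
static void widget_release(struct widget *wd)
{
	INIT_WORK(&wd->free_work, widget_free_workfn);
	schedule_work(&wd->free_work);
}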
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index 773e159dbbcb..abeecb8329b3 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -42,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key,
void public_key_free(struct public_key *key)
{
if (key) {
- kfree(key->key);
+ kfree_sensitive(key->key);
kfree(key->params);
kfree(key);
}
@@ -263,7 +263,7 @@ error_free_tfm:
else
crypto_free_akcipher(tfm);
error_free_key:
- kfree(key);
+ kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@@ -369,7 +369,7 @@ error_free_tfm:
else
crypto_free_akcipher(tfm);
error_free_key:
- kfree(key);
+ kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@@ -441,7 +441,7 @@ int public_key_verify_signature(const struct public_key *pkey,
sig->digest, sig->digest_size);
error_free_key:
- kfree(key);
+ kfree_sensitive(key);
error_free_tfm:
crypto_free_sig(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 276bdb627498..6b69ea40da23 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -148,6 +148,50 @@ int restrict_link_by_ca(struct key *dest_keyring,
return 0;
}
+/**
+ * restrict_link_by_digsig - Restrict additions to a ring of digsig keys
+ * @dest_keyring: Keyring being linked to.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ * @trust_keyring: A ring of keys that can be used to vouch for the new cert.
+ *
+ * Check if the new certificate has the digitalSignature usage set. If it
+ * does, mark the new certificate as being OK to link. Afterwards, verify
+ * the new certificate against the ones in the trust_keyring.
+ *
+ * Returns 0 if the new certificate was accepted, -ENOKEY if the new
+ * certificate does not have the digitalSignature usage set, -ENOPKG if the
+ * signature uses unsupported crypto, or some other error if there is a
+ * matching certificate but the signature check cannot be performed.
+ */
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ const struct public_key *pkey;
+
+ if (type != &key_type_asymmetric)
+ return -EOPNOTSUPP;
+
+ pkey = payload->data[asym_crypto];
+
+ if (!pkey)
+ return -ENOPKG;
+
+ if (!test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
+ return -ENOKEY;
+
+ if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
+ return -ENOKEY;
+
+ if (test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
+ return -ENOKEY;
+
+ return restrict_link_by_signature(dest_keyring, type, payload,
+ trust_keyring);
+}
+
static bool match_either_id(const struct asymmetric_key_id **pair,
const struct asymmetric_key_id *single)
{
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 22beaf2213a2..f440767bd727 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -391,7 +391,7 @@ error_no_desc:
* verify_pefile_signature - Verify the signature on a PE binary image
* @pebuf: Buffer containing the PE binary image
* @pelen: Length of the binary image
- * @trust_keys: Signing certificate(s) to use as starting points
+ * @trusted_keys: Signing certificate(s) to use as starting points
* @usage: The use to which the key is being put.
*
* Validate that the certificate chain inside the PKCS#7 message inside the PE
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6fdfc82e23a8..7c71db3ac23d 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -130,6 +130,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
goto out;
}
+ if (cert->unsupported_sig) {
+ ret = 0;
+ goto out;
+ }
+
ret = public_key_verify_signature(cert->pub, cert->sig);
if (ret < 0) {
if (ret == -ENOPKG) {
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 74fcc0897041..108d9d55c509 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -7,15 +7,30 @@
* Author: Baolin Wang <baolin.wang@linaro.org>
*/
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <crypto/engine.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
+/* Temporary algorithm flag used to indicate an updated driver. */
+#define CRYPTO_ALG_ENGINE 0x200
+
+struct crypto_engine_alg {
+ struct crypto_alg base;
+ struct crypto_engine_op op;
+};
+
/**
* crypto_finalize_request - finalize one request if the request is done
* @engine: the hardware engine
@@ -26,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
{
unsigned long flags;
- bool finalize_req = false;
- int ret;
- struct crypto_engine_ctx *enginectx;
/*
* If hardware cannot enqueue more requests
@@ -38,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
- finalize_req = true;
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
- if (finalize_req || engine->retry_support) {
- enginectx = crypto_tfm_ctx(req->tfm);
- if (enginectx->op.prepare_request &&
- enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- }
lockdep_assert_in_softirq();
crypto_request_complete(req, err);
@@ -72,10 +74,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
+ struct crypto_engine_alg *alg;
+ struct crypto_engine_op *op;
unsigned long flags;
bool was_busy = false;
int ret;
- struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -141,27 +144,21 @@ start_request:
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
- goto req_err_2;
+ goto req_err_1;
}
}
- enginectx = crypto_tfm_ctx(async_req->tfm);
-
- if (enginectx->op.prepare_request) {
- ret = enginectx->op.prepare_request(engine, async_req);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err_2;
- }
- }
- if (!enginectx->op.do_one_request) {
+ if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
+ alg = container_of(async_req->tfm->__crt_alg,
+ struct crypto_engine_alg, base);
+ op = &alg->op;
+ } else {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err_1;
}
- ret = enginectx->op.do_one_request(engine, async_req);
+ ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
if (ret < 0) {
@@ -177,18 +174,6 @@ start_request:
ret);
goto req_err_1;
}
- /*
- * If retry mechanism is supported,
- * unprepare current request and
- * enqueue it back into crypto-engine queue.
- */
- if (enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine,
- async_req);
- if (ret)
- dev_err(engine->dev,
- "failed to unprepare request\n");
- }
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
@@ -204,13 +189,6 @@ start_request:
goto retry;
req_err_1:
- if (enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine, async_req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
-
-req_err_2:
crypto_request_complete(async_req, ret);
retry:
@@ -591,5 +569,177 @@ int crypto_engine_exit(struct crypto_engine *engine)
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
+int crypto_engine_register_aead(struct aead_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
+
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
+{
+ crypto_unregister_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
+
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_aead(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_aeads(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
+
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_aead(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
+
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
+{
+ crypto_unregister_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
+
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_ahash(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_ahashes(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
+
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
+
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
+{
+ crypto_unregister_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
+
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
+{
+ crypto_unregister_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
+
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
+{
+ return crypto_unregister_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
+
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_skcipher(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_skciphers(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
+
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index c7d7f2caa779..fe9c233ec769 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -89,10 +89,14 @@ struct rand_data {
unsigned int rct_count; /* Number of stuck values */
/* Intermittent health test failure threshold of 2^-30 */
-#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */
-#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
+ /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */
+ /* However, our RCT implementation starts at 1, so we subtract 1 here. */
+#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */
+#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
/* Permanent health test failure threshold of 2^-60 */
-#define JENT_RCT_CUTOFF_PERMANENT 60
+ /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */
+ /* However, our RCT implementation starts at 1, so we subtract 1 here. */
+#define JENT_RCT_CUTOFF_PERMANENT (61 - 1)
#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 1b0f76ba3eb5..59260aefed28 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
- unsigned len;
+ int len;
- len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
- if (len < 2 || len >= sizeof(ecb_name))
+ len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+ if (len < 2)
goto err_free_inst;
if (ecb_name[len - 1] != ')')
diff --git a/crypto/sig.c b/crypto/sig.c
index b48c18ec65cd..224c47019297 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -21,11 +21,6 @@
static const struct crypto_type crypto_sig_type;
-static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
-{
- return container_of(tfm, struct crypto_sig, base);
-}
-
static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
{
if (tfm->__crt_alg->cra_type != &crypto_sig_type)
diff --git a/crypto/xts.c b/crypto/xts.c
index 09be909a6a1a..548b302c6c6a 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
- unsigned len;
+ int len;
- len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
- if (len < 2 || len >= sizeof(ctx->name))
+ len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+ if (len < 2)
goto err_free_inst;
if (ctx->name[len - 1] != ')')
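The lrw/xts conversions above rely on strscpy() reporting what actually reached the destination: it returns the copied length (excluding the NUL) or -E2BIG on truncation, whereas strlcpy() returned the source length, which is why the old code needed the separate "len >= sizeof(buf)" check. A user-space illustration with a small stub that mimics strscpy()'s return convention:

#include <stdio.h>
#include <string.h>

#define E2BIG_STUB	7	/* stand-in for the kernel's -E2BIG */

/* Minimal stub with strscpy() semantics: truncating copy, always NUL-terminated,
 * returns the copied length or -E2BIG_STUB when the source did not fit. */
static long strscpy_stub(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return -E2BIG_STUB;
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG_STUB;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char ecb_name[8];
	long len;

	len = strscpy_stub(ecb_name, "aes)", sizeof(ecb_name));
	printf("short name: len=%ld accepted=%d\n", len, len >= 2);	/* 4, accepted */

	len = strscpy_stub(ecb_name, "veryverylongcipher)", sizeof(ecb_name));
	printf("long name:  len=%ld accepted=%d\n", len, len >= 2);	/* negative, rejected */
	return 0;
}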
diff --git a/drivers/Makefile b/drivers/Makefile
index 7241d80a7b29..a7459e77df37 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -195,3 +195,5 @@ obj-$(CONFIG_PECI) += peci/
obj-$(CONFIG_HTE) += hte/
obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
+
+obj-$(CONFIG_S390) += s390/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 00dd309b6682..cee82b473dc5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -581,7 +581,7 @@ config ACPI_VIOT
config ACPI_PRMT
bool "Platform Runtime Mechanism Support"
- depends on EFI && (X86_64 || ARM64)
+ depends on EFI_RUNTIME_WRAPPERS && (X86_64 || ARM64)
default y
help
Platform Runtime Mechanism (PRM) is a firmware interface exposing a
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3fc5a0d54f6e..eaa09bf52f17 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -50,7 +50,6 @@ acpi-$(CONFIG_PCI) += acpi_lpss.o
acpi-y += acpi_apd.o
acpi-y += acpi_platform.o
acpi-y += acpi_pnp.o
-acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += power.o
acpi-y += event.o
acpi-y += evged.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 1ace70b831cd..225dc6818751 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -34,7 +34,7 @@ MODULE_LICENSE("GPL");
static int acpi_ac_add(struct acpi_device *device);
static void acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
+static void acpi_ac_notify(acpi_handle handle, u32 event, void *data);
static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
@@ -54,11 +54,9 @@ static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
.ids = ac_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
- .notify = acpi_ac_notify,
},
.drv.pm = &acpi_ac_pm,
};
@@ -128,8 +126,9 @@ static enum power_supply_property ac_props[] = {
};
/* Driver Model */
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
{
+ struct acpi_device *device = data;
struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
@@ -235,7 +234,7 @@ static int acpi_ac_add(struct acpi_device *device)
result = acpi_ac_get_state(ac);
if (result)
- goto end;
+ goto err_release_ac;
psy_cfg.drv_data = ac;
@@ -248,7 +247,7 @@ static int acpi_ac_add(struct acpi_device *device)
&ac->charger_desc, &psy_cfg);
if (IS_ERR(ac->charger)) {
result = PTR_ERR(ac->charger);
- goto end;
+ goto err_release_ac;
}
pr_info("%s [%s] (%s)\n", acpi_device_name(device),
@@ -256,9 +255,19 @@ static int acpi_ac_add(struct acpi_device *device)
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
register_acpi_notifier(&ac->battery_nb);
-end:
+
+ result = acpi_dev_install_notify_handler(device, ACPI_ALL_NOTIFY,
+ acpi_ac_notify);
if (result)
- kfree(ac);
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ power_supply_unregister(ac->charger);
+ unregister_acpi_notifier(&ac->battery_nb);
+err_release_ac:
+ kfree(ac);
return result;
}
@@ -297,6 +306,8 @@ static void acpi_ac_remove(struct acpi_device *device)
ac = acpi_driver_data(device);
+ acpi_dev_remove_notify_handler(device, ACPI_ALL_NOTIFY,
+ acpi_ac_notify);
power_supply_unregister(ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
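The ac.c conversion above drops the ACPI driver's .notify callback and the ACPI_DRIVER_ALL_NOTIFY_EVENTS flag in favour of installing the handler explicitly from the add/remove path, so the notify lifetime follows the probe/remove sequence. Reduced to its skeleton, the resulting pattern looks roughly like this (driver state handling omitted; the "mydrv" names are placeholders):

#include <linux/acpi.h>

static void mydrv_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;

	/* React to @event for @adev. */
}

static int mydrv_add(struct acpi_device *adev)
{
	/* ... allocate and register driver state ... */
	return acpi_dev_install_notify_handler(adev, ACPI_ALL_NOTIFY,
					       mydrv_notify);
}

static void mydrv_remove(struct acpi_device *adev)
{
	acpi_dev_remove_notify_handler(adev, ACPI_ALL_NOTIFY, mydrv_notify);
	/* ... tear down driver state ... */
}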
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c
index 4cf4aef7ce0c..9b55d1593d16 100644
--- a/drivers/acpi/acpi_cmos_rtc.c
+++ b/drivers/acpi/acpi_cmos_rtc.c
@@ -51,12 +51,11 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address,
return AE_OK;
}
-static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev,
- const struct acpi_device_id *id)
+int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
{
acpi_status status;
- status = acpi_install_address_space_handler(adev->handle,
+ status = acpi_install_address_space_handler(handle,
ACPI_ADR_SPACE_CMOS,
&acpi_cmos_rtc_space_handler,
NULL, NULL);
@@ -67,18 +66,30 @@ static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev,
return 1;
}
+EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler);
-static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev)
+void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
{
- if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle,
+ if (ACPI_FAILURE(acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler)))
pr_err("Error removing CMOS-RTC region handler\n");
}
+EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler);
+
+static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const struct acpi_device_id *id)
+{
+ return acpi_install_cmos_rtc_space_handler(adev->handle);
+}
+
+static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev)
+{
+ acpi_remove_cmos_rtc_space_handler(adev->handle);
+}
static struct acpi_scan_handler cmos_rtc_handler = {
.ids = acpi_cmos_rtc_ids,
- .attach = acpi_install_cmos_rtc_space_handler,
- .detach = acpi_remove_cmos_rtc_space_handler,
+ .attach = acpi_cmos_rtc_attach_handler,
+ .detach = acpi_cmos_rtc_detach_handler,
};
void __init acpi_cmos_rtc_init(void)
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index e648158368a7..e120a96e1eae 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -172,7 +172,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
fru_text = "";
sec_type = (guid_t *)gdata->section_type;
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
- struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+ struct cper_sec_mem_err *mem = acpi_hest_get_payload(gdata);
if (gdata->error_data_length >= sizeof(*mem))
trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f9aa02cac6d1..c711db8a9c33 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -9,9 +9,11 @@
* Copyright (C) 2013, Intel Corporation
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -21,6 +23,8 @@
#include <asm/cpu.h>
+#include <xen/xen.h>
+
#include "internal.h"
DEFINE_PER_CPU(struct acpi_processor *, processors);
@@ -508,54 +512,110 @@ static void acpi_processor_remove(struct acpi_device *device)
}
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-#ifdef CONFIG_X86
-static bool acpi_hwp_native_thermal_lvt_set;
-static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
- u32 lvl,
- void *context,
- void **rv)
+#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
+bool __init processor_physically_present(acpi_handle handle)
+{
+ int cpuid, type;
+ u32 acpi_id;
+ acpi_status status;
+ acpi_object_type acpi_type;
+ unsigned long long tmp;
+ union acpi_object object = {};
+ struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+
+ status = acpi_get_type(handle, &acpi_type);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ switch (acpi_type) {
+ case ACPI_TYPE_PROCESSOR:
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return false;
+ acpi_id = object.processor.proc_id;
+ break;
+ case ACPI_TYPE_DEVICE:
+ status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
+ NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return false;
+ acpi_id = tmp;
+ break;
+ default:
+ return false;
+ }
+
+ if (xen_initial_domain())
+ /*
+ * When running as a Xen dom0 the number of processors Linux
+ * sees can be different from the real number of processors on
+ * the system, and we still need to execute _PDC or _OSC for
+ * all of them.
+ */
+ return xen_processor_present(acpi_id);
+
+ type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+ cpuid = acpi_get_cpuid(handle, type, acpi_id);
+
+ return !invalid_logical_cpuid(cpuid);
+}
+
+/* vendor specific UUID indicating an Intel platform */
+static u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
+
+static acpi_status __init acpi_processor_osc(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
{
- u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
- u32 capbuf[2];
+ u32 capbuf[2] = {};
struct acpi_osc_context osc_context = {
.uuid_str = sb_uuid_str,
.rev = 1,
.cap.length = 8,
.cap.pointer = capbuf,
};
+ acpi_status status;
- if (acpi_hwp_native_thermal_lvt_set)
- return AE_CTRL_TERMINATE;
+ if (!processor_physically_present(handle))
+ return AE_OK;
- capbuf[0] = 0x0000;
- capbuf[1] = 0x1000; /* set bit 12 */
+ arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]);
- if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
- if (osc_context.ret.pointer && osc_context.ret.length > 1) {
- u32 *capbuf_ret = osc_context.ret.pointer;
+ status = acpi_run_osc(handle, &osc_context);
+ if (ACPI_FAILURE(status))
+ return status;
- if (capbuf_ret[1] & 0x1000) {
- acpi_handle_info(handle,
- "_OSC native thermal LVT Acked\n");
- acpi_hwp_native_thermal_lvt_set = true;
- }
- }
- kfree(osc_context.ret.pointer);
- }
+ kfree(osc_context.ret.pointer);
return AE_OK;
}
-void __init acpi_early_processor_osc(void)
+static bool __init acpi_early_processor_osc(void)
{
- if (boot_cpu_has(X86_FEATURE_HWP)) {
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- acpi_hwp_native_thermal_lvt_osc,
- NULL, NULL, NULL);
- acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
- acpi_hwp_native_thermal_lvt_osc,
- NULL, NULL);
+ acpi_status status;
+
+ acpi_proc_quirk_mwait_check();
+
+ status = acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, acpi_processor_osc, NULL,
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ status = acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_osc,
+ NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return true;
+}
+
+void __init acpi_early_processor_control_setup(void)
+{
+ if (acpi_early_processor_osc()) {
+ pr_info("_OSC evaluated successfully for all CPUs\n");
+ } else {
+ pr_info("_OSC evaluation for CPUs failed, trying _PDC\n");
+ acpi_early_processor_set_pdc();
}
}
#endif
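
A minimal sketch of the per-processor _OSC convention this rework settles on: a two-DWORD capability buffer whose support DWORD is filled in by the architecture, evaluated once per physically present processor. Everything except acpi_run_osc(), arch_acpi_set_proc_cap_bits() and the OSC_*_DWORD indices (all visible in the hunk above) is illustrative.

/*
 * Hedged sketch, not part of the patch: mirrors the shape of
 * acpi_processor_osc() above under a standalone helper name.
 */
static acpi_status example_processor_osc(acpi_handle handle)
{
	u32 capbuf[2] = {};			/* [OSC_QUERY_DWORD], [OSC_SUPPORT_DWORD] */
	struct acpi_osc_context ctx = {
		.uuid_str = "4077A616-290C-47BE-9EBD-D87058713953",
		.rev = 1,
		.cap.length = sizeof(capbuf),	/* 8 bytes, i.e. two DWORDs */
		.cap.pointer = capbuf,
	};
	acpi_status status;

	/* The architecture decides which capability bits to advertise. */
	arch_acpi_set_proc_cap_bits(&capbuf[OSC_SUPPORT_DWORD]);

	status = acpi_run_osc(handle, &ctx);
	if (ACPI_SUCCESS(status))
		kfree(ctx.ret.pointer);		/* returned buffer is caller-owned */

	return status;
}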
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index e9b8e8305e23..33c3b16af556 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -557,6 +557,7 @@ static int acpi_tad_disable_timer(struct device *dev, u32 timer_id)
static int acpi_tad_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
device_init_wakeup(dev, false);
@@ -577,6 +578,7 @@ static int acpi_tad_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ acpi_remove_cmos_rtc_space_handler(handle);
return 0;
}
@@ -589,6 +591,11 @@ static int acpi_tad_probe(struct platform_device *pdev)
unsigned long long caps;
int ret;
+ ret = acpi_install_cmos_rtc_space_handler(handle);
+ if (ret < 0) {
+ dev_info(dev, "Unable to install space handler\n");
+ return -ENODEV;
+ }
/*
* Initialization failure messages are mostly about firmware issues, so
* print them at the "info" level.
@@ -596,22 +603,27 @@ static int acpi_tad_probe(struct platform_device *pdev)
status = acpi_evaluate_integer(handle, "_GCP", NULL, &caps);
if (ACPI_FAILURE(status)) {
dev_info(dev, "Unable to get capabilities\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto remove_handler;
}
if (!(caps & ACPI_TAD_AC_WAKE)) {
dev_info(dev, "Unsupported capabilities\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto remove_handler;
}
if (!acpi_has_method(handle, "_PRW")) {
dev_info(dev, "Missing _PRW\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto remove_handler;
}
dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
- if (!dd)
- return -ENOMEM;
+ if (!dd) {
+ ret = -ENOMEM;
+ goto remove_handler;
+ }
dd->capabilities = caps;
dev_set_drvdata(dev, dd);
@@ -653,6 +665,11 @@ static int acpi_tad_probe(struct platform_device *pdev)
fail:
acpi_tad_remove(pdev);
+ /* Don't fall through; the CMOS RTC space handler is removed by acpi_tad_remove() */
+ return ret;
+
+remove_handler:
+ acpi_remove_cmos_rtc_space_handler(handle);
return ret;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 62f4364e4460..948e31f7ce6e 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -77,7 +77,7 @@ static DEFINE_MUTEX(video_list_lock);
static LIST_HEAD(video_bus_head);
static int acpi_video_bus_add(struct acpi_device *device);
static void acpi_video_bus_remove(struct acpi_device *device);
-static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
+static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data);
/*
* Indices in the _BCL method response: the first two items are special,
@@ -104,7 +104,6 @@ static struct acpi_driver acpi_video_bus = {
.ops = {
.add = acpi_video_bus_add,
.remove = acpi_video_bus_remove,
- .notify = acpi_video_bus_notify,
},
};
@@ -1527,8 +1526,9 @@ static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
acpi_osi_is_win8() ? 0 : 1);
}
-static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
+static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
{
+ struct acpi_device *device = data;
struct acpi_video_bus *video = acpi_driver_data(device);
struct input_dev *input;
int keycode = 0;
@@ -2027,6 +2027,12 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (error)
goto err_put_video;
+ /*
+ * The HP ZBook Fury 16 G10 requires the ACPI video's child devices to have
+ * _PS0 evaluated for panel brightness control to be functional.
+ */
+ acpi_device_fix_up_power_extended(device);
+
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
@@ -2053,8 +2059,19 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_video_bus_add_notify_handler(video);
+ error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
+ acpi_video_bus_notify);
+ if (error)
+ goto err_remove;
+
return 0;
+err_remove:
+ mutex_lock(&video_list_lock);
+ list_del(&video->entry);
+ mutex_unlock(&video_list_lock);
+ acpi_video_bus_remove_notify_handler(video);
+ acpi_video_bus_unregister_backlight(video);
err_put_video:
acpi_video_bus_put_devices(video);
kfree(video->attached_array);
@@ -2075,6 +2092,9 @@ static void acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device);
+ acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
+ acpi_video_bus_notify);
+
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 22f1f7a9e5a3..911875c5a5f1 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -287,4 +287,6 @@ struct acpi_namespace_node *acpi_db_local_ns_lookup(char *name);
void acpi_db_uint32_to_hex_string(u32 value, char *buffer);
+void acpi_db_generate_interrupt(char *gsiv_arg);
+
#endif /* __ACDEBUG_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 778241173ed4..f4c90fc99be2 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -129,6 +129,7 @@ ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler);
ACPI_GLOBAL(void *, acpi_gbl_table_handler_context);
ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler);
ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list);
+ACPI_GLOBAL(struct acpi_ged_handler_info *, acpi_gbl_ged_handler_list);
/* Owner ID support */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 12d4a024f029..82563b44af35 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -543,6 +543,14 @@ struct acpi_field_info {
u32 pkg_length;
};
+/* Information about the interrupt ID and _EVT of a GED device */
+
+struct acpi_ged_handler_info {
+ struct acpi_ged_handler_info *next;
+ u32 int_id; /* The interrupt ID that triggers the execution of the evt_method */
+ struct acpi_namespace_node *evt_method; /* The _EVT method to be executed when an interrupt with ID = int_id is received */
+};
+
/*****************************************************************************
*
* Generic "state" object for stacks
@@ -560,25 +568,28 @@ struct acpi_field_info {
u8 descriptor_type; /* To differentiate various internal objs */\
u8 flags; \
u16 value; \
- u16 state;
+ u16 state
/* There are 2 bytes available here until the next natural alignment boundary */
struct acpi_common_state {
-ACPI_STATE_COMMON};
+ ACPI_STATE_COMMON;
+};
/*
* Update state - used to traverse complex objects such as packages
*/
struct acpi_update_state {
- ACPI_STATE_COMMON union acpi_operand_object *object;
+ ACPI_STATE_COMMON;
+ union acpi_operand_object *object;
};
/*
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON u32 index;
+ ACPI_STATE_COMMON;
+ u32 index;
union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
@@ -591,7 +602,8 @@ struct acpi_pkg_state {
* Allows nesting of these constructs
*/
struct acpi_control_state {
- ACPI_STATE_COMMON u16 opcode;
+ ACPI_STATE_COMMON;
+ u16 opcode;
union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
@@ -602,11 +614,13 @@ struct acpi_control_state {
* Scope state - current scope during namespace lookups
*/
struct acpi_scope_state {
- ACPI_STATE_COMMON struct acpi_namespace_node *node;
+ ACPI_STATE_COMMON;
+ struct acpi_namespace_node *node;
};
struct acpi_pscope_state {
- ACPI_STATE_COMMON u32 arg_count; /* Number of fixed arguments */
+ ACPI_STATE_COMMON;
+ u32 arg_count; /* Number of fixed arguments */
union acpi_parse_object *op; /* Current op being parsed */
u8 *arg_end; /* Current argument end */
u8 *pkg_end; /* Current package end */
@@ -618,7 +632,8 @@ struct acpi_pscope_state {
* states are created when there are nested control methods executing.
*/
struct acpi_thread_state {
- ACPI_STATE_COMMON u8 current_sync_level; /* Mutex Sync (nested acquire) level */
+ ACPI_STATE_COMMON;
+ u8 current_sync_level; /* Mutex Sync (nested acquire) level */
struct acpi_walk_state *walk_state_list; /* Head of list of walk_states for this thread */
union acpi_operand_object *acquired_mutex_list; /* List of all currently acquired mutexes */
acpi_thread_id thread_id; /* Running thread ID */
@@ -629,8 +644,8 @@ struct acpi_thread_state {
* AML arguments
*/
struct acpi_result_values {
- ACPI_STATE_COMMON
- union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM];
+ ACPI_STATE_COMMON;
+ union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM];
};
typedef
@@ -652,7 +667,8 @@ struct acpi_global_notify_handler {
* handler/dispatcher.
*/
struct acpi_notify_info {
- ACPI_STATE_COMMON u8 handler_list_id;
+ ACPI_STATE_COMMON;
+ u8 handler_list_id;
struct acpi_namespace_node *node;
union acpi_operand_object *handler_list_head;
struct acpi_global_notify_handler *global;
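
The ACPI_STATE_COMMON change above drops the macro's trailing semicolon so that each use site supplies it, letting the state structures read like ordinary member declarations. A tiny illustration with made-up names (EXAMPLE_STATE_COMMON and example_pkg_state are not part of ACPICA; the u8/u16/u32 typedefs come from <linux/types.h>):

#define EXAMPLE_STATE_COMMON \
	void *next; \
	u8 descriptor_type; \
	u8 flags; \
	u16 value; \
	u16 state			/* no trailing ';' -- added at each use site */

struct example_pkg_state {
	EXAMPLE_STATE_COMMON;		/* expands to the common header fields */
	u32 index;			/* state-specific members follow naturally */
};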
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index e64aabe3d33a..2e442f5a3123 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -440,6 +440,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
METHOD_NO_RETURN_VALUE}},
+ {{"_DSC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
{{"_DSD", METHOD_0ARGS, /* ACPI 6.0 */
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Buf, 1 Pkg */
PACKAGE_INFO(ACPI_PTYPE2_UUID_PAIR, ACPI_RTYPE_BUFFER, 1,
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 9eb68e0751c7..3d99a9048585 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -1010,6 +1010,64 @@ void acpi_db_display_resources(char *object_arg)
acpi_db_set_output_destination(ACPI_DB_CONSOLE_OUTPUT);
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_generate_interrupt
+ *
+ * PARAMETERS: gsiv_arg - Raw GSIV number, ascii string
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Simulate firing of a GED
+ *
+ ******************************************************************************/
+
+void acpi_db_generate_interrupt(char *gsiv_arg)
+{
+ u32 gsiv_number;
+ struct acpi_ged_handler_info *ged_info = acpi_gbl_ged_handler_list;
+
+ if (!ged_info) {
+ acpi_os_printf("No GED handling present\n");
+ return;
+ }
+
+ gsiv_number = strtoul(gsiv_arg, NULL, 0);
+
+ while (ged_info) {
+
+ if (ged_info->int_id == gsiv_number) {
+ struct acpi_object_list arg_list;
+ union acpi_object arg0;
+ acpi_handle evt_handle = ged_info->evt_method;
+ acpi_status status;
+
+ acpi_os_printf("Evaluate GED _EVT (GSIV=%d)\n",
+ gsiv_number);
+
+ if (!evt_handle) {
+ acpi_os_printf("Undefined _EVT method\n");
+ return;
+ }
+
+ arg0.integer.type = ACPI_TYPE_INTEGER;
+ arg0.integer.value = gsiv_number;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg0;
+
+ status =
+ acpi_evaluate_object(evt_handle, NULL, &arg_list,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not evaluate _EVT\n");
+ return;
+ }
+
+ }
+ ged_info = ged_info->next;
+ }
+}
+
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index b8a48923064f..861b12c334ab 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -106,6 +106,7 @@ enum acpi_ex_debugger_commands {
CMD_THREADS,
CMD_TEST,
+ CMD_INTERRUPT,
#endif
};
@@ -185,6 +186,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"THREADS", 3},
{"TEST", 1},
+ {"INTERRUPT", 1},
#endif
{NULL, 0}
};
@@ -318,6 +320,7 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Gpes", "Display info on all GPE devices\n"},
{1, " Sci", "Generate an SCI\n"},
{1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
+ {1, " Interrupt <GSIV>", "Simulate an interrupt\n"},
#endif
{0, NULL, NULL}
};
@@ -1064,6 +1067,11 @@ acpi_db_command_dispatch(char *input_buffer,
acpi_os_printf("Event command not implemented\n");
break;
+ case CMD_INTERRUPT:
+
+ acpi_db_generate_interrupt(acpi_gbl_db_args[1]);
+ break;
+
case CMD_GPE:
acpi_db_generate_gpe(acpi_gbl_db_args[1], acpi_gbl_db_args[2]);
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index d3841ded3a81..75338a13c802 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -146,8 +146,8 @@ acpi_ds_result_push(union acpi_operand_object *object,
if (!object) {
ACPI_ERROR((AE_INFO,
- "Null Object! Obj=%p State=%p Num=%u",
- object, walk_state, walk_state->result_count));
+ "Null Object! State=%p Num=%u",
+ walk_state, walk_state->result_count));
return (AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 5d99b1a76c83..5241f4c01c76 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -343,8 +343,7 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
/* Copy the input buffer data to the transfer buffer */
buffer = buffer_desc->buffer.pointer;
- data_length = (buffer_length < source_desc->buffer.length ?
- buffer_length : source_desc->buffer.length);
+ data_length = ACPI_MIN(buffer_length, source_desc->buffer.length);
memcpy(buffer, source_desc->buffer.pointer, data_length);
/* Lock entire transaction if requested */
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 09029fe545f1..39e31030e5f4 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -603,7 +603,7 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R),
+ AML_FLAGS_EXEC_0A_0T_1R | AML_NO_OPERAND_RESOLVE),
/* ACPI 5.0 opcodes */
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1bbba8585fa6..c5f6c85a3a09 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -37,7 +37,12 @@ void acpi_ut_init_stack_ptr_trace(void)
{
acpi_size current_sp;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 12
+#pragma GCC diagnostic ignored "-Wdangling-pointer="
+#endif
acpi_gbl_entry_stack_pointer = &current_sp;
+#pragma GCC diagnostic pop
}
/*******************************************************************************
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index f81fe24894b2..143debc1ba4a 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -3,4 +3,5 @@ obj-$(CONFIG_ACPI_AGDI) += agdi.o
obj-$(CONFIG_ACPI_IORT) += iort.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_APMT) += apmt.o
+obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/arm64/amba.c
index f5b443ab01c2..b2a7631d7ac7 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/arm64/amba.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include "internal.h"
+#include "init.h"
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
diff --git a/drivers/acpi/arm64/init.c b/drivers/acpi/arm64/init.c
index d3ce53dda122..d0c8aed90fd1 100644
--- a/drivers/acpi/arm64/init.c
+++ b/drivers/acpi/arm64/init.c
@@ -10,4 +10,6 @@ void __init acpi_arm_init(void)
acpi_apmt_init();
if (IS_ENABLED(CONFIG_ACPI_IORT))
acpi_iort_init();
+ if (IS_ENABLED(CONFIG_ARM_AMBA))
+ acpi_amba_init();
}
diff --git a/drivers/acpi/arm64/init.h b/drivers/acpi/arm64/init.h
index a1715a2a34e9..dcc277977194 100644
--- a/drivers/acpi/arm64/init.h
+++ b/drivers/acpi/arm64/init.h
@@ -4,3 +4,4 @@
void __init acpi_agdi_init(void);
void __init acpi_apmt_init(void);
void __init acpi_iort_init(void);
+void __init acpi_amba_init(void);
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 56d887323ae5..6496ff5a6ba2 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1708,7 +1708,10 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
static struct acpi_platform_list pmcg_plat_info[] __initdata = {
/* HiSilicon Hip08 Platform */
{"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
- "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ /* HiSilicon Hip09 Platform */
+ {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
{ }
};
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 9c67ed02d797..969bf81e8d54 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1034,8 +1034,9 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
}
/* Driver Interface */
-static void acpi_battery_notify(struct acpi_device *device, u32 event)
+static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
{
+ struct acpi_device *device = data;
struct acpi_battery *battery = acpi_driver_data(device);
struct power_supply *old;
@@ -1212,13 +1213,22 @@ static int acpi_battery_add(struct acpi_device *device)
device_init_wakeup(&device->dev, 1);
- return result;
+ result = acpi_dev_install_notify_handler(device, ACPI_ALL_NOTIFY,
+ acpi_battery_notify);
+ if (result)
+ goto fail_pm;
+
+ return 0;
+fail_pm:
+ device_init_wakeup(&device->dev, 0);
+ unregister_pm_notifier(&battery->pm_nb);
fail:
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
kfree(battery);
+
return result;
}
@@ -1228,10 +1238,16 @@ static void acpi_battery_remove(struct acpi_device *device)
if (!device || !acpi_driver_data(device))
return;
- device_init_wakeup(&device->dev, 0);
+
battery = acpi_driver_data(device);
+
+ acpi_dev_remove_notify_handler(device, ACPI_ALL_NOTIFY,
+ acpi_battery_notify);
+
+ device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
sysfs_remove_battery(battery);
+
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
kfree(battery);
@@ -1264,11 +1280,9 @@ static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
.ids = battery_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
.remove = acpi_battery_remove,
- .notify = acpi_battery_notify,
},
.drv.pm = &acpi_battery_pm,
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 2fc2b43a4ed3..f41dda2d3493 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -554,6 +554,30 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device,
acpi_os_wait_events_complete();
}
+int acpi_dev_install_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler)
+{
+ acpi_status status;
+
+ status = acpi_install_notify_handler(adev->handle, handler_type,
+ handler, adev);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_dev_install_notify_handler);
+
+void acpi_dev_remove_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler)
+{
+ acpi_remove_notify_handler(adev->handle, handler_type, handler);
+ acpi_os_wait_events_complete();
+}
+EXPORT_SYMBOL_GPL(acpi_dev_remove_notify_handler);
+
/* Handle events targeting \_SB device (at present only graceful shutdown) */
#define ACPI_SB_NOTIFY_SHUTDOWN_REQUEST 0x81
@@ -1005,8 +1029,10 @@ static int acpi_device_probe(struct device *dev)
return -ENOSYS;
ret = acpi_drv->ops.add(acpi_dev);
- if (ret)
+ if (ret) {
+ acpi_dev->driver_data = NULL;
return ret;
+ }
pr_debug("Driver [%s] successfully bound to device [%s]\n",
acpi_drv->name, acpi_dev->pnp.bus_id);
@@ -1296,9 +1322,6 @@ static int __init acpi_bus_init(void)
goto error1;
}
- /* Set capability bits for _OSC under processor scope */
- acpi_early_processor_osc();
-
/*
* _OSC method may exist in module level code,
* so it must be run after ACPI_FULL_INITIALIZATION
@@ -1314,7 +1337,7 @@ static int __init acpi_bus_init(void)
acpi_sysfs_init();
- acpi_early_processor_set_pdc();
+ acpi_early_processor_control_setup();
/*
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
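
The two helpers exported above replace the legacy .notify callback in struct acpi_driver. A hedged sketch of the conversion pattern the rest of this merge applies to battery, hed, thermal and video (the foo_* names are placeholders, not code from the patch): install the handler as the last step of .add() and remove it first in .remove(), so no notification can race with setup or teardown.

static void foo_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;	/* context passed by the helper */

	acpi_handle_debug(adev->handle, "notify event 0x%x\n", event);
}

static int foo_add(struct acpi_device *adev)
{
	/* ... finish all driver setup before notifications can arrive ... */

	return acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
					       foo_notify);
}

static void foo_remove(struct acpi_device *adev)
{
	acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY, foo_notify);

	/* ... teardown can no longer race with foo_notify() ... */
}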
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index 78d44e3fe129..46c6f8c35b43 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -42,22 +42,32 @@ EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
* it is used by HEST Generic Hardware Error Source with notify type
* SCI.
*/
-static void acpi_hed_notify(struct acpi_device *device, u32 event)
+static void acpi_hed_notify(acpi_handle handle, u32 event, void *data)
{
blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
}
static int acpi_hed_add(struct acpi_device *device)
{
+ int err;
+
/* Only one hardware error device */
if (hed_handle)
return -EINVAL;
hed_handle = device->handle;
- return 0;
+
+ err = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
+ acpi_hed_notify);
+ if (err)
+ hed_handle = NULL;
+
+ return err;
}
static void acpi_hed_remove(struct acpi_device *device)
{
+ acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
+ acpi_hed_notify);
hed_handle = NULL;
}
@@ -68,7 +78,6 @@ static struct acpi_driver acpi_hed_driver = {
.ops = {
.add = acpi_hed_add,
.remove = acpi_hed_remove,
- .notify = acpi_hed_notify,
},
};
module_acpi_driver(acpi_hed_driver);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index f4148dc50b9c..866c7c4ed233 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,11 +28,6 @@ void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
void acpi_int340x_thermal_init(void);
-#ifdef CONFIG_ARM_AMBA
-void acpi_amba_init(void);
-#else
-static inline void acpi_amba_init(void) {}
-#endif
int acpi_sysfs_init(void);
void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
@@ -128,7 +123,6 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
-int acpi_power_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
@@ -152,15 +146,13 @@ int acpi_wakeup_device_init(void);
Processor
-------------------------------------------------------------------------- */
#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
+void acpi_early_processor_control_setup(void);
void acpi_early_processor_set_pdc(void);
-#else
-static inline void acpi_early_processor_set_pdc(void) {}
-#endif
-#ifdef CONFIG_X86
-void acpi_early_processor_osc(void);
+void acpi_proc_quirk_mwait_check(void);
+bool processor_physically_present(acpi_handle handle);
#else
-static inline void acpi_early_processor_osc(void) {}
+static inline void acpi_early_processor_control_setup(void) {}
#endif
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 07204d482968..f0e6738ae3c9 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3282,6 +3282,23 @@ static void acpi_nfit_put_table(void *table)
acpi_put_table(table);
}
+static void acpi_nfit_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct acpi_device *adev = data;
+
+ device_lock(&adev->dev);
+ __acpi_nfit_notify(&adev->dev, handle, event);
+ device_unlock(&adev->dev);
+}
+
+static void acpi_nfit_remove_notify_handler(void *data)
+{
+ struct acpi_device *adev = data;
+
+ acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY,
+ acpi_nfit_notify);
+}
+
void acpi_nfit_shutdown(void *data)
{
struct acpi_nfit_desc *acpi_desc = data;
@@ -3368,12 +3385,18 @@ static int acpi_nfit_add(struct acpi_device *adev)
if (rc)
return rc;
- return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
-}
-static void acpi_nfit_remove(struct acpi_device *adev)
-{
- /* see acpi_nfit_unregister */
+ rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
+ if (rc)
+ return rc;
+
+ rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
+ acpi_nfit_notify);
+ if (rc)
+ return rc;
+
+ return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
+ adev);
}
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
@@ -3446,13 +3469,6 @@ void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
-static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
-{
- device_lock(&adev->dev);
- __acpi_nfit_notify(&adev->dev, adev->handle, event);
- device_unlock(&adev->dev);
-}
-
static const struct acpi_device_id acpi_nfit_ids[] = {
{ "ACPI0012", 0 },
{ "", 0 },
@@ -3464,8 +3480,6 @@ static struct acpi_driver acpi_nfit_driver = {
.ids = acpi_nfit_ids,
.ops = {
.add = acpi_nfit_add,
- .remove = acpi_nfit_remove,
- .notify = acpi_nfit_notify,
},
};
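
The nfit conversion above pairs the handler installation with devm_add_action_or_reset() instead of an explicit .remove() callback. A small sketch of that pattern under assumed names (the example_* identifiers are illustrative; only the two acpi_dev_* helpers and devm_add_action_or_reset() come from the kernel):

static void example_notify(acpi_handle handle, u32 event, void *data)
{
	/* ... forward the event to the driver, as acpi_nfit_notify() does ... */
}

static void example_remove_notify(void *data)
{
	struct acpi_device *adev = data;

	acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY, example_notify);
}

static int example_probe_tail(struct device *dev, struct acpi_device *adev)
{
	int rc;

	rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
					     example_notify);
	if (rc)
		return rc;

	/* Runs automatically on unbind or on any later devm failure. */
	return devm_add_action_or_reset(dev, example_remove_notify, adev);
}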
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 3d4c4620f9f9..7020584096bf 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -53,7 +53,7 @@ static LIST_HEAD(prm_module_list);
struct prm_handler_info {
guid_t guid;
- void *handler_addr;
+ efi_status_t (__efiapi *handler_addr)(u64, void *);
u64 static_data_buffer_addr;
u64 acpi_param_buffer_addr;
@@ -260,9 +260,9 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
context.static_data_buffer = handler->static_data_buffer_addr;
context.mmio_ranges = module->mmio_info;
- status = efi_call_virt_pointer(handler, handler_addr,
- handler->acpi_param_buffer_addr,
- &context);
+ status = efi_call_acpi_prm_handler(handler->handler_addr,
+ handler->acpi_param_buffer_addr,
+ &context);
if (status == EFI_SUCCESS) {
buffer->prm_status = PRM_HANDLER_SUCCESS;
} else {
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index d6606a9f2da6..7dd6dbaa98c3 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -132,6 +132,30 @@ static int map_rintc_hartid(struct acpi_subtable_header *entry,
return -EINVAL;
}
+/*
+ * Retrieve LoongArch CPU physical id
+ */
+static int map_core_pic_id(struct acpi_subtable_header *entry,
+ int device_declaration, u32 acpi_id, phys_cpuid_t *phys_id)
+{
+ struct acpi_madt_core_pic *core_pic =
+ container_of(entry, struct acpi_madt_core_pic, header);
+
+ if (!(core_pic->flags & ACPI_MADT_ENABLED))
+ return -ENODEV;
+
+ /*
+ * device_declaration means a Device object in the DSDT. On LoongArch
+ * systems, the logical processor acpi_id is provided via the _UID
+ * property in the DSDT, so check device_declaration here.
+ */
+ if (device_declaration && (core_pic->processor_id == acpi_id)) {
+ *phys_id = core_pic->core_id;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
int type, u32 acpi_id)
{
@@ -165,6 +189,9 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
} else if (header->type == ACPI_MADT_TYPE_RINTC) {
if (!map_rintc_hartid(header, type, acpi_id, &phys_id))
break;
+ } else if (header->type == ACPI_MADT_TYPE_CORE_PIC) {
+ if (!map_core_pic_id(header, type, acpi_id, &phys_id))
+ break;
}
entry += header->length;
}
@@ -216,6 +243,8 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
map_x2apic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
map_gicc_mpidr(header, type, acpi_id, &phys_id);
+ else if (header->type == ACPI_MADT_TYPE_CORE_PIC)
+ map_core_pic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 18fb04523f93..1a8591e9a9bf 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -9,71 +9,19 @@
#define pr_fmt(fmt) "ACPI: " fmt
-#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
-#include <xen/xen.h>
-
#include "internal.h"
-static bool __init processor_physically_present(acpi_handle handle)
-{
- int cpuid, type;
- u32 acpi_id;
- acpi_status status;
- acpi_object_type acpi_type;
- unsigned long long tmp;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
- status = acpi_get_type(handle, &acpi_type);
- if (ACPI_FAILURE(status))
- return false;
-
- switch (acpi_type) {
- case ACPI_TYPE_PROCESSOR:
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = object.processor.proc_id;
- break;
- case ACPI_TYPE_DEVICE:
- status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = tmp;
- break;
- default:
- return false;
- }
-
- if (xen_initial_domain())
- /*
- * When running as a Xen dom0 the number of processors Linux
- * sees can be different from the real number of processors on
- * the system, and we still need to execute _PDC for all of
- * them.
- */
- return xen_processor_present(acpi_id);
-
- type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
- cpuid = acpi_get_cpuid(handle, type, acpi_id);
-
- return !invalid_logical_cpuid(cpuid);
-}
-
static void acpi_set_pdc_bits(u32 *buf)
{
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
- /* Enable coordination with firmware's _TSD info */
- buf[2] = ACPI_PDC_SMP_T_SWCOORD;
-
/* Twiddle arch-specific bits needed for _PDC */
- arch_acpi_set_pdc_bits(buf);
+ arch_acpi_set_proc_cap_bits(&buf[2]);
}
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
@@ -123,20 +71,6 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
acpi_status status = AE_OK;
- if (boot_option_idle_override == IDLE_NOMWAIT) {
- /*
- * If mwait is disabled for CPU C-states, the C2C3_FFH access
- * mode will be disabled in the parameter of _PDC object.
- * Of course C1_FFH access mode will also be disabled.
- */
- union acpi_object *obj;
- u32 *buffer = NULL;
-
- obj = pdc_in->pointer;
- buffer = (u32 *)(obj->buffer.pointer);
- buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
-
- }
status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
if (ACPI_FAILURE(status))
@@ -174,36 +108,9 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
- pr_notice("%s detected - disabling mwait for CPU C-states\n",
- id->ident);
- boot_option_idle_override = IDLE_NOMWAIT;
- return 0;
-}
-
-static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
- DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
- {},
-};
-
-static void __init processor_dmi_check(void)
-{
- /*
- * Check whether the system is DMI table. If yes, OSPM
- * should not use mwait for CPU-states.
- */
- dmi_check_system(processor_idle_dmi_table);
-}
-
void __init acpi_early_processor_set_pdc(void)
{
- processor_dmi_check();
+ acpi_proc_quirk_mwait_check();
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 87e385542576..531a9e3df717 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -795,6 +795,9 @@ static const char * const acpi_ignore_dep_ids[] = {
/* List of HIDs for which we honor deps of matching ACPI devs, when checking _DEP lists. */
static const char * const acpi_honor_dep_ids[] = {
"INT3472", /* Camera sensor PMIC / clk and regulator info */
+ "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */
+ "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */
+ "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */
NULL
};
@@ -2616,7 +2619,6 @@ void __init acpi_scan_init(void)
acpi_watchdog_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
- acpi_amba_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index f9f6ebb08fdb..419590f41ed5 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -82,10 +82,6 @@ static int tzp;
module_param(tzp, int, 0444);
MODULE_PARM_DESC(tzp, "Thermal zone polling frequency, in 1/10 seconds.");
-static int nocrt;
-module_param(nocrt, int, 0);
-MODULE_PARM_DESC(nocrt, "Set to take no action upon ACPI thermal zone critical trips points.");
-
static int off;
module_param(off, int, 0);
MODULE_PARM_DESC(off, "Set to disable ACPI thermal support.");
@@ -96,35 +92,27 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static struct workqueue_struct *acpi_thermal_pm_queue;
-struct acpi_thermal_critical {
- unsigned long temperature;
- bool valid;
-};
-
-struct acpi_thermal_hot {
+struct acpi_thermal_trip {
unsigned long temperature;
bool valid;
};
struct acpi_thermal_passive {
+ struct acpi_thermal_trip trip;
struct acpi_handle_list devices;
- unsigned long temperature;
unsigned long tc1;
unsigned long tc2;
unsigned long tsp;
- bool valid;
};
struct acpi_thermal_active {
+ struct acpi_thermal_trip trip;
struct acpi_handle_list devices;
- unsigned long temperature;
- bool valid;
- bool enabled;
};
struct acpi_thermal_trips {
- struct acpi_thermal_critical critical;
- struct acpi_thermal_hot hot;
+ struct acpi_thermal_trip critical;
+ struct acpi_thermal_trip hot;
struct acpi_thermal_passive passive;
struct acpi_thermal_active active[ACPI_THERMAL_MAX_ACTIVE];
};
@@ -137,6 +125,7 @@ struct acpi_thermal {
unsigned long polling_frequency;
volatile u8 zombie;
struct acpi_thermal_trips trips;
+ struct thermal_trip *trip_table;
struct acpi_handle_list devices;
struct thermal_zone_device *thermal_zone;
int kelvin_offset; /* in millidegrees */
@@ -190,7 +179,16 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
return 0;
}
-static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
+static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
+{
+ if (temp_deci_k == THERMAL_TEMP_INVALID)
+ return THERMAL_TEMP_INVALID;
+
+ return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ tz->kelvin_offset);
+}
+
+static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
{
acpi_status status;
unsigned long long tmp;
@@ -255,9 +253,9 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
/* Passive (optional) */
- if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.valid) ||
+ if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.trip.valid) ||
flag == ACPI_TRIPS_INIT) {
- valid = tz->trips.passive.valid;
+ valid = tz->trips.passive.trip.valid;
if (psv == -1) {
status = AE_SUPPORT;
} else if (psv > 0) {
@@ -269,44 +267,44 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
if (ACPI_FAILURE(status)) {
- tz->trips.passive.valid = false;
+ tz->trips.passive.trip.valid = false;
} else {
- tz->trips.passive.temperature = tmp;
- tz->trips.passive.valid = true;
+ tz->trips.passive.trip.temperature = tmp;
+ tz->trips.passive.trip.valid = true;
if (flag == ACPI_TRIPS_INIT) {
status = acpi_evaluate_integer(tz->device->handle,
"_TC1", NULL, &tmp);
if (ACPI_FAILURE(status))
- tz->trips.passive.valid = false;
+ tz->trips.passive.trip.valid = false;
else
tz->trips.passive.tc1 = tmp;
status = acpi_evaluate_integer(tz->device->handle,
"_TC2", NULL, &tmp);
if (ACPI_FAILURE(status))
- tz->trips.passive.valid = false;
+ tz->trips.passive.trip.valid = false;
else
tz->trips.passive.tc2 = tmp;
status = acpi_evaluate_integer(tz->device->handle,
"_TSP", NULL, &tmp);
if (ACPI_FAILURE(status))
- tz->trips.passive.valid = false;
+ tz->trips.passive.trip.valid = false;
else
tz->trips.passive.tsp = tmp;
}
}
}
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.valid) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.trip.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle, "_PSL",
NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid passive threshold\n");
- tz->trips.passive.valid = false;
+ tz->trips.passive.trip.valid = false;
} else {
- tz->trips.passive.valid = true;
+ tz->trips.passive.trip.valid = true;
}
if (memcmp(&tz->trips.passive.devices, &devices,
@@ -317,24 +315,24 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
}
if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
- if (valid != tz->trips.passive.valid)
+ if (valid != tz->trips.passive.trip.valid)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
}
/* Active (optional) */
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
- valid = tz->trips.active[i].valid;
+ valid = tz->trips.active[i].trip.valid;
if (act == -1)
break; /* disable all active trip points */
if (flag == ACPI_TRIPS_INIT || ((flag & ACPI_TRIPS_ACTIVE) &&
- tz->trips.active[i].valid)) {
+ tz->trips.active[i].trip.valid)) {
status = acpi_evaluate_integer(tz->device->handle,
name, NULL, &tmp);
if (ACPI_FAILURE(status)) {
- tz->trips.active[i].valid = false;
+ tz->trips.active[i].trip.valid = false;
if (i == 0)
break;
@@ -342,35 +340,36 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
break;
if (i == 1)
- tz->trips.active[0].temperature = celsius_to_deci_kelvin(act);
+ tz->trips.active[0].trip.temperature =
+ celsius_to_deci_kelvin(act);
else
/*
* Don't allow override higher than
* the next higher trip point
*/
- tz->trips.active[i-1].temperature =
+ tz->trips.active[i-1].trip.temperature =
min_t(unsigned long,
- tz->trips.active[i-2].temperature,
+ tz->trips.active[i-2].trip.temperature,
celsius_to_deci_kelvin(act));
break;
} else {
- tz->trips.active[i].temperature = tmp;
- tz->trips.active[i].valid = true;
+ tz->trips.active[i].trip.temperature = tmp;
+ tz->trips.active[i].trip.valid = true;
}
}
name[2] = 'L';
- if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].valid) {
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].trip.valid) {
memset(&devices, 0, sizeof(struct acpi_handle_list));
status = acpi_evaluate_reference(tz->device->handle,
name, NULL, &devices);
if (ACPI_FAILURE(status)) {
acpi_handle_info(tz->device->handle,
"Invalid active%d threshold\n", i);
- tz->trips.active[i].valid = false;
+ tz->trips.active[i].trip.valid = false;
} else {
- tz->trips.active[i].valid = true;
+ tz->trips.active[i].trip.valid = true;
}
if (memcmp(&tz->trips.active[i].devices, &devices,
@@ -381,10 +380,10 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
}
}
if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
- if (valid != tz->trips.active[i].valid)
+ if (valid != tz->trips.active[i].trip.valid)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "state");
- if (!tz->trips.active[i].valid)
+ if (!tz->trips.active[i].trip.valid)
break;
}
@@ -398,24 +397,73 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device");
}
}
+}
+
+static int acpi_thermal_adjust_trip(struct thermal_trip *trip, void *data)
+{
+ struct acpi_thermal_trip *acpi_trip = trip->priv;
+ struct acpi_thermal *tz = data;
+
+ if (!acpi_trip)
+ return 0;
+
+ if (acpi_trip->valid)
+ trip->temperature = acpi_thermal_temp(tz, acpi_trip->temperature);
+ else
+ trip->temperature = THERMAL_TEMP_INVALID;
return 0;
}
+static void acpi_thermal_adjust_thermal_zone(struct thermal_zone_device *thermal,
+ unsigned long data)
+{
+ struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
+ int flag = data == ACPI_THERMAL_NOTIFY_THRESHOLDS ?
+ ACPI_TRIPS_THRESHOLDS : ACPI_TRIPS_DEVICES;
+
+ __acpi_thermal_trips_update(tz, flag);
+
+ for_each_thermal_trip(tz->thermal_zone, acpi_thermal_adjust_trip, tz);
+}
+
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
+static void acpi_thermal_trips_update(struct acpi_thermal *tz, u32 event)
+{
+ struct acpi_device *adev = tz->device;
+
+ /*
+ * Use thermal_zone_device_exec() to carry out the trip points
+ * update, so as to protect thermal_get_trend() from getting stale
+ * trip point temperatures and to prevent thermal_zone_device_update()
+ * invoked from acpi_thermal_check_fn() from producing inconsistent
+ * results.
+ */
+ thermal_zone_device_exec(tz->thermal_zone,
+ acpi_thermal_adjust_thermal_zone, event);
+ acpi_queue_thermal_check(tz);
+ acpi_bus_generate_netlink_event(adev->pnp.device_class,
+ dev_name(&adev->dev), event, 0);
+}
+
static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
{
- int i, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
bool valid;
+ int i;
- if (ret)
- return ret;
+ __acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
valid = tz->trips.critical.valid |
tz->trips.hot.valid |
- tz->trips.passive.valid;
+ tz->trips.passive.trip.valid;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++)
- valid = valid || tz->trips.active[i].valid;
+ valid = valid || tz->trips.active[i].trip.valid;
if (!valid) {
pr_warn(FW_BUG "No valid trip found\n");
@@ -443,159 +491,55 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
return 0;
}
-static int thermal_get_trip_type(struct thermal_zone_device *thermal,
- int trip, enum thermal_trip_type *type)
+static int thermal_get_trend(struct thermal_zone_device *thermal,
+ int trip_index, enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
- int i;
+ struct acpi_thermal_trip *acpi_trip;
+ int t, i;
- if (!tz || trip < 0)
+ if (!tz || trip_index < 0)
return -EINVAL;
- if (tz->trips.critical.valid) {
- if (!trip) {
- *type = THERMAL_TRIP_CRITICAL;
- return 0;
- }
- trip--;
- }
-
- if (tz->trips.hot.valid) {
- if (!trip) {
- *type = THERMAL_TRIP_HOT;
- return 0;
- }
- trip--;
- }
-
- if (tz->trips.passive.valid) {
- if (!trip) {
- *type = THERMAL_TRIP_PASSIVE;
- return 0;
- }
- trip--;
- }
-
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].valid; i++) {
- if (!trip) {
- *type = THERMAL_TRIP_ACTIVE;
- return 0;
- }
- trip--;
- }
-
- return -EINVAL;
-}
+ if (tz->trips.critical.valid)
+ trip_index--;
-static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, int *temp)
-{
- struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
- int i;
+ if (tz->trips.hot.valid)
+ trip_index--;
- if (!tz || trip < 0)
+ if (trip_index < 0)
return -EINVAL;
- if (tz->trips.critical.valid) {
- if (!trip) {
- *temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
- return 0;
- }
- trip--;
- }
-
- if (tz->trips.hot.valid) {
- if (!trip) {
- *temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.hot.temperature,
- tz->kelvin_offset);
- return 0;
- }
- trip--;
- }
-
- if (tz->trips.passive.valid) {
- if (!trip) {
- *temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.passive.temperature,
- tz->kelvin_offset);
- return 0;
- }
- trip--;
- }
-
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
- tz->trips.active[i].valid; i++) {
- if (!trip) {
- *temp = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.active[i].temperature,
- tz->kelvin_offset);
- return 0;
- }
- trip--;
- }
-
- return -EINVAL;
-}
-
-static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
- int *temperature)
-{
- struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
+ acpi_trip = &tz->trips.passive.trip;
+ if (acpi_trip->valid && !trip_index--) {
+ t = tz->trips.passive.tc1 * (tz->temperature -
+ tz->last_temperature) +
+ tz->trips.passive.tc2 * (tz->temperature -
+ acpi_trip->temperature);
+ if (t > 0)
+ *trend = THERMAL_TREND_RAISING;
+ else if (t < 0)
+ *trend = THERMAL_TREND_DROPPING;
+ else
+ *trend = THERMAL_TREND_STABLE;
- if (tz->trips.critical.valid) {
- *temperature = deci_kelvin_to_millicelsius_with_offset(
- tz->trips.critical.temperature,
- tz->kelvin_offset);
return 0;
}
- return -EINVAL;
-}
-
-static int thermal_get_trend(struct thermal_zone_device *thermal,
- int trip, enum thermal_trend *trend)
-{
- struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
- enum thermal_trip_type type;
- int i;
-
- if (thermal_get_trip_type(thermal, trip, &type))
- return -EINVAL;
-
- if (type == THERMAL_TRIP_ACTIVE) {
- int trip_temp;
- int temp = deci_kelvin_to_millicelsius_with_offset(
- tz->temperature, tz->kelvin_offset);
- if (thermal_get_trip_temp(thermal, trip, &trip_temp))
- return -EINVAL;
+ t = acpi_thermal_temp(tz, tz->temperature);
- if (temp > trip_temp) {
- *trend = THERMAL_TREND_RAISING;
- return 0;
- } else {
- /* Fall back on default trend */
- return -EINVAL;
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ acpi_trip = &tz->trips.active[i].trip;
+ if (acpi_trip->valid && !trip_index--) {
+ if (t > acpi_thermal_temp(tz, acpi_trip->temperature)) {
+ *trend = THERMAL_TREND_RAISING;
+ return 0;
+ }
+ break;
}
}
- /*
- * tz->temperature has already been updated by generic thermal layer,
- * before this callback being invoked
- */
- i = tz->trips.passive.tc1 * (tz->temperature - tz->last_temperature) +
- tz->trips.passive.tc2 * (tz->temperature - tz->trips.passive.temperature);
-
- if (i > 0)
- *trend = THERMAL_TREND_RAISING;
- else if (i < 0)
- *trend = THERMAL_TREND_DROPPING;
- else
- *trend = THERMAL_TREND_STABLE;
-
- return 0;
+ return -EINVAL;
}
static void acpi_thermal_zone_device_hot(struct thermal_zone_device *thermal)
@@ -637,7 +581,7 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
if (tz->trips.hot.valid)
trip++;
- if (tz->trips.passive.valid) {
+ if (tz->trips.passive.trip.valid) {
trip++;
for (i = 0; i < tz->trips.passive.devices.count; i++) {
handle = tz->trips.passive.devices.handles[i];
@@ -662,7 +606,7 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
}
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!tz->trips.active[i].valid)
+ if (!tz->trips.active[i].trip.valid)
break;
trip++;
@@ -709,9 +653,6 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
.bind = acpi_thermal_bind_cooling_device,
.unbind = acpi_thermal_unbind_cooling_device,
.get_temp = thermal_get_temp,
- .get_trip_type = thermal_get_trip_type,
- .get_trip_temp = thermal_get_trip_temp,
- .get_crit_temp = thermal_get_crit_temp,
.get_trend = thermal_get_trend,
.hot = acpi_thermal_zone_device_hot,
.critical = acpi_thermal_zone_device_critical,
@@ -745,63 +686,97 @@ static void acpi_thermal_zone_sysfs_remove(struct acpi_thermal *tz)
static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
{
- int trips = 0;
+ struct acpi_thermal_trip *acpi_trip;
+ struct thermal_trip *trip;
+ int passive_delay = 0;
+ int trip_count = 0;
int result;
- acpi_status status;
int i;
if (tz->trips.critical.valid)
- trips++;
+ trip_count++;
if (tz->trips.hot.valid)
- trips++;
+ trip_count++;
+
+ if (tz->trips.passive.trip.valid) {
+ trip_count++;
+ passive_delay = tz->trips.passive.tsp * 100;
+ }
- if (tz->trips.passive.valid)
- trips++;
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].trip.valid; i++)
+ trip_count++;
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && tz->trips.active[i].valid;
- i++, trips++);
+ trip = kcalloc(trip_count, sizeof(*trip), GFP_KERNEL);
+ if (!trip)
+ return -ENOMEM;
- if (tz->trips.passive.valid)
- tz->thermal_zone = thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- tz->trips.passive.tsp * 100,
- tz->polling_frequency * 100);
- else
- tz->thermal_zone =
- thermal_zone_device_register("acpitz", trips, 0, tz,
- &acpi_thermal_zone_ops, NULL,
- 0, tz->polling_frequency * 100);
+ tz->trip_table = trip;
- if (IS_ERR(tz->thermal_zone))
- return -ENODEV;
+ if (tz->trips.critical.valid) {
+ trip->type = THERMAL_TRIP_CRITICAL;
+ trip->temperature = acpi_thermal_temp(tz, tz->trips.critical.temperature);
+ trip++;
+ }
+
+ if (tz->trips.hot.valid) {
+ trip->type = THERMAL_TRIP_HOT;
+ trip->temperature = acpi_thermal_temp(tz, tz->trips.hot.temperature);
+ trip++;
+ }
+
+ acpi_trip = &tz->trips.passive.trip;
+ if (acpi_trip->valid) {
+ trip->type = THERMAL_TRIP_PASSIVE;
+ trip->temperature = acpi_thermal_temp(tz, acpi_trip->temperature);
+ trip->priv = acpi_trip;
+ trip++;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ acpi_trip = &tz->trips.active[i].trip;
+
+ if (!acpi_trip->valid)
+ break;
+
+ trip->type = THERMAL_TRIP_ACTIVE;
+ trip->temperature = acpi_thermal_temp(tz, acpi_trip->temperature);
+ trip->priv = acpi_trip;
+ trip++;
+ }
+
+ tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
+ tz->trip_table,
+ trip_count,
+ 0, tz,
+ &acpi_thermal_zone_ops,
+ NULL,
+ passive_delay,
+ tz->polling_frequency * 100);
+ if (IS_ERR(tz->thermal_zone)) {
+ result = PTR_ERR(tz->thermal_zone);
+ goto free_trip_table;
+ }
result = acpi_thermal_zone_sysfs_add(tz);
if (result)
goto unregister_tzd;
- status = acpi_bus_attach_private_data(tz->device->handle,
- tz->thermal_zone);
- if (ACPI_FAILURE(status)) {
- result = -ENODEV;
- goto remove_links;
- }
-
result = thermal_zone_device_enable(tz->thermal_zone);
if (result)
- goto acpi_bus_detach;
+ goto remove_links;
dev_info(&tz->device->dev, "registered as thermal_zone%d\n",
thermal_zone_device_id(tz->thermal_zone));
return 0;
-acpi_bus_detach:
- acpi_bus_detach_private_data(tz->device->handle);
remove_links:
acpi_thermal_zone_sysfs_remove(tz);
unregister_tzd:
thermal_zone_device_unregister(tz->thermal_zone);
+free_trip_table:
+ kfree(tz->trip_table);
return result;
}
@@ -810,8 +785,8 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
{
acpi_thermal_zone_sysfs_remove(tz);
thermal_zone_device_unregister(tz->thermal_zone);
+ kfree(tz->trip_table);
tz->thermal_zone = NULL;
- acpi_bus_detach_private_data(tz->device->handle);
}
@@ -819,14 +794,9 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */
-static void acpi_queue_thermal_check(struct acpi_thermal *tz)
-{
- if (!work_pending(&tz->thermal_check_work))
- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
-}
-
-static void acpi_thermal_notify(struct acpi_device *device, u32 event)
+static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
{
+ struct acpi_device *device = data;
struct acpi_thermal *tz = acpi_driver_data(device);
if (!tz)
@@ -837,16 +807,8 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
- acpi_thermal_trips_update(tz, ACPI_TRIPS_THRESHOLDS);
- acpi_queue_thermal_check(tz);
- acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
- break;
case ACPI_THERMAL_NOTIFY_DEVICES:
- acpi_thermal_trips_update(tz, ACPI_TRIPS_DEVICES);
- acpi_queue_thermal_check(tz);
- acpi_bus_generate_netlink_event(device->pnp.device_class,
- dev_name(&device->dev), event, 0);
+ acpi_thermal_trips_update(tz, event);
break;
default:
acpi_handle_debug(device->handle, "Unsupported event [0x%x]\n",
@@ -997,11 +959,20 @@ static int acpi_thermal_add(struct acpi_device *device)
pr_info("%s [%s] (%ld C)\n", acpi_device_name(device),
acpi_device_bid(device), deci_kelvin_to_celsius(tz->temperature));
- goto end;
+ result = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
+ acpi_thermal_notify);
+ if (result)
+ goto flush_wq;
+
+ return 0;
+
+flush_wq:
+ flush_workqueue(acpi_thermal_pm_queue);
+ acpi_thermal_unregister_thermal_zone(tz);
free_memory:
kfree(tz);
-end:
+
return result;
}
@@ -1012,10 +983,14 @@ static void acpi_thermal_remove(struct acpi_device *device)
if (!device || !acpi_driver_data(device))
return;
- flush_workqueue(acpi_thermal_pm_queue);
tz = acpi_driver_data(device);
+ acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
+ acpi_thermal_notify);
+
+ flush_workqueue(acpi_thermal_pm_queue);
acpi_thermal_unregister_thermal_zone(tz);
+
kfree(tz);
}
@@ -1030,7 +1005,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state, result;
+ int i, j, power_state;
if (!dev)
return -EINVAL;
@@ -1040,18 +1015,12 @@ static int acpi_thermal_resume(struct device *dev)
return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- if (!tz->trips.active[i].valid)
+ if (!tz->trips.active[i].trip.valid)
break;
- tz->trips.active[i].enabled = true;
for (j = 0; j < tz->trips.active[i].devices.count; j++) {
- result = acpi_bus_update_power(
- tz->trips.active[i].devices.handles[j],
- &power_state);
- if (result || (power_state != ACPI_STATE_D0)) {
- tz->trips.active[i].enabled = false;
- break;
- }
+ acpi_bus_update_power(tz->trips.active[i].devices.handles[j],
+ &power_state);
}
}
@@ -1078,7 +1047,6 @@ static struct acpi_driver acpi_thermal_driver = {
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
- .notify = acpi_thermal_notify,
},
.drv.pm = &acpi_thermal_pm,
};
@@ -1094,7 +1062,7 @@ static int thermal_act(const struct dmi_system_id *d) {
static int thermal_nocrt(const struct dmi_system_id *d) {
pr_notice("%s detected: disabling all critical thermal trip point actions.\n",
d->ident);
- nocrt = 1;
+ crt = -1;
return 0;
}
static int thermal_tzp(const struct dmi_system_id *d) {
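
The thermal.c rework above switches from per-trip callbacks to a pre-built struct thermal_trip array handed to thermal_zone_device_register_with_trips(). A hedged sketch with assumed values (the "example" zone name, the 105 °C / 85 °C trips and the delays are illustrative only):

static int example_register_zone(void *driver_data,
				 struct thermal_zone_device_ops *ops)
{
	static struct thermal_trip trips[] = {
		{ .type = THERMAL_TRIP_CRITICAL, .temperature = 105000 },	/* mC */
		{ .type = THERMAL_TRIP_PASSIVE,  .temperature = 85000 },	/* mC */
	};
	struct thermal_zone_device *tzd;

	tzd = thermal_zone_device_register_with_trips("example", trips,
						      ARRAY_SIZE(trips), 0,
						      driver_data, ops, NULL,
						      2000,	/* passive delay, ms */
						      1000);	/* polling delay, ms */
	return PTR_ERR_OR_ZERO(tzd);
}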
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 18cc08c858cf..442396f6ed1f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -446,6 +446,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://bugzilla.suse.com/show_bug.cgi?id=1208724 */
+ .callback = video_detect_force_native,
+ /* Lenovo Ideapad Z470 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Z470"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1187004 */
.callback = video_detect_force_native,
/* Lenovo Ideapad Z570 */
@@ -487,6 +496,24 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
},
{
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */
+ .callback = video_detect_force_native,
+ /* Apple iMac12,1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"),
+ },
+ },
+ {
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */
+ .callback = video_detect_force_native,
+ /* Apple iMac12,2 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"),
+ },
+ },
+ {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */
.callback = video_detect_force_native,
/* Apple MacBook Pro 12,1 */
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index ce62e61a9605..08f7c6708206 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -94,6 +94,11 @@ static struct lpi_constraints *lpi_constraints_table;
static int lpi_constraints_table_size;
static int rev_id;
+#define for_each_lpi_constraint(entry) \
+ for (int i = 0; \
+ entry = &lpi_constraints_table[i], i < lpi_constraints_table_size; \
+ i++)
+
static void lpi_device_get_constraints_amd(void)
{
union acpi_object *out_obj;
@@ -113,6 +118,12 @@ static void lpi_device_get_constraints_amd(void)
union acpi_object *package = &out_obj->package.elements[i];
if (package->type == ACPI_TYPE_PACKAGE) {
+ if (lpi_constraints_table) {
+ acpi_handle_err(lps0_device_handle,
+ "Duplicate constraints list\n");
+ goto free_acpi_buffer;
+ }
+
lpi_constraints_table = kcalloc(package->package.count,
sizeof(*lpi_constraints_table),
GFP_KERNEL);
@@ -123,17 +134,16 @@ static void lpi_device_get_constraints_amd(void)
acpi_handle_debug(lps0_device_handle,
"LPI: constraints list begin:\n");
- for (j = 0; j < package->package.count; ++j) {
+ for (j = 0; j < package->package.count; j++) {
union acpi_object *info_obj = &package->package.elements[j];
struct lpi_device_constraint_amd dev_info = {};
struct lpi_constraints *list;
acpi_status status;
- for (k = 0; k < info_obj->package.count; ++k) {
- union acpi_object *obj = &info_obj->package.elements[k];
+ list = &lpi_constraints_table[lpi_constraints_table_size];
- list = &lpi_constraints_table[lpi_constraints_table_size];
- list->min_dstate = -1;
+ for (k = 0; k < info_obj->package.count; k++) {
+ union acpi_object *obj = &info_obj->package.elements[k];
switch (k) {
case 0:
@@ -149,27 +159,25 @@ static void lpi_device_get_constraints_amd(void)
dev_info.min_dstate = obj->integer.value;
break;
}
+ }
- if (!dev_info.enabled || !dev_info.name ||
- !dev_info.min_dstate)
- continue;
+ acpi_handle_debug(lps0_device_handle,
+ "Name:%s, Enabled: %d, States: %d, MinDstate: %d\n",
+ dev_info.name,
+ dev_info.enabled,
+ dev_info.function_states,
+ dev_info.min_dstate);
- status = acpi_get_handle(NULL, dev_info.name,
- &list->handle);
- if (ACPI_FAILURE(status))
- continue;
+ if (!dev_info.enabled || !dev_info.name ||
+ !dev_info.min_dstate)
+ continue;
- acpi_handle_debug(lps0_device_handle,
- "Name:%s\n", dev_info.name);
+ status = acpi_get_handle(NULL, dev_info.name, &list->handle);
+ if (ACPI_FAILURE(status))
+ continue;
- list->min_dstate = dev_info.min_dstate;
+ list->min_dstate = dev_info.min_dstate;
- if (list->min_dstate < 0) {
- acpi_handle_debug(lps0_device_handle,
- "Incomplete constraint defined\n");
- continue;
- }
- }
lpi_constraints_table_size++;
}
}
@@ -214,7 +222,7 @@ static void lpi_device_get_constraints(void)
if (!package)
continue;
- for (j = 0; j < package->package.count; ++j) {
+ for (j = 0; j < package->package.count; j++) {
union acpi_object *element =
&(package->package.elements[j]);
@@ -246,7 +254,7 @@ static void lpi_device_get_constraints(void)
constraint->min_dstate = -1;
- for (j = 0; j < package_count; ++j) {
+ for (j = 0; j < package_count; j++) {
union acpi_object *info_obj = &info.package[j];
union acpi_object *cnstr_pkg;
union acpi_object *obj;
@@ -291,32 +299,55 @@ free_acpi_buffer:
ACPI_FREE(out_obj);
}
+/**
+ * acpi_get_lps0_constraint - Get the LPS0 constraint for a device.
+ * @adev: Device to get the constraint for.
+ *
+ * The LPS0 constraint is the shallowest (minimum) power state the device can
+ * be in while still allowing the platform as a whole to achieve additional
+ * energy conservation by entering a system-wide low-power state.
+ *
+ * Returns:
+ * - ACPI power state value of the constraint for @adev on success.
+ * - Otherwise, ACPI_STATE_UNKNOWN.
+ */
+int acpi_get_lps0_constraint(struct acpi_device *adev)
+{
+ struct lpi_constraints *entry;
+
+ for_each_lpi_constraint(entry) {
+ if (adev->handle == entry->handle)
+ return entry->min_dstate;
+ }
+
+ return ACPI_STATE_UNKNOWN;
+}
+
static void lpi_check_constraints(void)
{
- int i;
+ struct lpi_constraints *entry;
- for (i = 0; i < lpi_constraints_table_size; ++i) {
- acpi_handle handle = lpi_constraints_table[i].handle;
- struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
+ for_each_lpi_constraint(entry) {
+ struct acpi_device *adev = acpi_fetch_acpi_dev(entry->handle);
if (!adev)
continue;
- acpi_handle_debug(handle,
+ acpi_handle_debug(entry->handle,
"LPI: required min power state:%s current power state:%s\n",
- acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(entry->min_dstate),
acpi_power_state_string(adev->power.state));
if (!adev->flags.power_manageable) {
- acpi_handle_info(handle, "LPI: Device not power manageable\n");
- lpi_constraints_table[i].handle = NULL;
+ acpi_handle_info(entry->handle, "LPI: Device not power manageable\n");
+ entry->handle = NULL;
continue;
}
- if (adev->power.state < lpi_constraints_table[i].min_dstate)
- acpi_handle_info(handle,
+ if (adev->power.state < entry->min_dstate)
+ acpi_handle_info(entry->handle,
"LPI: Constraint not met; min power state:%s current power state:%s\n",
- acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(entry->min_dstate),
acpi_power_state_string(adev->power.state));
}
}
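
The for_each_lpi_constraint() iterator and the exported acpi_get_lps0_constraint() above let other kernel code look up the minimum D-state the firmware requires for a device. A minimal caller sketch, not part of this merge (the helper name and the D3hot fallback are assumptions for illustration only):

#include <linux/acpi.h>

/* Hypothetical caller: fall back to D3hot when no LPS0 constraint is listed. */
static int example_target_state(struct acpi_device *adev)
{
	int constraint = acpi_get_lps0_constraint(adev);

	if (constraint == ACPI_STATE_UNKNOWN)
		return ACPI_STATE_D3_HOT;

	return constraint;	/* shallowest state the constraint allows */
}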
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index c2b925f8cd4e..63d834dd3811 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -518,3 +518,38 @@ bool acpi_quirk_skip_acpi_ac_and_battery(void)
return false;
}
EXPORT_SYMBOL_GPL(acpi_quirk_skip_acpi_ac_and_battery);
+
+/*
+ * DMI-based quirk for x86 systems that only work correctly when mwait is
+ * disabled for CPU C-states.
+ */
+static int __init acpi_proc_quirk_set_no_mwait(const struct dmi_system_id *id)
+{
+ pr_notice("%s detected - disabling mwait for CPU C-states\n",
+ id->ident);
+ boot_option_idle_override = IDLE_NOMWAIT;
+ return 0;
+}
+
+static const struct dmi_system_id acpi_proc_quirk_mwait_dmi_table[] __initconst = {
+ {
+ .callback = acpi_proc_quirk_set_no_mwait,
+ .ident = "Extensa 5220",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia"),
+ },
+ .driver_data = NULL,
+ },
+ {}
+};
+
+void __init acpi_proc_quirk_mwait_check(void)
+{
+ /*
+	 * Check whether the system matches an entry in the DMI quirk table.
+	 * If it does, OSPM should not use mwait for CPU C-states.
+ */
+ dmi_check_system(acpi_proc_quirk_mwait_dmi_table);
+}
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 76e7d6676657..faebe9f5412a 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -153,7 +153,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
goto err;
inode->i_ino = minor + INODE_OFFSET;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
init_special_inode(inode, S_IFCHR | 0600,
MKDEV(MAJOR(binderfs_dev), minor));
inode->i_fop = &binder_fops;
@@ -432,7 +432,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
}
inode->i_ino = SECOND_INODE;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
init_special_inode(inode, S_IFCHR | 0600,
MKDEV(MAJOR(binderfs_dev), minor));
inode->i_fop = &binder_ctl_fops;
@@ -474,7 +474,7 @@ static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
}
return ret;
}
@@ -703,7 +703,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
inode->i_ino = FIRST_INODE;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | 0755;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_op = &binderfs_dir_inode_operations;
set_nlink(inode, 2);
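
The binderfs hunks above are one instance of the tree-wide ctime accessor conversion: inode_set_ctime_current() stores the current time as the inode's ctime and returns it, so the chained atime/mtime assignment keeps its old shape. A minimal sketch with a hypothetical helper name:

#include <linux/fs.h>

/* Hypothetical helper, illustration only. */
static void example_touch_inode(struct inode *inode)
{
	/* was: inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); */
	inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
}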
diff --git a/drivers/auxdisplay/hd44780_common.c b/drivers/auxdisplay/hd44780_common.c
index 3934c2eebf33..7cbf375b0fa5 100644
--- a/drivers/auxdisplay/hd44780_common.c
+++ b/drivers/auxdisplay/hd44780_common.c
@@ -82,7 +82,15 @@ int hd44780_common_clear_display(struct charlcd *lcd)
hdc->write_cmd(hdc, LCD_CMD_DISPLAY_CLEAR);
/* datasheet says to wait 1,64 milliseconds */
long_sleep(2);
- return 0;
+
+ /*
+	 * The Hitachi HD44780 controller (and compatible ones) resets the
+	 * DDRAM address when executing the DISPLAY_CLEAR command, so the
+	 * following call is not strictly required. However, other controllers
+	 * (e.g. the NewHaven NHD-0220DZW-AG5) do not, so move the cursor to
+	 * home unconditionally to support both.
+ */
+ return hd44780_common_home(lcd);
}
EXPORT_SYMBOL_GPL(hd44780_common_clear_display);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0db2021f7477..b1affac70d5d 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,7 +4,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- bool "Register Map support" if KUNIT_ALL_TESTS
+ bool
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
select IRQ_DOMAIN if REGMAP_IRQ
select MDIO_BUS if REGMAP_MDIO
@@ -23,6 +23,16 @@ config REGMAP_KUNIT
default KUNIT_ALL_TESTS
select REGMAP_RAM
+config REGMAP_BUILD
+ bool "Enable regmap build"
+ depends on KUNIT
+ select REGMAP
+ help
+	  This option exists purely to allow the regmap KUnit tests to
+	  be enabled without having to enable some driver that uses
+	  regmap, working around the awkward way in which KUnit tests
+	  are normally enabled.
+
config REGMAP_AC97
tristate
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 283c2e02a298..41edd6a430eb 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -74,7 +74,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
rcu_read_unlock();
entry = kmalloc((last - index + 1) * sizeof(unsigned long),
- GFP_KERNEL);
+ map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -92,7 +92,7 @@ static int regcache_maple_write(struct regmap *map, unsigned int reg,
mas_lock(&mas);
mas_set_range(&mas, index, last);
- ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
+ ret = mas_store_gfp(&mas, entry, map->alloc_flags);
mas_unlock(&mas);
@@ -134,7 +134,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
lower = kmemdup(entry, ((min - mas.index) *
sizeof(unsigned long)),
- GFP_KERNEL);
+ map->alloc_flags);
if (!lower) {
ret = -ENOMEM;
goto out_unlocked;
@@ -148,7 +148,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper = kmemdup(&entry[max + 1],
((mas.last - max) *
sizeof(unsigned long)),
- GFP_KERNEL);
+ map->alloc_flags);
if (!upper) {
ret = -ENOMEM;
goto out_unlocked;
@@ -162,7 +162,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
/* Insert new nodes with the saved data */
if (lower) {
mas_set_range(&mas, lower_index, lower_last);
- ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
+ ret = mas_store_gfp(&mas, lower, map->alloc_flags);
if (ret != 0)
goto out;
lower = NULL;
@@ -170,7 +170,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
if (upper) {
mas_set_range(&mas, upper_index, upper_last);
- ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
+ ret = mas_store_gfp(&mas, upper, map->alloc_flags);
if (ret != 0)
goto out;
upper = NULL;
@@ -320,7 +320,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
unsigned long *entry;
int i, ret;
- entry = kcalloc(last - first + 1, sizeof(unsigned long), GFP_KERNEL);
+ entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);
if (!entry)
return -ENOMEM;
@@ -331,7 +331,7 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
mas_set_range(&mas, map->reg_defaults[first].reg,
map->reg_defaults[last].reg);
- ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
+ ret = mas_store_gfp(&mas, entry, map->alloc_flags);
mas_unlock(&mas);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 584bcc55f56e..db716ffd083e 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -22,7 +22,7 @@ struct regcache_rbtree_node {
/* block of adjacent registers */
void *block;
/* Which registers are present */
- long *cache_present;
+ unsigned long *cache_present;
/* base register handled by this block */
unsigned int base_reg;
/* number of registers available in the block */
@@ -277,7 +277,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
blk = krealloc(rbnode->block,
blklen * map->cache_word_size,
- GFP_KERNEL);
+ map->alloc_flags);
if (!blk)
return -ENOMEM;
@@ -286,7 +286,7 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
- GFP_KERNEL);
+ map->alloc_flags);
if (!present)
return -ENOMEM;
@@ -320,7 +320,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
const struct regmap_range *range;
int i;
- rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+ rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
if (!rbnode)
return NULL;
@@ -346,13 +346,13 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
}
rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
- GFP_KERNEL);
+ map->alloc_flags);
if (!rbnode->block)
goto err_free;
rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
sizeof(*rbnode->cache_present),
- GFP_KERNEL);
+ map->alloc_flags);
if (!rbnode->cache_present)
goto err_free_block;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 7d3e47436056..c5d151e9c481 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -558,6 +558,29 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
+/**
+ * regcache_reg_cached - Check if a register is cached
+ *
+ * @map: map to check
+ * @reg: register to check
+ *
+ * Reports if a register is cached.
+ */
+bool regcache_reg_cached(struct regmap *map, unsigned int reg)
+{
+ unsigned int val;
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ ret = regcache_read(map, reg, &val);
+
+ map->unlock(map->lock_arg);
+
+ return ret == 0;
+}
+EXPORT_SYMBOL_GPL(regcache_reg_cached);
+
void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val)
{
@@ -587,14 +610,6 @@ void regcache_set_val(struct regmap *map, void *base, unsigned int idx,
cache[idx] = val;
break;
}
-#ifdef CONFIG_64BIT
- case 8: {
- u64 *cache = base;
-
- cache[idx] = val;
- break;
- }
-#endif
default:
BUG();
}
@@ -627,13 +642,6 @@ unsigned int regcache_get_val(struct regmap *map, const void *base,
return cache[idx];
}
-#ifdef CONFIG_64BIT
- case 8: {
- const u64 *cache = base;
-
- return cache[idx];
- }
-#endif
default:
BUG();
}
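
regcache_reg_cached() takes and releases the map lock internally, so callers only need to ask whether a read would be served from the cache. A usage sketch with hypothetical names, not taken from this merge:

#include <linux/device.h>
#include <linux/regmap.h>

/* Hypothetical debug helper: note when a read will actually hit the bus. */
static int example_read_reg(struct regmap *map, unsigned int reg,
			    unsigned int *val)
{
	if (!regcache_reg_cached(map, reg))
		dev_dbg(regmap_get_device(map),
			"reg %#x not cached, read will hit the bus\n", reg);

	return regmap_read(map, reg, val);
}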
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 9ff3018a46aa..264d29b3fced 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -836,6 +836,45 @@ static void cache_drop(struct kunit *test)
regmap_exit(map);
}
+static void cache_present(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[i] = false;
+
+ /* No defaults so no registers cached. */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
+
+ /* We didn't trigger any reads */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_FALSE(test, data->read[i]);
+
+ /* Fill the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+
+ /* Now everything should be cached */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
+
+ regmap_exit(map);
+}
+
struct raw_test_types {
const char *name;
@@ -1177,6 +1216,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8132b5c101c4..99d7fd85ca7d 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -202,15 +202,6 @@ static int regmap_mmio_noinc_write(void *context, unsigned int reg,
writel(swab32(valp[i]), ctx->regs + reg);
goto out_clk;
}
-#ifdef CONFIG_64BIT
- case 8:
- {
- const u64 *valp = (const u64 *)val;
- for (i = 0; i < val_count; i++)
- writeq(swab64(valp[i]), ctx->regs + reg);
- goto out_clk;
- }
-#endif
default:
ret = -EINVAL;
goto out_clk;
@@ -227,11 +218,6 @@ static int regmap_mmio_noinc_write(void *context, unsigned int reg,
case 4:
writesl(ctx->regs + reg, (const u32 *)val, val_count);
break;
-#ifdef CONFIG_64BIT
- case 8:
- writesq(ctx->regs + reg, (const u64 *)val, val_count);
- break;
-#endif
default:
ret = -EINVAL;
break;
@@ -363,11 +349,6 @@ static int regmap_mmio_noinc_read(void *context, unsigned int reg,
case 4:
readsl(ctx->regs + reg, (u32 *)val, val_count);
break;
-#ifdef CONFIG_64BIT
- case 8:
- readsq(ctx->regs + reg, (u64 *)val, val_count);
- break;
-#endif
default:
ret = -EINVAL;
goto out_clk;
@@ -387,11 +368,6 @@ static int regmap_mmio_noinc_read(void *context, unsigned int reg,
case 4:
swab32_array(val, val_count);
break;
-#ifdef CONFIG_64BIT
- case 8:
- swab64_array(val, val_count);
- break;
-#endif
default:
ret = -EINVAL;
break;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 1bfd1727b4da..884cb51c8f67 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -311,26 +311,6 @@ static void regmap_format_32_native(void *buf, unsigned int val,
memcpy(buf, &v, sizeof(v));
}
-#ifdef CONFIG_64BIT
-static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
-{
- put_unaligned_be64((u64) val << shift, buf);
-}
-
-static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
-{
- put_unaligned_le64((u64) val << shift, buf);
-}
-
-static void regmap_format_64_native(void *buf, unsigned int val,
- unsigned int shift)
-{
- u64 v = (u64) val << shift;
-
- memcpy(buf, &v, sizeof(v));
-}
-#endif
-
static void regmap_parse_inplace_noop(void *buf)
{
}
@@ -411,40 +391,6 @@ static unsigned int regmap_parse_32_native(const void *buf)
return v;
}
-#ifdef CONFIG_64BIT
-static unsigned int regmap_parse_64_be(const void *buf)
-{
- return get_unaligned_be64(buf);
-}
-
-static unsigned int regmap_parse_64_le(const void *buf)
-{
- return get_unaligned_le64(buf);
-}
-
-static void regmap_parse_64_be_inplace(void *buf)
-{
- u64 v = get_unaligned_be64(buf);
-
- memcpy(buf, &v, sizeof(v));
-}
-
-static void regmap_parse_64_le_inplace(void *buf)
-{
- u64 v = get_unaligned_le64(buf);
-
- memcpy(buf, &v, sizeof(v));
-}
-
-static unsigned int regmap_parse_64_native(const void *buf)
-{
- u64 v;
-
- memcpy(&v, buf, sizeof(v));
- return v;
-}
-#endif
-
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
@@ -1005,24 +951,6 @@ struct regmap *__regmap_init(struct device *dev,
}
break;
-#ifdef CONFIG_64BIT
- case 64:
- switch (reg_endian) {
- case REGMAP_ENDIAN_BIG:
- map->format.format_reg = regmap_format_64_be;
- break;
- case REGMAP_ENDIAN_LITTLE:
- map->format.format_reg = regmap_format_64_le;
- break;
- case REGMAP_ENDIAN_NATIVE:
- map->format.format_reg = regmap_format_64_native;
- break;
- default:
- goto err_hwlock;
- }
- break;
-#endif
-
default:
goto err_hwlock;
}
@@ -1086,28 +1014,6 @@ struct regmap *__regmap_init(struct device *dev,
goto err_hwlock;
}
break;
-#ifdef CONFIG_64BIT
- case 64:
- switch (val_endian) {
- case REGMAP_ENDIAN_BIG:
- map->format.format_val = regmap_format_64_be;
- map->format.parse_val = regmap_parse_64_be;
- map->format.parse_inplace = regmap_parse_64_be_inplace;
- break;
- case REGMAP_ENDIAN_LITTLE:
- map->format.format_val = regmap_format_64_le;
- map->format.parse_val = regmap_parse_64_le;
- map->format.parse_inplace = regmap_parse_64_le_inplace;
- break;
- case REGMAP_ENDIAN_NATIVE:
- map->format.format_val = regmap_format_64_native;
- map->format.parse_val = regmap_parse_64_native;
- break;
- default:
- goto err_hwlock;
- }
- break;
-#endif
}
if (map->format.format_write) {
@@ -2158,9 +2064,6 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
u8 *u8p;
u16 *u16p;
u32 *u32p;
-#ifdef CONFIG_64BIT
- u64 *u64p;
-#endif
int ret;
int i;
@@ -2180,13 +2083,6 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
if (write)
lastval = (unsigned int)u32p[val_count - 1];
break;
-#ifdef CONFIG_64BIT
- case 8:
- u64p = val;
- if (write)
- lastval = (unsigned int)u64p[val_count - 1];
- break;
-#endif
default:
return -EINVAL;
}
@@ -2224,11 +2120,6 @@ static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
case 4:
pr_cont("%x", u32p[i]);
break;
-#ifdef CONFIG_64BIT
- case 8:
- pr_cont("%llx", u64p[i]);
- break;
-#endif
default:
break;
}
@@ -2436,11 +2327,6 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
case 4:
ival = *(u32 *)(val + (i * val_bytes));
break;
-#ifdef CONFIG_64BIT
- case 8:
- ival = *(u64 *)(val + (i * val_bytes));
- break;
-#endif
default:
ret = -EINVAL;
goto out;
@@ -3205,9 +3091,6 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
for (i = 0; i < val_count * val_bytes; i += val_bytes)
map->format.parse_inplace(val + i);
} else {
-#ifdef CONFIG_64BIT
- u64 *u64 = val;
-#endif
u32 *u32 = val;
u16 *u16 = val;
u8 *u8 = val;
@@ -3223,11 +3106,6 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
goto out;
switch (map->format.val_bytes) {
-#ifdef CONFIG_64BIT
- case 8:
- u64[i] = ival;
- break;
-#endif
case 4:
u32[i] = ival;
break;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index e460c9799d9f..2b98114a9fe0 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1547,7 +1547,6 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
rel_fdc();
return -EBUSY;
}
- fsync_bdev(bdev);
if (fd_motor_on(drive) == 0) {
rel_fdc();
return -ENODEV;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2db9b186b977..ea4eb88a2e45 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3255,7 +3255,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
if (!disk || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(disk->part0, true);
+ disk_force_media_change(disk);
}
mutex_unlock(&open_lock);
} else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 637c5bda2387..9f2d412fc560 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -603,7 +603,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_err;
/* and ... switch */
- disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ disk_force_media_change(lo->lo_disk);
blk_mq_freeze_queue(lo->lo_queue);
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
lo->lo_backing_file = file;
@@ -1067,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
- disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ disk_force_media_change(lo->lo_disk);
set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
@@ -1171,7 +1171,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
if (!release)
blk_mq_unfreeze_queue(lo->lo_queue);
- disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ disk_force_media_change(lo->lo_disk);
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 8576d696c7a2..42e0159bb258 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1434,12 +1434,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd)
return ret;
}
-static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
- struct block_device *bdev)
+static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
{
+ blk_mark_disk_dead(nbd->disk);
nbd_clear_sock(nbd);
- __invalidate_device(bdev, true);
- nbd_bdev_reset(nbd);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -1465,7 +1463,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_DISCONNECT:
return nbd_disconnect(nbd);
case NBD_CLEAR_SOCK:
- nbd_clear_sock_ioctl(nbd, bdev);
+ nbd_clear_sock_ioctl(nbd);
return 0;
case NBD_SET_SOCK:
return nbd_add_socket(nbd, arg, false);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index e0b3786ca51b..8de74dcfa18c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
- depends on (X86 || IA64) && PCI
+ depends on (X86 || IA64 || COMPILE_TEST) && PCI
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -50,7 +50,8 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
- depends on (X86 || PPC_MAPLE) && PCI
+ depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+ depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -63,7 +64,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF
+ depends on (ARCH_AT91 || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -113,7 +114,8 @@ config HW_RANDOM_IPROC_RNG200
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
- depends on X86_32 && PCI
+ depends on (X86_32 || COMPILE_TEST)
+ depends on PCI
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -205,7 +207,7 @@ config HW_RANDOM_OCTEON
config HW_RANDOM_PASEMI
tristate "PA Semi HW Random Number Generator support"
- depends on PPC_PASEMI
+ depends on PPC_PASEMI || (PPC && COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -228,7 +230,7 @@ config HW_RANDOM_VIRTIO
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
- depends on SOC_IMX31
+ depends on SOC_IMX31 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -241,7 +243,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
- depends on HAS_IOMEM && HAVE_CLK
+ depends on HAS_IOMEM
depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
default HW_RANDOM
help
@@ -256,8 +258,7 @@ config HW_RANDOM_IMX_RNGC
config HW_RANDOM_INGENIC_RNG
tristate "Ingenic Random Number Generator support"
- depends on HW_RANDOM
- depends on MACH_JZ4780 || MACH_X1000
+ depends on MACH_JZ4780 || MACH_X1000 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number Generator
@@ -271,8 +272,7 @@ config HW_RANDOM_INGENIC_RNG
config HW_RANDOM_INGENIC_TRNG
tristate "Ingenic True Random Number Generator support"
- depends on HW_RANDOM
- depends on MACH_X1830
+ depends on MACH_X1830 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number Generator
@@ -324,7 +324,7 @@ config HW_RANDOM_POWERNV
config HW_RANDOM_HISI
tristate "Hisilicon Random Number Generator support"
- depends on HW_RANDOM && ARCH_HISI
+ depends on ARCH_HISI || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -348,7 +348,7 @@ config HW_RANDOM_HISTB
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
- depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST)
+ depends on ARCH_STI || COMPILE_TEST
help
This driver provides kernel-side support for the Random Number
Generator hardware found on STi series of SoCs.
@@ -358,7 +358,7 @@ config HW_RANDOM_ST
config HW_RANDOM_XGENE
tristate "APM X-Gene True Random Number Generator (TRNG) support"
- depends on HW_RANDOM && ARCH_XGENE
+ depends on ARCH_XGENE || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -371,7 +371,7 @@ config HW_RANDOM_XGENE
config HW_RANDOM_STM32
tristate "STMicroelectronics STM32 random number generator"
- depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
+ depends on ARCH_STM32 || COMPILE_TEST
depends on HAS_IOMEM
default HW_RANDOM
help
@@ -385,8 +385,8 @@ config HW_RANDOM_STM32
config HW_RANDOM_PIC32
tristate "Microchip PIC32 Random Number Generator support"
- depends on HW_RANDOM && MACH_PIC32
- default y
+ depends on MACH_PIC32 || COMPILE_TEST
+ default HW_RANDOM if MACH_PIC32
help
This driver provides kernel-side support for the Random Number
Generator hardware found on a PIC32.
@@ -425,7 +425,8 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && ARCH_THUNDER
+ depends on PCI
+ depends on ARCH_THUNDER || (ARM64 && COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 0555e3838bce..86162a13681e 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index e34c3ea692b6..7e954341b09f 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -105,8 +105,6 @@ static int smccc_trng_probe(struct platform_device *pdev)
trng->name = "smccc_trng";
trng->read = smccc_trng_read;
- platform_set_drvdata(pdev, trng);
-
return devm_hwrng_register(&pdev->dev, trng);
}
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index b8effe77d80f..a37367ebcbac 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -15,7 +15,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/hw_random.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c
index 5b7ca0416490..9de7466e6896 100644
--- a/drivers/char/hw_random/ba431-rng.c
+++ b/drivers/char/hw_random/ba431-rng.c
@@ -189,13 +189,9 @@ static int ba431_trng_probe(struct platform_device *pdev)
ba431->rng.cleanup = ba431_trng_cleanup;
ba431->rng.read = ba431_trng_read;
- platform_set_drvdata(pdev, ba431);
-
ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
- if (ret) {
- dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n");
dev_info(&pdev->dev, "BA431 TRNG registered\n");
@@ -203,7 +199,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
}
static const struct of_device_id ba431_trng_dt_ids[] = {
- { .compatible = "silex-insight,ba431-rng", .data = NULL },
+ { .compatible = "silex-insight,ba431-rng" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);
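
The ba431 change above is one of many conversions in this merge to dev_err_probe(), which logs the failure (quietly for -EPROBE_DEFER) and returns the error code in a single statement. A generic sketch of the pattern, with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* replaces the old dev_err(); return PTR_ERR(); idiom */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");

	return 0;
}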
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index e98fcac578d6..e19b0f9f48b9 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -8,8 +8,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/clk.h>
diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c
index 302ffa354c2f..1abbff04a015 100644
--- a/drivers/char/hw_random/cctrng.c
+++ b/drivers/char/hw_random/cctrng.c
@@ -455,35 +455,6 @@ static void cc_trng_startwork_handler(struct work_struct *w)
cc_trng_hw_trigger(drvdata);
}
-
-static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
-{
- struct clk *clk;
- struct device *dev = &(drvdata->pdev->dev);
- int rc = 0;
-
- clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(clk))
- return dev_err_probe(dev, PTR_ERR(clk),
- "Error getting clock\n");
-
- drvdata->clk = clk;
-
- rc = clk_prepare_enable(drvdata->clk);
- if (rc) {
- dev_err(dev, "Failed to enable clock\n");
- return rc;
- }
-
- return 0;
-}
-
-static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
-{
- clk_disable_unprepare(drvdata->clk);
-}
-
-
static int cctrng_probe(struct platform_device *pdev)
{
struct cctrng_drvdata *drvdata;
@@ -492,6 +463,10 @@ static int cctrng_probe(struct platform_device *pdev)
u32 val;
int irq;
+ /* Compile time assertion checks */
+ BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
+ BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -510,10 +485,8 @@ static int cctrng_probe(struct platform_device *pdev)
drvdata->circ.buf = (char *)drvdata->data_buf;
drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(drvdata->cc_base)) {
- dev_err(dev, "Failed to ioremap registers");
- return PTR_ERR(drvdata->cc_base);
- }
+ if (IS_ERR(drvdata->cc_base))
+ return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers");
/* Then IRQ */
irq = platform_get_irq(pdev, 0);
@@ -522,16 +495,13 @@ static int cctrng_probe(struct platform_device *pdev)
/* parse sampling rate from device tree */
rc = cc_trng_parse_sampling_ratio(drvdata);
- if (rc) {
- dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
- return rc;
- }
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n");
- rc = cc_trng_clk_init(drvdata);
- if (rc) {
- dev_err(dev, "cc_trng_clk_init failed\n");
- return rc;
- }
+ drvdata->clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(drvdata->clk))
+ return dev_err_probe(dev, PTR_ERR(drvdata->clk),
+ "Failed to get or enable the clock\n");
INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
@@ -539,10 +509,8 @@ static int cctrng_probe(struct platform_device *pdev)
/* register the driver isr function */
rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
- if (rc) {
- dev_err(dev, "Could not register to interrupt %d\n", irq);
- goto post_clk_err;
- }
+ if (rc)
+ return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);
dev_dbg(dev, "Registered to IRQ: %d\n", irq);
/* Clear all pending interrupts */
@@ -557,17 +525,13 @@ static int cctrng_probe(struct platform_device *pdev)
/* init PM */
rc = cc_trng_pm_init(drvdata);
- if (rc) {
- dev_err(dev, "cc_trng_pm_init failed\n");
- goto post_clk_err;
- }
+ if (rc)
+ return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n");
/* increment device's usage counter */
rc = cc_trng_pm_get(dev);
- if (rc) {
- dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
- goto post_pm_err;
- }
+ if (rc)
+ return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc);
/* set pending_hw to verify that HW won't be triggered from read */
atomic_set(&drvdata->pending_hw, 1);
@@ -593,9 +557,6 @@ static int cctrng_probe(struct platform_device *pdev)
post_pm_err:
cc_trng_pm_fini(drvdata);
-post_clk_err:
- cc_trng_clk_fini(drvdata);
-
return rc;
}
@@ -608,8 +569,6 @@ static int cctrng_remove(struct platform_device *pdev)
cc_trng_pm_fini(drvdata);
- cc_trng_clk_fini(drvdata);
-
dev_info(dev, "ARM cctrng device terminated\n");
return 0;
@@ -698,21 +657,7 @@ static struct platform_driver cctrng_driver = {
.remove = cctrng_remove,
};
-static int __init cctrng_mod_init(void)
-{
- /* Compile time assertion checks */
- BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
- BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
-
- return platform_driver_register(&cctrng_driver);
-}
-module_init(cctrng_mod_init);
-
-static void __exit cctrng_mod_exit(void)
-{
- platform_driver_unregister(&cctrng_driver);
-}
-module_exit(cctrng_mod_exit);
+module_platform_driver(cctrng_driver);
/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
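
module_platform_driver() generates the register/unregister boilerplate that the cctrng patch deletes; with the BUILD_BUG_ON() checks moved into probe, a custom init function has nothing left to do. Roughly what the macro expands to, for a hypothetical example_driver defined elsewhere:

#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_driver_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_driver_exit);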
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 0cd7e1a8e499..31935316a160 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -187,10 +187,8 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, rng);
rng->reg_base = pcim_iomap(pdev, 0, 0);
- if (!rng->reg_base) {
- dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n");
- return -ENOMEM;
- }
+ if (!rng->reg_base)
+ return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
@@ -205,19 +203,12 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
reset_rng_health_state(rng);
err = devm_hwrng_register(&pdev->dev, &rng->ops);
- if (err) {
- dev_err(&pdev->dev, "Could not register hwrng device.\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Could not register hwrng device.\n");
return 0;
}
-static void cn10k_rng_remove(struct pci_dev *pdev)
-{
- /* Nothing to do */
-}
-
static const struct pci_device_id cn10k_rng_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
{0,},
@@ -229,7 +220,6 @@ static struct pci_driver cn10k_rng_driver = {
.name = "cn10k_rng",
.id_table = cn10k_rng_id_table,
.probe = cn10k_rng_probe,
- .remove = cn10k_rng_remove,
};
module_pci_driver(cn10k_rng_driver);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index f34d356fe2c0..e3598ec9cfca 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -15,14 +15,13 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
-#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
-#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
index 9cc3d542dd0f..30207b7ac5f4 100644
--- a/drivers/char/hw_random/exynos-trng.c
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -185,14 +185,14 @@ static int exynos_trng_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused exynos_trng_suspend(struct device *dev)
+static int exynos_trng_suspend(struct device *dev)
{
pm_runtime_put_sync(dev);
return 0;
}
-static int __maybe_unused exynos_trng_resume(struct device *dev)
+static int exynos_trng_resume(struct device *dev)
{
int ret;
@@ -205,7 +205,7 @@ static int __maybe_unused exynos_trng_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
exynos_trng_resume);
static const struct of_device_id exynos_trng_dt_match[] = {
@@ -219,7 +219,7 @@ MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
static struct platform_driver exynos_trng_driver = {
.driver = {
.name = "exynos-trng",
- .pm = &exynos_trng_pm_ops,
+ .pm = pm_sleep_ptr(&exynos_trng_pm_ops),
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
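
DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() is the current idiom for sleep-only PM callbacks: the __maybe_unused annotations go away because the ops (and the callbacks they reference) are discarded by the compiler when CONFIG_PM_SLEEP is disabled. A minimal sketch with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
		.pm = pm_sleep_ptr(&example_pm_ops),
	},
};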
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index bf07f17f78c8..e4b385b01b11 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -239,10 +239,8 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
return PTR_ERR(rngc->base);
rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
- if (IS_ERR(rngc->clk)) {
- dev_err(&pdev->dev, "Can not get rng_clk\n");
- return PTR_ERR(rngc->clk);
- }
+ if (IS_ERR(rngc->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -272,24 +270,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
if (self_test) {
ret = imx_rngc_self_test(rngc);
- if (ret) {
- dev_err(rngc->dev, "self test failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "self test failed\n");
}
ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
- if (ret) {
- dev_err(&pdev->dev, "hwrng registration failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
dev_info(&pdev->dev,
"Freescale RNG%c registered (HW revision %d.%02d)\n",
diff --git a/drivers/char/hw_random/ingenic-rng.c b/drivers/char/hw_random/ingenic-rng.c
index 055cfe59f519..4f18c3fa5427 100644
--- a/drivers/char/hw_random/ingenic-rng.c
+++ b/drivers/char/hw_random/ingenic-rng.c
@@ -95,7 +95,7 @@ static int ingenic_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
- priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
+ priv->version = (enum ingenic_rng_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
priv->rng.name = pdev->name;
priv->rng.init = ingenic_rng_init;
diff --git a/drivers/char/hw_random/ingenic-trng.c b/drivers/char/hw_random/ingenic-trng.c
index 0eb80f786f4d..1672320e7d3d 100644
--- a/drivers/char/hw_random/ingenic-trng.c
+++ b/drivers/char/hw_random/ingenic-trng.c
@@ -11,8 +11,8 @@
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -22,8 +22,6 @@
#define TRNG_REG_STATUS_OFFSET 0x08
/* bits within the CFG register */
-#define CFG_RDY_CLR BIT(12)
-#define CFG_INT_MASK BIT(11)
#define CFG_GEN_EN BIT(0)
/* bits within the STATUS register */
@@ -31,7 +29,6 @@
struct ingenic_trng {
void __iomem *base;
- struct clk *clk;
struct hwrng rng;
};
@@ -79,6 +76,7 @@ static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait
static int ingenic_trng_probe(struct platform_device *pdev)
{
struct ingenic_trng *trng;
+ struct clk *clk;
int ret;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
@@ -86,60 +84,28 @@ static int ingenic_trng_probe(struct platform_device *pdev)
return -ENOMEM;
trng->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(trng->base)) {
- pr_err("%s: Failed to map DTRNG registers\n", __func__);
- ret = PTR_ERR(trng->base);
- return PTR_ERR(trng->base);
- }
+ if (IS_ERR(trng->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
+ "%s: Failed to map DTRNG registers\n", __func__);
- trng->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(trng->clk)) {
- ret = PTR_ERR(trng->clk);
- pr_crit("%s: Cannot get DTRNG clock\n", __func__);
- return PTR_ERR(trng->clk);
- }
-
- ret = clk_prepare_enable(trng->clk);
- if (ret) {
- pr_crit("%s: Unable to enable DTRNG clock\n", __func__);
- return ret;
- }
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "%s: Cannot get and enable DTRNG clock\n", __func__);
trng->rng.name = pdev->name;
trng->rng.init = ingenic_trng_init;
trng->rng.cleanup = ingenic_trng_cleanup;
trng->rng.read = ingenic_trng_read;
- ret = hwrng_register(&trng->rng);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register hwrng\n");
- goto err_unprepare_clk;
- }
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
platform_set_drvdata(pdev, trng);
dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
return 0;
-
-err_unprepare_clk:
- clk_disable_unprepare(trng->clk);
- return ret;
-}
-
-static int ingenic_trng_remove(struct platform_device *pdev)
-{
- struct ingenic_trng *trng = platform_get_drvdata(pdev);
- unsigned int ctrl;
-
- hwrng_unregister(&trng->rng);
-
- ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
- ctrl &= ~CFG_GEN_EN;
- writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
-
- clk_disable_unprepare(trng->clk);
-
- return 0;
}
static const struct of_device_id ingenic_trng_of_match[] = {
@@ -150,7 +116,6 @@ MODULE_DEVICE_TABLE(of, ingenic_trng_of_match);
static struct platform_driver ingenic_trng_driver = {
.probe = ingenic_trng_probe,
- .remove = ingenic_trng_remove,
.driver = {
.name = "ingenic-trng",
.of_match_table = ingenic_trng_of_match,
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index 06bc060534d8..440fe28bddc0 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -12,8 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -182,6 +181,8 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
+ dev_set_drvdata(dev, priv);
+
priv->rng.name = "iproc-rng200";
priv->rng.read = iproc_rng200_read;
priv->rng.init = iproc_rng200_init;
@@ -199,6 +200,28 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused iproc_rng200_suspend(struct device *dev)
+{
+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+ iproc_rng200_cleanup(&priv->rng);
+
+ return 0;
+}
+
+static int __maybe_unused iproc_rng200_resume(struct device *dev)
+{
+ struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+ iproc_rng200_init(&priv->rng);
+
+ return 0;
+}
+
+static const struct dev_pm_ops iproc_rng200_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
+};
+
static const struct of_device_id iproc_rng200_of_match[] = {
{ .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
@@ -212,6 +235,7 @@ static struct platform_driver iproc_rng200_driver = {
.driver = {
.name = "iproc-rng200",
.of_match_table = iproc_rng200_of_match,
+ .pm = &iproc_rng200_pm_ops,
},
.probe = iproc_rng200_probe,
};
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index e8f9621e7954..8c6a40d6ce3d 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -13,8 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
-static struct clk *rng_clk;
-
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
void __iomem *base = (void __iomem *)rng->priv;
@@ -36,21 +34,17 @@ static struct hwrng nmk_rng = {
static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
+ struct clk *rng_clk;
void __iomem *base;
int ret;
- rng_clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(rng_clk)) {
- dev_err(&dev->dev, "could not get rng clock\n");
- ret = PTR_ERR(rng_clk);
- return ret;
- }
-
- clk_prepare_enable(rng_clk);
+ rng_clk = devm_clk_get_enabled(&dev->dev, NULL);
+ if (IS_ERR(rng_clk))
+ return dev_err_probe(&dev->dev, PTR_ERR(rng_clk), "could not get rng clock\n");
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
- goto out_clk;
+ return ret;
ret = -ENOMEM;
base = devm_ioremap(&dev->dev, dev->res.start,
resource_size(&dev->res));
@@ -64,15 +58,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
out_release:
amba_release_regions(dev);
-out_clk:
- clk_disable_unprepare(rng_clk);
return ret;
}
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/hw_random/npcm-rng.c b/drivers/char/hw_random/npcm-rng.c
index 9903d0357e06..8a304b754217 100644
--- a/drivers/char/hw_random/npcm-rng.c
+++ b/drivers/char/hw_random/npcm-rng.c
@@ -8,12 +8,11 @@
#include <linux/init.h>
#include <linux/random.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
-#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
-#include <linux/of_device.h>
#define NPCM_RNGCS_REG 0x00 /* Control and status register */
#define NPCM_RNGD_REG 0x04 /* Data register */
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 00ff96703dd2..be03f76a2a80 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -26,8 +26,6 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index f06e4f95114f..18dc46b1b58e 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -20,7 +20,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 2498d4ef9fe2..6959d6edd44c 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -9,11 +9,10 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/io.h>
#define SDCRNG_CTL_REG 0x00
diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c
index 99c8bd0859a1..888e6f5cec1f 100644
--- a/drivers/char/hw_random/pic32-rng.c
+++ b/drivers/char/hw_random/pic32-rng.c
@@ -12,31 +12,22 @@
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define RNGCON 0x04
-#define TRNGEN BIT(8)
-#define PRNGEN BIT(9)
-#define PRNGCONT BIT(10)
-#define TRNGMOD BIT(11)
-#define SEEDLOAD BIT(12)
-#define RNGPOLY1 0x08
-#define RNGPOLY2 0x0C
-#define RNGNUMGEN1 0x10
-#define RNGNUMGEN2 0x14
+#define TRNGEN BIT(8)
+#define TRNGMOD BIT(11)
#define RNGSEED1 0x18
#define RNGSEED2 0x1C
#define RNGRCNT 0x20
-#define RCNT_MASK 0x7F
+#define RCNT_MASK 0x7F
struct pic32_rng {
void __iomem *base;
struct hwrng rng;
- struct clk *clk;
};
/*
@@ -46,6 +37,15 @@ struct pic32_rng {
*/
#define RNG_TIMEOUT 500
+static int pic32_rng_init(struct hwrng *rng)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+
+ /* enable TRNG in enhanced mode */
+ writel(TRNGEN | TRNGMOD, priv->base + RNGCON);
+ return 0;
+}
+
static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
@@ -67,11 +67,17 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
return -EIO;
}
+static void pic32_rng_cleanup(struct hwrng *rng)
+{
+ struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+
+ writel(0, priv->base + RNGCON);
+}
+
static int pic32_rng_probe(struct platform_device *pdev)
{
struct pic32_rng *priv;
- u32 v;
- int ret;
+ struct clk *clk;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -81,41 +87,16 @@ static int pic32_rng_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- /* enable TRNG in enhanced mode */
- v = TRNGEN | TRNGMOD;
- writel(v, priv->base + RNGCON);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
priv->rng.name = pdev->name;
+ priv->rng.init = pic32_rng_init;
priv->rng.read = pic32_rng_read;
+ priv->rng.cleanup = pic32_rng_cleanup;
- ret = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (ret)
- goto err_register;
-
- platform_set_drvdata(pdev, priv);
-
- return 0;
-
-err_register:
- clk_disable_unprepare(priv->clk);
- return ret;
-}
-
-static int pic32_rng_remove(struct platform_device *pdev)
-{
- struct pic32_rng *rng = platform_get_drvdata(pdev);
-
- writel(0, rng->base + RNGCON);
- clk_disable_unprepare(rng->clk);
- return 0;
+ return devm_hwrng_register(&pdev->dev, &priv->rng);
}
static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
@@ -126,10 +107,9 @@ MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
static struct platform_driver pic32_rng_driver = {
.probe = pic32_rng_probe,
- .remove = pic32_rng_remove,
.driver = {
.name = "pic32-rng",
- .of_match_table = of_match_ptr(pic32_rng_of_match),
+ .of_match_table = pic32_rng_of_match,
},
};
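
Moving the enable/disable writes into the hwrng .init/.cleanup callbacks, as the pic32 patch does, lets the core switch the generator on only while it is the active RNG and removes the need for a .remove callback. A generic sketch of the callback pair, with hypothetical register names and layout:

#include <linux/bits.h>
#include <linux/container_of.h>
#include <linux/hw_random.h>
#include <linux/io.h>

#define EXAMPLE_CTRL	0x04		/* hypothetical control register */
#define EXAMPLE_ENABLE	BIT(0)		/* hypothetical enable bit */

struct example_rng {
	void __iomem *base;
	struct hwrng rng;
};

/* Called by the hwrng core when this device becomes the active RNG. */
static int example_rng_init(struct hwrng *rng)
{
	struct example_rng *priv = container_of(rng, struct example_rng, rng);

	writel(EXAMPLE_ENABLE, priv->base + EXAMPLE_CTRL);
	return 0;
}

/* Called when the core stops using this device. */
static void example_rng_cleanup(struct hwrng *rng)
{
	struct example_rng *priv = container_of(rng, struct example_rng, rng);

	writel(0, priv->base + EXAMPLE_CTRL);
}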
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index a6731cf0627a..efb6a9f9a11b 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -10,8 +10,9 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index 26f322d19a88..3db9d868efb1 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -113,16 +113,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- if (res->start % 4 != 0 || resource_size(res) < 4) {
- dev_err(&pdev->dev,
- "address must be at least four bytes wide and 32-bit aligned\n");
- return -EINVAL;
- }
-
/* Allocate memory for the device structure (and zero it) */
priv = devm_kzalloc(&pdev->dev,
sizeof(struct timeriomem_rng_private), GFP_KERNEL);
@@ -131,6 +121,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ priv->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->io_base))
+ return PTR_ERR(priv->io_base);
+
+ if (res->start % 4 != 0 || resource_size(res) < 4) {
+ dev_err(&pdev->dev,
+ "address must be at least four bytes wide and 32-bit aligned\n");
+ return -EINVAL;
+ }
+
if (pdev->dev.of_node) {
int i;
@@ -158,11 +158,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
- priv->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->io_base)) {
- return PTR_ERR(priv->io_base);
- }
-
/* Assume random data is already available. */
priv->present = 1;
complete(&priv->completion);
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index 7c8f3cb7c6af..99f4e86ac3e9 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -14,10 +14,10 @@
#include <linux/hw_random.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/timer.h>
#define RNG_MAX_DATUM 4
diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c
index 2a9fea72b2e0..2c586d1fe8a9 100644
--- a/drivers/char/hw_random/xiphera-trng.c
+++ b/drivers/char/hw_random/xiphera-trng.c
@@ -7,7 +7,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/hw_random.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c
index 8aa9057601d6..12ee42a31c71 100644
--- a/drivers/char/tpm/eventlog/tpm1.c
+++ b/drivers/char/tpm/eventlog/tpm1.c
@@ -251,7 +251,6 @@ static int tpm1_binary_bios_measurements_show(struct seq_file *m, void *v)
static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
{
- int len = 0;
char *eventname;
struct tcpa_event *event = v;
unsigned char *event_entry =
@@ -273,7 +272,7 @@ static int tpm1_ascii_bios_measurements_show(struct seq_file *m, void *v)
/* 3rd: event type identifier */
seq_printf(m, " %02x", do_endian_conversion(event->event_type));
- len += get_event_name(eventname, event, event_entry);
+ get_event_name(eventname, event, event_entry);
/* 4th: eventname <= max + \'0' delimiter */
seq_printf(m, " %s\n", eventname);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 7fa3d91042b2..077fdb73740c 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -27,7 +27,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kernel.h>
-#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"
@@ -89,8 +88,8 @@ static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
tpm_tis_flush(iobase);
}
-static int interrupts;
-module_param(interrupts, int, 0444);
+static bool interrupts;
+module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
static bool itpm;
@@ -103,92 +102,6 @@ module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
#endif
-static int tpm_tis_disable_irq(const struct dmi_system_id *d)
-{
- if (interrupts == -1) {
- pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
- interrupts = 0;
- }
-
- return 0;
-}
-
-static const struct dmi_system_id tpm_tis_dmi_table[] = {
- {
- .callback = tpm_tis_disable_irq,
- .ident = "Framework Laptop (12th Gen Intel Core)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Laptop (12th Gen Intel Core)"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "Framework Laptop (13th Gen Intel Core)",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Laptop (13th Gen Intel Core)"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "ThinkPad T490s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "ThinkStation P360 Tiny",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "ThinkPad L490",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "ThinkPad L590",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L590"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "ThinkStation P620",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P620"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "TUXEDO InfinityBook S 15/17 Gen7",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TUXEDO InfinityBook S 15/17 Gen7"),
- },
- },
- {
- .callback = tpm_tis_disable_irq,
- .ident = "UPX-TGL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
- DMI_MATCH(DMI_PRODUCT_NAME, "UPX-TGL01"),
- },
- },
- {}
-};
-
#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
@@ -312,8 +225,6 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
int irq = -1;
int rc;
- dmi_check_system(tpm_tis_dmi_table);
-
rc = check_acpi_tpm2(dev);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index b95963095729..1b350412d8a6 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -340,7 +340,7 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_tis_try_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0;
@@ -348,11 +348,6 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
u32 expected;
int rc;
- if (count < TPM_HEADER_SIZE) {
- size = -EIO;
- goto out;
- }
-
size = recv_data(chip, buf, TPM_HEADER_SIZE);
/* read first 10 bytes, including tag, paramsize, and result */
if (size < TPM_HEADER_SIZE) {
@@ -385,7 +380,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
status = tpm_tis_status(chip);
- if (status & TPM_STS_DATA_AVAIL) { /* retry? */
+ if (status & TPM_STS_DATA_AVAIL) {
dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
@@ -399,10 +394,36 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}
out:
- tpm_tis_ready(chip);
return size;
}
+static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ unsigned int try;
+ int rc = 0;
+
+ if (count < TPM_HEADER_SIZE)
+ return -EIO;
+
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_try_recv(chip, buf, count);
+
+ if (rc == -EIO)
+ /* Data transfer errors, indicated by EIO, can be
+ * recovered by rereading the response.
+ */
+ tpm_tis_write8(priv, TPM_STS(priv->locality),
+ TPM_STS_RESPONSE_RETRY);
+ else
+ break;
+ }
+
+ tpm_tis_ready(chip);
+
+ return rc;
+}
+
/*
* If interrupts are used (signaled by an irq set in the vendor structure)
* tpm.c can skip polling for the data to be available as the interrupt is
@@ -469,6 +490,12 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
goto out_err;
}
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -512,15 +539,16 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
int rc;
u32 ordinal;
unsigned long dur;
+ unsigned int try;
- rc = tpm_tis_send_data(chip, buf, len);
- if (rc < 0)
- return rc;
-
- rc = tpm_tis_verify_crc(priv, len, buf);
- if (rc < 0) {
- dev_err(&chip->dev, "CRC mismatch for command.\n");
- return rc;
+ for (try = 0; try < TPM_RETRY; try++) {
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc >= 0)
+ /* Data transfer done successfully */
+ break;
+ else if (rc != -EIO)
+ /* Data transfer failed, not recoverable */
+ return rc;
}
/* go and do it */
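The reworked tpm_tis_recv() above is a bounded retry on recoverable transfer errors; stripped of the TPM specifics, the control flow looks like this (bounded_retry() and its callback are illustrative helpers, not kernel API):

#include <linux/errno.h>

static int bounded_retry(int (*op)(void *ctx), void *ctx, unsigned int max_tries)
{
	unsigned int try;
	int rc = -EIO;

	for (try = 0; try < max_tries; try++) {
		rc = op(ctx);
		if (rc != -EIO)
			break;	/* only -EIO is treated as recoverable */
		/*
		 * A real caller also re-arms the device before retrying,
		 * as tpm_tis_recv() does with TPM_STS_RESPONSE_RETRY.
		 */
	}

	return rc;
}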
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index b1a169d7d1ca..13e99cf65efe 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -34,6 +34,7 @@ enum tis_status {
TPM_STS_GO = 0x20,
TPM_STS_DATA_AVAIL = 0x10,
TPM_STS_DATA_EXPECT = 0x08,
+ TPM_STS_RESPONSE_RETRY = 0x02,
TPM_STS_READ_ZERO = 0x23, /* bits that must be zero on read */
};
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 9bfaba092a06..a62f5c7f38d3 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -71,8 +71,74 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
return 0;
}
-int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *in, const u8 *out)
+/*
+ * Half-duplex controllers with support for TPM wait-state detection, such as
+ * the Tegra QSPI, need CMD, ADDR and DATA sent in a single message so the
+ * hardware can manage flow control. Each phase is sent as a separate transfer
+ * so the controller can identify the phase.
+ */
+static int tpm_tis_spi_transfer_half(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_transfer spi_xfer[3];
+ struct spi_message m;
+ u8 transfer_len;
+ int ret;
+
+ while (len) {
+ transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
+
+ spi_message_init(&m);
+ phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+ phy->iobuf[1] = 0xd4;
+ phy->iobuf[2] = addr >> 8;
+ phy->iobuf[3] = addr;
+
+ memset(&spi_xfer, 0, sizeof(spi_xfer));
+
+ spi_xfer[0].tx_buf = phy->iobuf;
+ spi_xfer[0].len = 1;
+ spi_message_add_tail(&spi_xfer[0], &m);
+
+ spi_xfer[1].tx_buf = phy->iobuf + 1;
+ spi_xfer[1].len = 3;
+ spi_message_add_tail(&spi_xfer[1], &m);
+
+ if (out) {
+ spi_xfer[2].tx_buf = &phy->iobuf[4];
+ spi_xfer[2].rx_buf = NULL;
+ memcpy(&phy->iobuf[4], out, transfer_len);
+ out += transfer_len;
+ }
+
+ if (in) {
+ spi_xfer[2].tx_buf = NULL;
+ spi_xfer[2].rx_buf = &phy->iobuf[4];
+ }
+
+ spi_xfer[2].len = transfer_len;
+ spi_message_add_tail(&spi_xfer[2], &m);
+
+ reinit_completion(&phy->ready);
+
+ ret = spi_sync(phy->spi_device, &m);
+ if (ret < 0)
+ return ret;
+
+ if (in) {
+ memcpy(in, &phy->iobuf[4], transfer_len);
+ in += transfer_len;
+ }
+
+ len -= transfer_len;
+ }
+
+ return ret;
+}
+
+static int tpm_tis_spi_transfer_full(struct tpm_tis_data *data, u32 addr,
+ u16 len, u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -148,6 +214,24 @@ exit:
return ret;
}
+int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *in, const u8 *out)
+{
+ struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
+ struct spi_controller *ctlr = phy->spi_device->controller;
+
+ /*
+	 * TPM flow control over SPI requires full-duplex support.
+	 * For a half-duplex controller, send the entire message so the
+	 * controller itself can handle the wait-state polling; the TPM
+	 * HW flow-control flag is set for such controllers at probe time.
+ */
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ return tpm_tis_spi_transfer_half(data, addr, len, in, out);
+ else
+ return tpm_tis_spi_transfer_full(data, addr, len, in, out);
+}
+
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
@@ -189,6 +273,9 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
phy->flow_control = tpm_tis_spi_flow_control;
+ if (dev->controller->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ dev->mode |= SPI_TPM_HW_FLOW;
+
/* If the SPI device has an IRQ then use that */
if (dev->irq > 0)
irq = dev->irq;
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
index 49278746b0e2..f7d5e76b505e 100644
--- a/drivers/char/tpm/tpm_tis_synquacer.c
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -162,23 +162,7 @@ static struct platform_driver tis_synquacer_drv = {
},
};
-static int __init tpm_tis_synquacer_module_init(void)
-{
- int rc;
-
- rc = platform_driver_register(&tis_synquacer_drv);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static void __exit tpm_tis_synquacer_module_exit(void)
-{
- platform_driver_unregister(&tis_synquacer_drv);
-}
+module_platform_driver(tis_synquacer_drv);
-module_init(tpm_tis_synquacer_module_init);
-module_exit(tpm_tis_synquacer_module_exit);
MODULE_DESCRIPTION("TPM MMIO Driver for Socionext SynQuacer platform");
MODULE_LICENSE("GPL");
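module_platform_driver() generates exactly the init/exit boilerplate removed above; a rough hand expansion for this driver (the real macro goes through module_driver(), and the generated symbol names differ slightly):

static int __init tis_synquacer_drv_init(void)
{
	return platform_driver_register(&tis_synquacer_drv);
}
module_init(tis_synquacer_drv_init);

static void __exit tis_synquacer_drv_exit(void)
{
	platform_driver_unregister(&tis_synquacer_drv);
}
module_exit(tis_synquacer_drv_exit);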
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 4fb4fd4b06bd..737aa70e2cb3 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
- struct clk **ptr, *clk;
+ struct devm_clk_state *state;
+ struct clk *clk;
- ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
+ state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+ if (!state)
return ERR_PTR(-ENOMEM);
clk = of_clk_get_by_name(np, con_id);
if (!IS_ERR(clk)) {
- *ptr = clk;
- devres_add(dev, ptr);
+ state->clk = clk;
+ devres_add(dev, state);
} else {
- devres_free(ptr);
+ devres_free(state);
}
return clk;
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index d33f74119488..935d9a2d8c2b 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -151,8 +151,10 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
data[i].name);
}
- return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- hw_data);
+ if (num_clks == 1)
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ hw_data->hws[0]);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
}
#define TI_SYSCON_CLK_GATE(_name, _offset, _bit_idx) \
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b2f05d27167e..37f1cdf46d29 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -1011,22 +1011,20 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int acpi_cpufreq_remove(struct platform_device *pdev)
+static void acpi_cpufreq_remove(struct platform_device *pdev)
{
pr_debug("%s\n", __func__);
cpufreq_unregister_driver(&acpi_cpufreq_driver);
free_acpi_perf_data();
-
- return 0;
}
static struct platform_driver acpi_cpufreq_platdrv = {
.driver = {
.name = "acpi-cpufreq",
},
- .remove = acpi_cpufreq_remove,
+ .remove_new = acpi_cpufreq_remove,
};
static int __init acpi_cpufreq_init(void)
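The .remove_new conversions that recur through the rest of this section all follow the same shape: the callback keeps its struct platform_device argument but returns void, because the int returned by the old .remove was effectively ignored by the driver core (it only triggered a warning). A minimal sketch with a hypothetical foo driver:

#include <linux/platform_device.h>

static void foo_remove(struct platform_device *pdev)
{
	/* Undo whatever probe set up; there is no return value to misuse. */
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
	.remove_new = foo_remove,
};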
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index 7f3fe2048981..f04ae67dda37 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -64,27 +64,9 @@ static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
static bool get_shared_mem(void)
{
bool result = false;
- char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
- char buf[5] = {0};
- struct file *filp = NULL;
- loff_t pos = 0;
- ssize_t ret;
-
- if (!boot_cpu_has(X86_FEATURE_CPPC)) {
- filp = filp_open(path, O_RDONLY, 0);
- if (IS_ERR(filp))
- pr_err("%s unable to open %s file!\n", __func__, path);
- else {
- ret = kernel_read(filp, &buf, sizeof(buf), &pos);
- if (ret < 0)
- pr_err("%s read %s file fail ret=%ld!\n",
- __func__, path, (long)ret);
- filp_close(filp, NULL);
- }
- if ('Y' == *buf)
- result = true;
- }
+ if (!boot_cpu_has(X86_FEATURE_CPPC))
+ result = true;
return result;
}
@@ -145,8 +127,6 @@ static void amd_pstate_ut_check_perf(u32 index)
struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata = NULL;
- highest_perf = amd_get_highest_perf();
-
for_each_possible_cpu(cpu) {
policy = cpufreq_cpu_get(cpu);
if (!policy)
@@ -158,9 +138,10 @@ static void amd_pstate_ut_check_perf(u32 index)
if (ret) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
- return;
+ goto skip_test;
}
+ highest_perf = cppc_perf.highest_perf;
nominal_perf = cppc_perf.nominal_perf;
lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
lowest_perf = cppc_perf.lowest_perf;
@@ -169,9 +150,10 @@ static void amd_pstate_ut_check_perf(u32 index)
if (ret) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
- return;
+ goto skip_test;
}
+ highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
@@ -187,7 +169,7 @@ static void amd_pstate_ut_check_perf(u32 index)
nominal_perf, cpudata->nominal_perf,
lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
lowest_perf, cpudata->lowest_perf);
- return;
+ goto skip_test;
}
if (!((highest_perf >= nominal_perf) &&
@@ -198,11 +180,15 @@ static void amd_pstate_ut_check_perf(u32 index)
pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
__func__, cpu, highest_perf, nominal_perf,
lowest_nonlinear_perf, lowest_perf);
- return;
+ goto skip_test;
}
+ cpufreq_cpu_put(policy);
}
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ return;
+skip_test:
+ cpufreq_cpu_put(policy);
}
/*
@@ -230,14 +216,14 @@ static void amd_pstate_ut_check_freq(u32 index)
pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
__func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
cpudata->lowest_nonlinear_freq, cpudata->min_freq);
- return;
+ goto skip_test;
}
if (cpudata->min_freq != policy->min) {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
__func__, cpu, cpudata->min_freq, policy->min);
- return;
+ goto skip_test;
}
if (cpudata->boost_supported) {
@@ -249,16 +235,20 @@ static void amd_pstate_ut_check_freq(u32 index)
pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
__func__, cpu, policy->max, cpudata->max_freq,
cpudata->nominal_freq);
- return;
+ goto skip_test;
}
} else {
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
pr_err("%s cpu%d must support boost!\n", __func__, cpu);
- return;
+ goto skip_test;
}
+ cpufreq_cpu_put(policy);
}
amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
+ return;
+skip_test:
+ cpufreq_cpu_put(policy);
}
static int __init amd_pstate_ut_init(void)
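The skip_test labels introduced above exist to keep cpufreq_cpu_get() and cpufreq_cpu_put() balanced on every exit path. A condensed sketch of that reference-counting rule (check_all_policies() and the check itself are illustrative only):

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void check_all_policies(void)
{
	struct cpufreq_policy *policy;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);	/* takes a reference */
		if (!policy)
			continue;

		if (policy->min > policy->max) {
			pr_err("cpu%u: inconsistent limits\n", cpu);
			cpufreq_cpu_put(policy);	/* error path drops it too */
			return;
		}

		cpufreq_cpu_put(policy);	/* normal path drops it */
	}
}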
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index b74289a95a17..bea41ccabf1f 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -14,10 +14,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index ffea6402189d..35fb3a559ea9 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -434,7 +434,11 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
if (ret)
return ERR_PTR(ret);
- table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1, sizeof(*table),
+ /*
+	 * Allocate space for the 5 different AVS P-states, plus an
+	 * extra slot for the terminating element.
+ */
+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 1 + 1, sizeof(*table),
GFP_KERNEL);
if (!table)
return ERR_PTR(-ENOMEM);
@@ -749,13 +753,11 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+static void brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
-
- return 0;
}
static const struct of_device_id brcm_avs_cpufreq_match[] = {
@@ -770,7 +772,7 @@ static struct platform_driver brcm_avs_cpufreq_platdrv = {
.of_match_table = brcm_avs_cpufreq_match,
},
.probe = brcm_avs_cpufreq_probe,
- .remove = brcm_avs_cpufreq_remove,
+ .remove_new = brcm_avs_cpufreq_remove,
};
module_platform_driver(brcm_avs_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 022e3555407c..fe08ca419b3d 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -249,15 +249,19 @@ static void __init cppc_freq_invariance_init(void)
return;
kworker_fie = kthread_create_worker(0, "cppc_fie");
- if (IS_ERR(kworker_fie))
+ if (IS_ERR(kworker_fie)) {
+ pr_warn("%s: failed to create kworker_fie: %ld\n", __func__,
+ PTR_ERR(kworker_fie));
+ fie_disabled = FIE_DISABLED;
return;
+ }
ret = sched_setattr_nocheck(kworker_fie->task, &attr);
if (ret) {
pr_warn("%s: failed to set SCHED_DEADLINE: %d\n", __func__,
ret);
kthread_destroy_worker(kworker_fie);
- return;
+ fie_disabled = FIE_DISABLED;
}
}
@@ -267,7 +271,6 @@ static void cppc_freq_invariance_exit(void)
return;
kthread_destroy_worker(kworker_fie);
- kworker_fie = NULL;
}
#else
@@ -849,13 +852,13 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
if (ret)
- return ret;
+ return 0;
udelay(2); /* 2usec delay between sampling */
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
if (ret)
- return ret;
+ return 0;
delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
&fb_ctrs_t1);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index e2b20080de3a..fb2875ce1fdd 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -143,14 +143,19 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
+ { .compatible = "qcom,msm8998", },
+ { .compatible = "qcom,qcm2290", },
{ .compatible = "qcom,qcs404", },
+ { .compatible = "qcom,qdu1000", },
{ .compatible = "qcom,sa8155p" },
{ .compatible = "qcom,sa8540p" },
+ { .compatible = "qcom,sa8775p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sc8280xp", },
{ .compatible = "qcom,sdm845", },
+ { .compatible = "qcom,sdx75", },
{ .compatible = "qcom,sm6115", },
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm6375", },
@@ -158,6 +163,8 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
+ { .compatible = "qcom,sm8450", },
+ { .compatible = "qcom,sm8550", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 4aec4b2a5225..8bd6e5e8f121 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -349,11 +349,10 @@ err:
return ret;
}
-static int dt_cpufreq_remove(struct platform_device *pdev)
+static void dt_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&dt_cpufreq_driver);
dt_cpufreq_release();
- return 0;
}
static struct platform_driver dt_cpufreq_platdrv = {
@@ -361,7 +360,7 @@ static struct platform_driver dt_cpufreq_platdrv = {
.name = "cpufreq-dt",
},
.probe = dt_cpufreq_probe,
- .remove = dt_cpufreq_remove,
+ .remove_new = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 50bbc969ffe5..a757f90aa9d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1234,16 +1234,16 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
&policy->nb_min);
if (ret) {
- dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
- ret, cpumask_pr_args(policy->cpus));
+ dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
+ ret, cpu);
goto err_kobj_remove;
}
ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
&policy->nb_max);
if (ret) {
- dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
- ret, cpumask_pr_args(policy->cpus));
+ dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
+ ret, cpu);
goto err_min_qos_notifier;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 55c7ffd37d1c..a33df3c66c88 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -243,7 +243,8 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
/* Find valid-unique entries */
cpufreq_for_each_valid_entry(pos, policy->freq_table)
- if (freq_table_get_index(stats, pos->frequency) == -1)
+ if (policy->freq_table_sorted != CPUFREQ_TABLE_UNSORTED ||
+ freq_table_get_index(stats, pos->frequency) == -1)
stats->freq_table[i++] = pos->frequency;
stats->state_num = i;
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index ebb3a8102681..7d2754411d8c 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -131,7 +131,7 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
return cpufreq_register_driver(&davinci_driver);
}
-static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+static void __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&davinci_driver);
@@ -139,15 +139,13 @@ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
-
- return 0;
}
static struct platform_driver davinci_cpufreq_driver = {
.driver = {
.name = "cpufreq-davinci",
},
- .remove = __exit_p(davinci_cpufreq_remove),
+ .remove_new = __exit_p(davinci_cpufreq_remove),
};
int __init davinci_cpufreq_init(void)
diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
index 535867a7dfdd..577bb9e2f112 100644
--- a/drivers/cpufreq/imx-cpufreq-dt.c
+++ b/drivers/cpufreq/imx-cpufreq-dt.c
@@ -172,20 +172,18 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
return 0;
}
-static int imx_cpufreq_dt_remove(struct platform_device *pdev)
+static void imx_cpufreq_dt_remove(struct platform_device *pdev)
{
platform_device_unregister(cpufreq_dt_pdev);
if (!of_machine_is_compatible("fsl,imx7ulp"))
dev_pm_opp_put_supported_hw(cpufreq_opp_token);
else
clk_bulk_put(ARRAY_SIZE(imx7ulp_clks), imx7ulp_clks);
-
- return 0;
}
static struct platform_driver imx_cpufreq_dt_driver = {
.probe = imx_cpufreq_dt_probe,
- .remove = imx_cpufreq_dt_remove,
+ .remove_new = imx_cpufreq_dt_remove,
.driver = {
.name = "imx-cpufreq-dt",
},
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9fb1501033bb..494d044b9e72 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -519,7 +519,7 @@ put_node:
return ret;
}
-static int imx6q_cpufreq_remove(struct platform_device *pdev)
+static void imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
@@ -530,8 +530,6 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
regulator_put(soc_reg);
clk_bulk_put(num_clks, clks);
-
- return 0;
}
static struct platform_driver imx6q_cpufreq_platdrv = {
@@ -539,7 +537,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = {
.name = "imx6q-cpufreq",
},
.probe = imx6q_cpufreq_probe,
- .remove = imx6q_cpufreq_remove,
+ .remove_new = imx6q_cpufreq_remove,
};
module_platform_driver(imx6q_cpufreq_platdrv);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8ca2bce4341a..dc50c9fb488d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2609,6 +2609,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_clear_update_util_hook(policy->cpu);
intel_pstate_hwp_set(policy->cpu);
}
+ /*
+ * policy->cur is never updated with the intel_pstate driver, but it
+ * is used as a stale frequency value. So, keep it within limits.
+ */
+ policy->cur = policy->min;
mutex_unlock(&intel_pstate_limits_lock);
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 95588101efbd..fd20b986d1f2 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -178,20 +178,18 @@ out_node:
return err;
}
-static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+static void kirkwood_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
clk_disable_unprepare(priv.powersave_clk);
clk_disable_unprepare(priv.ddr_clk);
clk_disable_unprepare(priv.cpu_clk);
-
- return 0;
}
static struct platform_driver kirkwood_cpufreq_platform_driver = {
.probe = kirkwood_cpufreq_probe,
- .remove = kirkwood_cpufreq_remove,
+ .remove_new = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
},
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index b22f5cc8a463..d46afb3c0092 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -10,8 +10,9 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 32U
@@ -315,11 +316,9 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
return ret;
}
-static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
+static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
-
- return 0;
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
@@ -330,7 +329,7 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
- .remove = mtk_cpufreq_hw_driver_remove,
+ .remove_new = mtk_cpufreq_hw_driver_remove,
.driver = {
.name = "mtk-cpufreq-hw",
.of_match_table = mtk_cpufreq_hw_match,
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index fef68cb2b38f..a0a61919bc4c 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -313,8 +313,6 @@ out:
return ret;
}
-#define DYNAMIC_POWER "dynamic-power-coefficient"
-
static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
unsigned long event, void *data)
{
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 81649a1969b6..895690856665 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -182,11 +182,9 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
return cpufreq_register_driver(&omap_driver);
}
-static int omap_cpufreq_remove(struct platform_device *pdev)
+static void omap_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&omap_driver);
-
- return 0;
}
static struct platform_driver omap_cpufreq_platdrv = {
@@ -194,7 +192,7 @@ static struct platform_driver omap_cpufreq_platdrv = {
.name = "omap-cpufreq",
},
.probe = omap_cpufreq_probe,
- .remove = omap_cpufreq_remove,
+ .remove_new = omap_cpufreq_remove,
};
module_platform_driver(omap_cpufreq_platdrv);
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 73efbcf5513b..84fe37def0f1 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -608,22 +608,20 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int pcc_cpufreq_remove(struct platform_device *pdev)
+static void pcc_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&pcc_cpufreq_driver);
pcc_clear_mapping();
free_percpu(pcc_cpu_info);
-
- return 0;
}
static struct platform_driver pcc_cpufreq_platdrv = {
.driver = {
.name = "pcc-cpufreq",
},
- .remove = pcc_cpufreq_remove,
+ .remove_new = pcc_cpufreq_remove,
};
static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index d289036beff2..b10f7a1b77f1 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1101,7 +1101,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
- for_each_cpu(cpu, pol->cpus)
+ /* pol->cpus will be empty here, use related_cpus instead. */
+ for_each_cpu(cpu, pol->related_cpus)
per_cpu(powernow_data, cpu) = NULL;
return 0;
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index e3313ce63b38..88afc49941b7 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -9,7 +9,7 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 4fba3637b115..6f0c32592416 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/init.h>
-#include <linux/of_platform.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f2830371d25f..70b0f21968a0 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -28,7 +28,7 @@
#define GT_IRQ_STATUS BIT(2)
-#define MAX_FREQ_DOMAINS 3
+#define MAX_FREQ_DOMAINS 4
struct qcom_cpufreq_soc_data {
u32 reg_enable;
@@ -730,16 +730,14 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return ret;
}
-static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
+static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
-
- return 0;
}
static struct platform_driver qcom_cpufreq_hw_driver = {
.probe = qcom_cpufreq_hw_driver_probe,
- .remove = qcom_cpufreq_hw_driver_remove,
+ .remove_new = qcom_cpufreq_hw_driver_remove,
.driver = {
.name = "qcom-cpufreq-hw",
.of_match_table = qcom_cpufreq_hw_match,
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index a88b6fe5db50..84d7033e5efe 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
@@ -334,7 +333,7 @@ free_drv:
return ret;
}
-static int qcom_cpufreq_remove(struct platform_device *pdev)
+static void qcom_cpufreq_remove(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv = platform_get_drvdata(pdev);
unsigned int cpu;
@@ -346,13 +345,11 @@ static int qcom_cpufreq_remove(struct platform_device *pdev)
kfree(drv->opp_tokens);
kfree(drv);
-
- return 0;
}
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
- .remove = qcom_cpufreq_remove,
+ .remove_new = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
},
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 573b417e1483..0aecaecbb0e6 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -288,11 +288,9 @@ static int qoriq_cpufreq_probe(struct platform_device *pdev)
return 0;
}
-static int qoriq_cpufreq_remove(struct platform_device *pdev)
+static void qoriq_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&qoriq_cpufreq_driver);
-
- return 0;
}
static struct platform_driver qoriq_cpufreq_platform_driver = {
@@ -300,7 +298,7 @@ static struct platform_driver qoriq_cpufreq_platform_driver = {
.name = "qoriq-cpufreq",
},
.probe = qoriq_cpufreq_probe,
- .remove = qoriq_cpufreq_remove,
+ .remove_new = qoriq_cpufreq_remove,
};
module_platform_driver(qoriq_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/raspberrypi-cpufreq.c b/drivers/cpufreq/raspberrypi-cpufreq.c
index 2bc7d9734272..e0705cc9a57d 100644
--- a/drivers/cpufreq/raspberrypi-cpufreq.c
+++ b/drivers/cpufreq/raspberrypi-cpufreq.c
@@ -65,7 +65,7 @@ remove_opp:
return ret;
}
-static int raspberrypi_cpufreq_remove(struct platform_device *pdev)
+static void raspberrypi_cpufreq_remove(struct platform_device *pdev)
{
struct device *cpu_dev;
@@ -74,8 +74,6 @@ static int raspberrypi_cpufreq_remove(struct platform_device *pdev)
dev_pm_opp_remove_all_dynamic(cpu_dev);
platform_device_unregister(cpufreq_dt);
-
- return 0;
}
/*
@@ -87,7 +85,7 @@ static struct platform_driver raspberrypi_cpufreq_driver = {
.name = "raspberrypi-cpufreq",
},
.probe = raspberrypi_cpufreq_probe,
- .remove = raspberrypi_cpufreq_remove,
+ .remove_new = raspberrypi_cpufreq_remove,
};
module_platform_driver(raspberrypi_cpufreq_driver);
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index fd2c16821d54..d33be56983ed 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -14,7 +14,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
@@ -208,11 +208,10 @@ static int scpi_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int scpi_cpufreq_remove(struct platform_device *pdev)
+static void scpi_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
- return 0;
}
static struct platform_driver scpi_cpufreq_platdrv = {
@@ -220,7 +219,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
.name = "scpi-cpufreq",
},
.probe = scpi_cpufreq_probe,
- .remove = scpi_cpufreq_remove,
+ .remove_new = scpi_cpufreq_remove,
};
module_platform_driver(scpi_cpufreq_platdrv);
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
index 1a63aeea8711..9c542e723a15 100644
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -13,7 +13,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 4321d7bbe769..32a9c88f8ff6 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -137,7 +137,7 @@ free_opp:
return ret;
}
-static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
+static void sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
{
int *opp_tokens = platform_get_drvdata(pdev);
unsigned int cpu;
@@ -148,13 +148,11 @@ static int sun50i_cpufreq_nvmem_remove(struct platform_device *pdev)
dev_pm_opp_put_prop_name(opp_tokens[cpu]);
kfree(opp_tokens);
-
- return 0;
}
static struct platform_driver sun50i_cpufreq_driver = {
.probe = sun50i_cpufreq_nvmem_probe,
- .remove = sun50i_cpufreq_nvmem_remove,
+ .remove_new = sun50i_cpufreq_nvmem_remove,
.driver = {
.name = "sun50i-cpufreq-nvmem",
},
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index f98f53bf1011..7b8fcfa55038 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -259,11 +259,9 @@ put_bpmp:
return err;
}
-static int tegra186_cpufreq_remove(struct platform_device *pdev)
+static void tegra186_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&tegra186_cpufreq_driver);
-
- return 0;
}
static const struct of_device_id tegra186_cpufreq_of_match[] = {
@@ -278,7 +276,7 @@ static struct platform_driver tegra186_cpufreq_platform_driver = {
.of_match_table = tegra186_cpufreq_of_match,
},
.probe = tegra186_cpufreq_probe,
- .remove = tegra186_cpufreq_remove,
+ .remove_new = tegra186_cpufreq_remove,
};
module_platform_driver(tegra186_cpufreq_platform_driver);
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 36dad5ea5947..88ef5e57ccd0 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -508,6 +508,32 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
+static int tegra194_cpufreq_online(struct cpufreq_policy *policy)
+{
+	/* We did a lightweight teardown earlier, nothing to do here */
+ return 0;
+}
+
+static int tegra194_cpufreq_offline(struct cpufreq_policy *policy)
+{
+ /*
+	 * Preserve policy->driver_data and don't free resources on lightweight
+	 * teardown.
+ */
+
+ return 0;
+}
+
+static int tegra194_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
+
+ dev_pm_opp_remove_all_dynamic(cpu_dev);
+ dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+
+ return 0;
+}
+
static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int index)
{
@@ -535,6 +561,9 @@ static struct cpufreq_driver tegra194_cpufreq_driver = {
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
.init = tegra194_cpufreq_init,
+ .exit = tegra194_cpufreq_exit,
+ .online = tegra194_cpufreq_online,
+ .offline = tegra194_cpufreq_offline,
.attr = cpufreq_generic_attr,
};
@@ -708,12 +737,10 @@ put_bpmp:
return err;
}
-static int tegra194_cpufreq_remove(struct platform_device *pdev)
+static void tegra194_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&tegra194_cpufreq_driver);
tegra194_cpufreq_free_resources();
-
- return 0;
}
static const struct of_device_id tegra194_cpufreq_of_match[] = {
@@ -730,7 +757,7 @@ static struct platform_driver tegra194_ccplex_driver = {
.of_match_table = tegra194_cpufreq_of_match,
},
.probe = tegra194_cpufreq_probe,
- .remove = tegra194_cpufreq_remove,
+ .remove_new = tegra194_cpufreq_remove,
};
module_platform_driver(tegra194_ccplex_driver);
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index d5cd2fd25cad..3c37d7899660 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index d295f405c4bb..9ac4ea50b874 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
@@ -552,7 +551,7 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev)
return ret;
}
-static int ve_spc_cpufreq_remove(struct platform_device *pdev)
+static void ve_spc_cpufreq_remove(struct platform_device *pdev)
{
bL_switcher_get_enabled();
__bLs_unregister_notifier();
@@ -560,7 +559,6 @@ static int ve_spc_cpufreq_remove(struct platform_device *pdev)
bL_switcher_put_enabled();
pr_info("%s: Un-registered platform driver: %s\n", __func__,
ve_spc_cpufreq_driver.name);
- return 0;
}
static struct platform_driver ve_spc_cpufreq_platdrv = {
@@ -568,7 +566,7 @@ static struct platform_driver ve_spc_cpufreq_platdrv = {
.name = "vexpress-spc-cpufreq",
},
.probe = ve_spc_cpufreq_probe,
- .remove = ve_spc_cpufreq_remove,
+ .remove_new = ve_spc_cpufreq_remove,
};
module_platform_driver(ve_spc_cpufreq_platdrv);
diff --git a/drivers/cpuidle/governors/gov.h b/drivers/cpuidle/governors/gov.h
new file mode 100644
index 000000000000..99e067d9668c
--- /dev/null
+++ b/drivers/cpuidle/governors/gov.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Common definitions for cpuidle governors. */
+
+#ifndef __CPUIDLE_GOVERNOR_H
+#define __CPUIDLE_GOVERNOR_H
+
+/*
+ * Idle state target residency threshold used for deciding whether or not to
+ * check the time till the closest expected timer event.
+ */
+#define RESIDENCY_THRESHOLD_NS (15 * NSEC_PER_USEC)
+
+#endif /* __CPUIDLE_GOVERNOR_H */
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c4922684f305..b96e3da0fedd 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -19,6 +19,8 @@
#include <linux/sched/stat.h>
#include <linux/math64.h>
+#include "gov.h"
+
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
@@ -166,8 +168,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static unsigned int get_typical_interval(struct menu_device *data,
- unsigned int predicted_us)
+static unsigned int get_typical_interval(struct menu_device *data)
{
int i, divisor;
unsigned int min, max, thresh, avg;
@@ -195,11 +196,7 @@ again:
}
}
- /*
- * If the result of the computation is going to be discarded anyway,
- * avoid the computation altogether.
- */
- if (min >= predicted_us)
+ if (!max)
return UINT_MAX;
if (divisor == INTERVALS)
@@ -267,7 +264,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
{
struct menu_device *data = this_cpu_ptr(&menu_devices);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
- unsigned int predicted_us;
u64 predicted_ns;
u64 interactivity_req;
unsigned int nr_iowaiters;
@@ -279,16 +275,41 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
data->needs_update = 0;
}
- /* determine the expected residency time, round up */
- delta = tick_nohz_get_sleep_length(&delta_tick);
- if (unlikely(delta < 0)) {
- delta = 0;
- delta_tick = 0;
- }
- data->next_timer_ns = delta;
-
nr_iowaiters = nr_iowait_cpu(dev->cpu);
- data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+
+ /* Find the shortest expected idle interval. */
+ predicted_ns = get_typical_interval(data) * NSEC_PER_USEC;
+ if (predicted_ns > RESIDENCY_THRESHOLD_NS) {
+ unsigned int timer_us;
+
+ /* Determine the time till the closest timer. */
+ delta = tick_nohz_get_sleep_length(&delta_tick);
+ if (unlikely(delta < 0)) {
+ delta = 0;
+ delta_tick = 0;
+ }
+
+ data->next_timer_ns = delta;
+ data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+
+ /* Round up the result for half microseconds. */
+ timer_us = div_u64((RESOLUTION * DECAY * NSEC_PER_USEC) / 2 +
+ data->next_timer_ns *
+ data->correction_factor[data->bucket],
+ RESOLUTION * DECAY * NSEC_PER_USEC);
+ /* Use the lowest expected idle interval to pick the idle state. */
+ predicted_ns = min((u64)timer_us * NSEC_PER_USEC, predicted_ns);
+ } else {
+ /*
+ * Because the next timer event is not going to be determined
+ * in this case, assume that without the tick the closest timer
+ * will be in distant future and that the closest tick will occur
+		 * will be in the distant future and that the closest tick will occur
+ */
+ data->next_timer_ns = KTIME_MAX;
+ delta_tick = TICK_NSEC / 2;
+ data->bucket = which_bucket(KTIME_MAX, nr_iowaiters);
+ }
if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
((data->next_timer_ns < drv->states[1].target_residency_ns ||
@@ -303,16 +324,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return 0;
}
- /* Round up the result for half microseconds. */
- predicted_us = div_u64(data->next_timer_ns *
- data->correction_factor[data->bucket] +
- (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
- RESOLUTION * DECAY * NSEC_PER_USEC);
- /* Use the lowest expected idle interval to pick the idle state. */
- predicted_ns = (u64)min(predicted_us,
- get_typical_interval(data, predicted_us)) *
- NSEC_PER_USEC;
-
if (tick_nohz_tick_stopped()) {
/*
* If the tick is already stopped, the cost of possible short
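The timer_us computation in the menu hunk above scales the timer distance by the per-bucket correction factor, a fixed-point fraction with denominator RESOLUTION * DECAY, and rounds to the nearest microsecond by adding half the divisor before dividing. A worked sketch using the upstream constants RESOLUTION = 1024 and DECAY = 8 (helper name and inputs are illustrative):

#include <linux/math64.h>

static unsigned int predicted_timer_us(u64 next_timer_ns, u64 correction)
{
	/* RESOLUTION * DECAY * NSEC_PER_USEC = 1024 * 8 * 1000 */
	const u32 div = 1024 * 8 * 1000;

	/* e.g. a 75% factor (6144/8192) on a 2 ms timer predicts ~1500 us */
	return div_u64(next_timer_ns * correction + div / 2, div);
}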
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index 987fc5f3997d..7244f71c59c5 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -140,6 +140,8 @@
#include <linux/sched/topology.h>
#include <linux/tick.h>
+#include "gov.h"
+
/*
* The number of bits to shift the CPU's capacity by in order to determine
* the utilized threshold.
@@ -152,7 +154,6 @@
*/
#define UTIL_THRESHOLD_SHIFT 6
-
/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
@@ -186,8 +187,8 @@ struct teo_bin {
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
* @next_recent_idx: Index of the next @recent_idx entry to update.
* @recent_idx: Indices of bins corresponding to recent "intercepts".
+ * @tick_hits: Number of "hits" after TICK_NSEC.
* @util_threshold: Threshold above which the CPU is considered utilized
- * @utilized: Whether the last sleep on the CPU happened while utilized
*/
struct teo_cpu {
s64 time_span_ns;
@@ -196,8 +197,8 @@ struct teo_cpu {
unsigned int total;
int next_recent_idx;
int recent_idx[NR_RECENT];
+ unsigned int tick_hits;
unsigned long util_threshold;
- bool utilized;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
@@ -228,6 +229,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
int i, idx_timer = 0, idx_duration = 0;
+ s64 target_residency_ns;
u64 measured_ns;
if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) {
@@ -268,7 +270,6 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* fall into.
*/
for (i = 0; i < drv->state_count; i++) {
- s64 target_residency_ns = drv->states[i].target_residency_ns;
struct teo_bin *bin = &cpu_data->state_bins[i];
bin->hits -= bin->hits >> DECAY_SHIFT;
@@ -276,6 +277,8 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->total += bin->hits + bin->intercepts;
+ target_residency_ns = drv->states[i].target_residency_ns;
+
if (target_residency_ns <= cpu_data->sleep_length_ns) {
idx_timer = i;
if (target_residency_ns <= measured_ns)
@@ -291,6 +294,26 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->state_bins[cpu_data->recent_idx[i]].recent--;
/*
+ * If the deepest state's target residency is below the tick length,
+ * make a record of it to help teo_select() decide whether or not
+ * to stop the tick. This effectively adds an extra hits-only bin
+ * beyond the last state-related one.
+ */
+ if (target_residency_ns < TICK_NSEC) {
+ cpu_data->tick_hits -= cpu_data->tick_hits >> DECAY_SHIFT;
+
+ cpu_data->total += cpu_data->tick_hits;
+
+ if (TICK_NSEC <= cpu_data->sleep_length_ns) {
+ idx_timer = drv->state_count;
+ if (TICK_NSEC <= measured_ns) {
+ cpu_data->tick_hits += PULSE;
+ goto end;
+ }
+ }
+ }
+
+ /*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
* Otherwise, update the "intercepts" metric for the bin fallen into by
@@ -305,18 +328,14 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->recent_idx[i] = idx_duration;
}
+end:
cpu_data->total += PULSE;
}
-static bool teo_time_ok(u64 interval_ns)
+static bool teo_state_ok(int i, struct cpuidle_driver *drv)
{
- return !tick_nohz_tick_stopped() || interval_ns >= TICK_NSEC;
-}
-
-static s64 teo_middle_of_bin(int idx, struct cpuidle_driver *drv)
-{
- return (drv->states[idx].target_residency_ns +
- drv->states[idx+1].target_residency_ns) / 2;
+ return !tick_nohz_tick_stopped() ||
+ drv->states[i].target_residency_ns >= TICK_NSEC;
}
/**
@@ -356,6 +375,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+ ktime_t delta_tick = TICK_NSEC / 2;
+ unsigned int tick_intercept_sum = 0;
unsigned int idx_intercept_sum = 0;
unsigned int intercept_sum = 0;
unsigned int idx_recent_sum = 0;
@@ -365,7 +386,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int constraint_idx = 0;
int idx0 = 0, idx = -1;
bool alt_intercepts, alt_recent;
- ktime_t delta_tick;
+ bool cpu_utilized;
s64 duration_ns;
int i;
@@ -375,44 +396,48 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
}
cpu_data->time_span_ns = local_clock();
-
- duration_ns = tick_nohz_get_sleep_length(&delta_tick);
- cpu_data->sleep_length_ns = duration_ns;
+ /*
+ * Set the expected sleep length to infinity in case of an early
+ * return.
+ */
+ cpu_data->sleep_length_ns = KTIME_MAX;
/* Check if there is any choice in the first place. */
if (drv->state_count < 2) {
idx = 0;
- goto end;
+ goto out_tick;
}
- if (!dev->states_usage[0].disable) {
+
+ if (!dev->states_usage[0].disable)
idx = 0;
- if (drv->states[1].target_residency_ns > duration_ns)
- goto end;
- }
- cpu_data->utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
+ cpu_utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
/*
* If the CPU is being utilized over the threshold and there are only 2
* states to choose from, the metrics need not be considered, so choose
* the shallowest non-polling state and exit.
*/
- if (drv->state_count < 3 && cpu_data->utilized) {
- for (i = 0; i < drv->state_count; ++i) {
- if (!dev->states_usage[i].disable &&
- !(drv->states[i].flags & CPUIDLE_FLAG_POLLING)) {
- idx = i;
- goto end;
- }
+ if (drv->state_count < 3 && cpu_utilized) {
+ /*
+ * If state 0 is enabled and it is not a polling one, select it
+ * right away unless the scheduler tick has been stopped, in
+ * which case care needs to be taken to leave the CPU in a deep
+ * enough state in case it is not woken up any time soon after
+ * all. If state 1 is disabled, though, state 0 must be used
+ * anyway.
+ */
+ if ((!idx && !(drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
+ teo_state_ok(0, drv)) || dev->states_usage[1].disable) {
+ idx = 0;
+ goto out_tick;
}
+ /* Assume that state 1 is not a polling one and use it. */
+ idx = 1;
+ duration_ns = drv->states[1].target_residency_ns;
+ goto end;
}
- /*
- * Find the deepest idle state whose target residency does not exceed
- * the current sleep length and the deepest idle state not deeper than
- * the former whose exit latency does not exceed the current latency
- * constraint. Compute the sums of metrics for early wakeup pattern
- * detection.
- */
+ /* Compute the sums of metrics for early wakeup pattern detection. */
for (i = 1; i < drv->state_count; i++) {
struct teo_bin *prev_bin = &cpu_data->state_bins[i-1];
struct cpuidle_state *s = &drv->states[i];
@@ -428,19 +453,15 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (dev->states_usage[i].disable)
continue;
- if (idx < 0) {
- idx = i; /* first enabled state */
- idx0 = i;
- }
-
- if (s->target_residency_ns > duration_ns)
- break;
+ if (idx < 0)
+ idx0 = i; /* first enabled state */
idx = i;
if (s->exit_latency_ns <= latency_req)
constraint_idx = i;
+ /* Save the sums for the current state. */
idx_intercept_sum = intercept_sum;
idx_hit_sum = hit_sum;
idx_recent_sum = recent_sum;
@@ -449,11 +470,21 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/* Avoid unnecessary overhead. */
if (idx < 0) {
idx = 0; /* No states enabled, must use 0. */
- goto end;
- } else if (idx == idx0) {
+ goto out_tick;
+ }
+
+ if (idx == idx0) {
+ /*
+ * Only one idle state is enabled, so use it, but do not
+ * allow the tick to be stopped it is shallow enough.
+		 * allow the tick to be stopped if it is too shallow.
+ duration_ns = drv->states[idx].target_residency_ns;
goto end;
}
+ tick_intercept_sum = intercept_sum +
+ cpu_data->state_bins[drv->state_count-1].intercepts;
+
/*
* If the sum of the intercepts metric for all of the idle states
* shallower than the current candidate one (idx) is greater than the
@@ -461,13 +492,11 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* all of the deeper states, or the sum of the numbers of recent
* intercepts over all of the states shallower than the candidate one
* is greater than a half of the number of recent events taken into
- * account, the CPU is likely to wake up early, so find an alternative
- * idle state to select.
+ * account, a shallower idle state is likely to be a better choice.
*/
alt_intercepts = 2 * idx_intercept_sum > cpu_data->total - idx_hit_sum;
alt_recent = idx_recent_sum > NR_RECENT / 2;
if (alt_recent || alt_intercepts) {
- s64 first_suitable_span_ns = duration_ns;
int first_suitable_idx = idx;
/*
@@ -476,44 +505,39 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* cases (both with respect to intercepts overall and with
* respect to the recent intercepts only) in the past.
*
- * Take the possible latency constraint and duration limitation
- * present if the tick has been stopped already into account.
+	 * Take into account the duration limitation that applies when
+	 * the tick has been stopped already.
*/
intercept_sum = 0;
recent_sum = 0;
for (i = idx - 1; i >= 0; i--) {
struct teo_bin *bin = &cpu_data->state_bins[i];
- s64 span_ns;
intercept_sum += bin->intercepts;
recent_sum += bin->recent;
- span_ns = teo_middle_of_bin(i, drv);
-
if ((!alt_recent || 2 * recent_sum > idx_recent_sum) &&
(!alt_intercepts ||
2 * intercept_sum > idx_intercept_sum)) {
- if (teo_time_ok(span_ns) &&
- !dev->states_usage[i].disable) {
+ /*
+ * Use the current state unless it is too
+ * shallow or disabled, in which case take the
+ * first enabled state that is deep enough.
+ */
+ if (teo_state_ok(i, drv) &&
+ !dev->states_usage[i].disable)
idx = i;
- duration_ns = span_ns;
- } else {
- /*
- * The current state is too shallow or
- * disabled, so take the first enabled
- * deeper state with suitable time span.
- */
+ else
idx = first_suitable_idx;
- duration_ns = first_suitable_span_ns;
- }
+
break;
}
if (dev->states_usage[i].disable)
continue;
- if (!teo_time_ok(span_ns)) {
+ if (!teo_state_ok(i, drv)) {
/*
* The current state is too shallow, but if an
* alternative candidate state has been found,
@@ -525,7 +549,6 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
break;
}
- first_suitable_span_ns = span_ns;
first_suitable_idx = i;
}
}
@@ -539,31 +562,75 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/*
* If the CPU is being utilized over the threshold, choose a shallower
- * non-polling state to improve latency
+ * non-polling state to improve latency, unless the scheduler tick has
+ * been stopped already and the shallower state's target residency is
+ * not sufficiently large.
*/
- if (cpu_data->utilized)
- idx = teo_find_shallower_state(drv, dev, idx, duration_ns, true);
+ if (cpu_utilized) {
+ i = teo_find_shallower_state(drv, dev, idx, KTIME_MAX, true);
+ if (teo_state_ok(i, drv))
+ idx = i;
+ }
-end:
/*
- * Don't stop the tick if the selected state is a polling one or if the
- * expected idle duration is shorter than the tick period length.
+ * Skip the timers check if state 0 is the current candidate one,
+ * because an immediate non-timer wakeup is expected in that case.
*/
- if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
- duration_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
- *stop_tick = false;
+ if (!idx)
+ goto out_tick;
- /*
- * The tick is not going to be stopped, so if the target
- * residency of the state to be returned is not within the time
- * till the closest timer including the tick, try to correct
- * that.
- */
- if (idx > idx0 &&
- drv->states[idx].target_residency_ns > delta_tick)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
+ /*
+ * If state 0 is a polling one, check if the target residency of
+ * the current candidate state is low enough and skip the timers
+ * check in that case too.
+ */
+ if ((drv->states[0].flags & CPUIDLE_FLAG_POLLING) &&
+ drv->states[idx].target_residency_ns < RESIDENCY_THRESHOLD_NS)
+ goto out_tick;
+
+ duration_ns = tick_nohz_get_sleep_length(&delta_tick);
+ cpu_data->sleep_length_ns = duration_ns;
+
+ /*
+	 * If the closest expected timer is before the target residency of the
+ * candidate state, a shallower one needs to be found.
+ */
+ if (drv->states[idx].target_residency_ns > duration_ns) {
+ i = teo_find_shallower_state(drv, dev, idx, duration_ns, false);
+ if (teo_state_ok(i, drv))
+ idx = i;
}
+ /*
+ * If the selected state's target residency is below the tick length
+ * and intercepts occurring before the tick length are the majority of
+ * total wakeup events, do not stop the tick.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ tick_intercept_sum > cpu_data->total / 2 + cpu_data->total / 8)
+ duration_ns = TICK_NSEC / 2;
+
+end:
+ /*
+ * Allow the tick to be stopped unless the selected state is a polling
+ * one or the expected idle duration is shorter than the tick period
+ * length.
+ */
+ if ((!(drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ duration_ns >= TICK_NSEC) || tick_nohz_tick_stopped())
+ return idx;
+
+ /*
+ * The tick is not going to be stopped, so if the target residency of
+ * the state to be returned is not within the time till the closest
+ * timer including the tick, try to correct that.
+ */
+ if (idx > idx0 &&
+ drv->states[idx].target_residency_ns > delta_tick)
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
+
+out_tick:
+ *stop_tick = false;
return idx;
}
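
The selection rework above hinges on the intercepts comparison spelled out in its comment block. Below is a minimal standalone C model of that test; every number, including NR_RECENT, is an illustrative stand-in rather than real governor data:

/*
 * Standalone model of the "wake up early?" test in teo_select() above.
 * All counts are made up for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_RECENT 9	/* stand-in for the governor's recent-events window */

int main(void)
{
	unsigned int idx_intercept_sum = 14;	/* intercepts below the candidate */
	unsigned int idx_hit_sum = 6;		/* hits below the candidate */
	unsigned int idx_recent_sum = 5;	/* recent intercepts below it */
	unsigned int total = 30;		/* all events in the metrics */

	/* Same comparisons as in the hunk above. */
	bool alt_intercepts = 2 * idx_intercept_sum > total - idx_hit_sum;
	bool alt_recent = idx_recent_sum > NR_RECENT / 2;

	/* 28 > 24 and 5 > 4, so a shallower state gets considered. */
	printf("go shallower: %d\n", alt_recent || alt_intercepts);
	return 0;
}

When either condition holds, the loop in the hunk walks down to a shallower enabled state that still satisfies teo_state_ok().
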
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 44e44b8d9ce6..c761952f0dc6 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -70,10 +70,9 @@ config ZCRYPT
select HW_RANDOM
help
Select this option if you want to enable support for
- s390 cryptographic adapters like:
- + Crypto Express 2 up to 7 Coprocessor (CEXxC)
- + Crypto Express 2 up to 7 Accelerator (CEXxA)
- + Crypto Express 4 up to 7 EP11 Coprocessor (CEXxP)
+ s390 cryptographic adapters like Crypto Express 4 up
+ to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP)
+ or Accelerator (CEXxA) mode.
config ZCRYPT_DEBUG
bool "Enable debug features for s390 cryptographic adapters"
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 51a3a7b5b985..3bcfcfc37084 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index c13550090785..8d4c42863a62 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -29,7 +29,7 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
struct sun8i_ce_alg_template *algt;
unsigned int todo, len;
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
@@ -92,13 +92,18 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ce_alg_template *algt;
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
- algt->stat_fb++;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.skcipher.base);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ algt->stat_fb++;
#endif
+ }
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
@@ -133,7 +138,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
int ns = sg_nents_for_len(areq->src, areq->cryptlen);
int nd = sg_nents_for_len(areq->dst, areq->cryptlen);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -294,7 +299,7 @@ theend:
return err;
}
-static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
@@ -308,10 +313,10 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
local_bh_enable();
- return 0;
}
-static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
+ void *async_req)
{
struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -353,7 +358,17 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
+{
+ int err = sun8i_ce_cipher_prepare(engine, areq);
+ if (err)
+ return err;
+
+ sun8i_ce_cipher_run(engine, areq);
+ sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
@@ -406,7 +421,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -423,10 +438,6 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
CRYPTO_MAX_ALG_NAME);
- op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
- op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
- op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
-
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
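
The cipher conversion above folds the former do_one_request/prepare_request/unprepare_request triplet into the single sun8i_ce_cipher_do_one() entry point. A small standalone sketch of that composition pattern follows; the stage functions are hypothetical placeholders, not the driver's code:

/* One entry point chains the former prepare/run/unprepare steps. */
#include <stdio.h>

static int prepare(void *req)
{
	printf("map buffers, build descriptors\n");
	return 0;		/* 0 on success, negative error otherwise */
}

static void run(void *req)
{
	printf("submit the request to the hardware\n");
}

static void unprepare(void *req)
{
	printf("unmap buffers, release resources\n");
}

static int do_one(void *req)
{
	int err = prepare(req);

	if (err)
		return err;

	run(req);
	unprepare(req);
	return 0;
}

int main(void)
{
	return do_one(NULL);
}

This matches the single do_one_request op that the algorithm templates below install.
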
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 07ea0cc82b16..d4ccd5254280 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -9,21 +9,24 @@
*
* You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
@@ -277,7 +280,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_AES,
.ce_blockmode = CE_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ce",
@@ -298,13 +301,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_aes_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_AES,
.ce_blockmode = CE_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun8i-ce",
@@ -324,13 +330,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_aes_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_DES3,
.ce_blockmode = CE_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-sun8i-ce",
@@ -351,13 +360,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_des3_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_DES3,
.ce_blockmode = CE_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun8i-ce",
@@ -377,12 +389,15 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_des3_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ce_cipher_do_one,
+ },
},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_MD5,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -390,6 +405,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@@ -404,15 +421,17 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
+
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA1,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -420,6 +439,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@@ -434,15 +455,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA224,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -450,6 +472,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -464,15 +488,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA256,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -480,6 +505,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -494,15 +521,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA384,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -510,6 +538,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
@@ -524,15 +554,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA512,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@@ -540,6 +571,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
+ .init_tfm = sun8i_ce_hash_init_tfm,
+ .exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
@@ -554,11 +587,12 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ce_hash_crainit,
- .cra_exit = sun8i_ce_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ce_hash_run,
+ },
},
#endif
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
@@ -582,14 +616,18 @@ static struct sun8i_ce_alg_template ce_algs[] = {
#endif
};
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
{
- struct sun8i_ce_dev *ce = seq->private;
+ struct sun8i_ce_dev *ce __maybe_unused = seq->private;
unsigned int i;
for (i = 0; i < MAXFLOW; i++)
- seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
+ seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+ ce->chanlist[i].stat_req);
+#else
+ 0ul);
+#endif
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
@@ -597,8 +635,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ce_algs[i].alg.skcipher.base.cra_driver_name,
- ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].alg.skcipher.base.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ce_algs[i].fbname);
@@ -621,8 +659,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ce_algs[i].alg.hash.halg.base.cra_driver_name,
- ce_algs[i].alg.hash.halg.base.cra_name,
+ ce_algs[i].alg.hash.base.halg.base.cra_driver_name,
+ ce_algs[i].alg.hash.base.halg.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ce_algs[i].fbname);
@@ -643,7 +681,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
break;
}
}
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+#if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \
+ defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)
seq_printf(seq, "HWRNG %lu %lu\n",
ce->hwrng_stat_req, ce->hwrng_stat_bytes);
#endif
@@ -651,7 +690,6 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
-#endif
static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
{
@@ -839,7 +877,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
if (ce_method == CE_ID_NOTSUPP) {
dev_dbg(ce->dev,
"DEBUG: Algo of %s not supported\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
@@ -847,16 +885,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
ce_method = ce->variant->op_mode[id];
if (ce_method == CE_ID_NOTSUPP) {
dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
dev_info(ce->dev, "Register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
- err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "ERROR: Fail to register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@@ -867,16 +905,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
if (ce_method == CE_ID_NOTSUPP) {
dev_info(ce->dev,
"DEBUG: Algo of %s not supported\n",
- ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
dev_info(ce->dev, "Register %s\n",
- ce_algs[i].alg.hash.halg.base.cra_name);
- err = crypto_register_ahash(&ce_algs[i].alg.hash);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
+ err = crypto_engine_register_ahash(&ce_algs[i].alg.hash);
if (err) {
dev_err(ce->dev, "ERROR: Fail to register %s\n",
- ce_algs[i].alg.hash.halg.base.cra_name);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@@ -916,13 +954,13 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
- ce_algs[i].alg.skcipher.base.cra_name);
- crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(ce->dev, "Unregister %d %s\n", i,
- ce_algs[i].alg.hash.halg.base.cra_name);
- crypto_unregister_ahash(&ce_algs[i].alg.hash);
+ ce_algs[i].alg.hash.base.halg.base.cra_name);
+ crypto_engine_unregister_ahash(&ce_algs[i].alg.hash);
break;
case CRYPTO_ALG_TYPE_RNG:
dev_info(ce->dev, "Unregister %d %s\n", i,
@@ -1007,13 +1045,21 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
+		/* Ignore debugfs errors */
+ dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444,
+ dbgfs_dir, ce,
+ &sun8i_ce_debugfs_fops);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- /* Ignore error of debugfs */
- ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
- ce->dbgfs_stats = debugfs_create_file("stats", 0444,
- ce->dbgfs_dir, ce,
- &sun8i_ce_debugfs_fops);
+ ce->dbgfs_dir = dbgfs_dir;
+ ce->dbgfs_stats = dbgfs_stats;
#endif
+ }
return 0;
error_alg:
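
The core conversion above moves every algorithm from skcipher_alg/ahash_alg plus a per-TFM crypto_engine_ctx to the combined skcipher_engine_alg/ahash_engine_alg types registered through crypto_engine_register_skcipher()/crypto_engine_register_ahash(). A condensed kernel-style sketch of the new shape, using only fields and helpers visible in these hunks (example_* names are hypothetical, and this is not meant to build outside a kernel tree):

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int example_do_one(struct crypto_engine *engine, void *areq)
{
	/* prepare, run and clean up one skcipher request here */
	return 0;
}

static struct skcipher_engine_alg example_alg = {
	.base = {
		.base = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-example",
			.cra_module		= THIS_MODULE,
		},
	},
	.op = {
		.do_one_request = example_do_one,
	},
};

static int example_register(void)
{
	/* replaces crypto_register_skcipher() in the old code */
	return crypto_engine_register_skcipher(&example_alg);
}

static void example_unregister(void)
{
	crypto_engine_unregister_skcipher(&example_alg);
}

The engine callback now lives in the per-algorithm template, which is why the enginectx member disappears from the TFM contexts in the header changes further down.
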
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 930ad157004c..d358334e5981 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -9,48 +9,46 @@
*
* You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
+
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "sun8i-ce.h"
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ce_alg_template *algt;
int err;
- memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
-
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
op->ce = algt->ce;
- op->enginectx.op.do_one_request = sun8i_ce_hash_run;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
/* FALLBACK */
- op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
return PTR_ERR(op->fallback_tfm);
}
- if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
- algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+ crypto_ahash_set_statesize(tfm,
+ crypto_ahash_statesize(op->fallback_tfm));
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
+ memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ce->dev);
@@ -63,9 +61,9 @@ error_pm:
return err;
}
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+ struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
crypto_free_ahash(tfmctx->fallback_tfm);
pm_runtime_put_sync_suspend(tfmctx->ce->dev);
@@ -114,20 +112,22 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = areq->result;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -152,10 +152,6 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -164,10 +160,17 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -177,10 +180,6 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -189,10 +188,17 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+ struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+ algt = container_of(alg, struct sun8i_ce_alg_template,
+ alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -204,7 +210,7 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct scatterlist *sg;
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
algt->stat_fb_len0++;
@@ -253,7 +259,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
return sun8i_ce_hash_digest_fb(areq);
}
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
e = sun8i_ce_get_engine_number(ce);
@@ -345,11 +351,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
dma_addr_t addr_res, addr_pad;
int ns = sg_nents_for_len(areq->src, areq->nbytes);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
- bs = algt->alg.hash.halg.base.cra_blocksize;
- digestsize = algt->alg.hash.halg.digestsize;
+ bs = algt->alg.hash.base.halg.base.cra_blocksize;
+ digestsize = algt->alg.hash.base.halg.digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
@@ -454,14 +460,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
chan->timeout = areq->nbytes;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+ err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
theend:
kfree(buf);
kfree(result);
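
The hash conversion above also swaps the base-alg cra_init/cra_exit hooks for init_tfm/exit_tfm on the ahash_alg, so the setup code receives a typed crypto_ahash directly. A kernel-style sketch of that lifecycle, again with hypothetical example_* names and only helpers that appear in this diff:

#include <crypto/internal/hash.h>
#include <linux/err.h>

struct example_hash_ctx {
	struct crypto_ahash *fallback_tfm;
};

static int example_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct example_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/*
	 * Inherit the fallback's state and request sizes; the driver above
	 * additionally adds its own request-context size to the reqsize.
	 */
	crypto_ahash_set_statesize(tfm, crypto_ahash_statesize(ctx->fallback_tfm));
	crypto_ahash_set_reqsize(tfm, crypto_ahash_reqsize(ctx->fallback_tfm));
	return 0;
}

static void example_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct example_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	crypto_free_ahash(ctx->fallback_tfm);
}

These two functions would then be wired up as .init_tfm and .exit_tfm in the ahash_alg, exactly as the ce_algs[] entries above do.
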
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 27029fb77e29..93d4985def87 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -265,14 +265,12 @@ struct sun8i_cipher_req_ctx {
/*
* struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*/
struct sun8i_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sun8i_ce_dev *ce;
@@ -281,12 +279,10 @@ struct sun8i_cipher_tfm_ctx {
/*
* struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
- * @enginectx: crypto_engine used by this TFM
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*/
struct sun8i_ce_hash_tfm_ctx {
- struct crypto_engine_ctx enginectx;
struct sun8i_ce_dev *ce;
struct crypto_ahash *fallback_tfm;
};
@@ -329,8 +325,8 @@ struct sun8i_ce_alg_template {
u32 ce_blockmode;
struct sun8i_ce_dev *ce;
union {
- struct skcipher_alg skcipher;
- struct ahash_alg hash;
+ struct skcipher_engine_alg skcipher;
+ struct ahash_engine_alg hash;
struct rng_alg rng;
} alg;
unsigned long stat_req;
@@ -347,14 +343,13 @@ struct sun8i_ce_alg_template {
char fbname[CRYPTO_MAX_ALG_NAME];
};
-int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
-
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq);
int sun8i_ce_skdecrypt(struct skcipher_request *areq);
int sun8i_ce_skencrypt(struct skcipher_request *areq);
@@ -362,12 +357,11 @@ int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm);
int sun8i_ce_hash_init(struct ahash_request *areq);
int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
-int sun8i_ce_hash(struct ahash_request *areq);
int sun8i_ce_hash_final(struct ahash_request *areq);
int sun8i_ce_hash_update(struct ahash_request *areq);
int sun8i_ce_hash_finup(struct ahash_request *areq);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 381a90fbeaff..7fa359725ec7 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -24,7 +24,7 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
struct scatterlist *in_sg = areq->src;
struct scatterlist *out_sg = areq->dst;
struct scatterlist *sg;
@@ -93,13 +93,18 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ss_alg_template *algt;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.skcipher.base);
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
- algt->stat_fb++;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ algt->stat_fb++;
#endif
+ }
+
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
areq->base.complete, areq->base.data);
@@ -193,7 +198,7 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
int i;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -324,7 +329,7 @@ theend:
return err;
}
-static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -390,7 +395,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
op->ss = algt->ss;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -408,10 +413,6 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
CRYPTO_MAX_ALG_NAME);
- op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0) {
dev_err(op->ss->dev, "pm error %d\n", err);
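
Both drivers also replace bare #ifdef CONFIG_*_DEBUG blocks in the fallback paths with if (IS_ENABLED(...)), so the debug-only code is always seen by the compiler and is optimized away when the option is off; only the statistics update itself stays under the #ifdef, and the local algt pointer is marked __maybe_unused to keep the build clean in that case. A standalone plain-C model of the idea, where CONFIG_EXAMPLE_DEBUG and the one-line IS_ENABLED() are stand-ins for the kernel's kconfig machinery:

#include <stdio.h>

#define CONFIG_EXAMPLE_DEBUG	1
#define IS_ENABLED(option)	(option)

static unsigned long stat_fb;

static void fallback_path(void)
{
	if (IS_ENABLED(CONFIG_EXAMPLE_DEBUG))
		stat_fb++;	/* the drivers keep only this update under #ifdef */

	printf("fallback used %lu time(s)\n", stat_fb);
}

int main(void)
{
	fallback_path();
	return 0;
}
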
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 3dd844b40ff7..4a9587285c04 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -9,22 +9,23 @@
*
* You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"
@@ -168,7 +169,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ss",
@@ -189,13 +190,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_aes_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun8i-ss",
@@ -215,13 +219,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_aes_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_DES3,
.ss_blockmode = SS_ID_OP_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-sun8i-ss",
@@ -242,13 +249,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_des3_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_DES3,
.ss_blockmode = SS_ID_OP_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun8i-ss",
@@ -268,7 +278,10 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_des3_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sun8i_ss_handle_cipher_request,
+ },
},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG
{
@@ -292,7 +305,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_MD5,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -300,6 +313,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@@ -314,15 +329,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA1,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -330,6 +346,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@@ -344,15 +362,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA224,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -360,6 +379,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -374,15 +395,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA256,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -390,6 +412,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -404,15 +428,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA1,
- .alg.hash = {
+ .alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@@ -420,6 +445,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
+ .init_tfm = sun8i_ss_hash_init_tfm,
+ .exit_tfm = sun8i_ss_hash_exit_tfm,
.setkey = sun8i_ss_hmac_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
@@ -435,23 +462,28 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
- .cra_init = sun8i_ss_hash_crainit,
- .cra_exit = sun8i_ss_hash_craexit,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = sun8i_ss_hash_run,
+ },
},
#endif
};
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
{
- struct sun8i_ss_dev *ss = seq->private;
+ struct sun8i_ss_dev *ss __maybe_unused = seq->private;
unsigned int i;
for (i = 0; i < MAXFLOW; i++)
- seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
+ seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+ ss->flows[i].stat_req);
+#else
+ 0ul);
+#endif
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
if (!ss_algs[i].ss)
@@ -459,8 +491,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ss_algs[i].alg.skcipher.base.cra_driver_name,
- ss_algs[i].alg.skcipher.base.cra_name,
+ ss_algs[i].alg.skcipher.base.base.cra_driver_name,
+ ss_algs[i].alg.skcipher.base.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
@@ -482,8 +514,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ss_algs[i].alg.hash.halg.base.cra_driver_name,
- ss_algs[i].alg.hash.halg.base.cra_name,
+ ss_algs[i].alg.hash.base.halg.base.cra_driver_name,
+ ss_algs[i].alg.hash.base.halg.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ss_algs[i].fbname);
@@ -502,7 +534,6 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs);
-#endif
static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
{
@@ -659,7 +690,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev,
"DEBUG: Algo of %s not supported\n",
- ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
@@ -667,16 +698,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
ss_method = ss->variant->op_mode[id];
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
- ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
dev_info(ss->dev, "DEBUG: Register %s\n",
- ss_algs[i].alg.skcipher.base.cra_name);
- err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
+ err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher);
if (err) {
dev_err(ss->dev, "Fail to register %s\n",
- ss_algs[i].alg.skcipher.base.cra_name);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
return err;
}
@@ -695,16 +726,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev,
"DEBUG: Algo of %s not supported\n",
- ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
dev_info(ss->dev, "Register %s\n",
- ss_algs[i].alg.hash.halg.base.cra_name);
- err = crypto_register_ahash(&ss_algs[i].alg.hash);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
+ err = crypto_engine_register_ahash(&ss_algs[i].alg.hash);
if (err) {
dev_err(ss->dev, "ERROR: Fail to register %s\n",
- ss_algs[i].alg.hash.halg.base.cra_name);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
ss_algs[i].ss = NULL;
return err;
}
@@ -727,8 +758,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ss->dev, "Unregister %d %s\n", i,
- ss_algs[i].alg.skcipher.base.cra_name);
- crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
+ ss_algs[i].alg.skcipher.base.base.cra_name);
+ crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_RNG:
dev_info(ss->dev, "Unregister %d %s\n", i,
@@ -737,8 +768,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(ss->dev, "Unregister %d %s\n", i,
- ss_algs[i].alg.hash.halg.base.cra_name);
- crypto_unregister_ahash(&ss_algs[i].alg.hash);
+ ss_algs[i].alg.hash.base.halg.base.cra_name);
+ crypto_engine_unregister_ahash(&ss_algs[i].alg.hash);
break;
}
}
@@ -851,13 +882,21 @@ static int sun8i_ss_probe(struct platform_device *pdev)
pm_runtime_put_sync(ss->dev);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
+		/* Ignore debugfs errors */
+ dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444,
+ dbgfs_dir, ss,
+ &sun8i_ss_debugfs_fops);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- /* Ignore error of debugfs */
- ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
- ss->dbgfs_stats = debugfs_create_file("stats", 0444,
- ss->dbgfs_dir, ss,
- &sun8i_ss_debugfs_fops);
+ ss->dbgfs_dir = dbgfs_dir;
+ ss->dbgfs_stats = dbgfs_stats;
#endif
+ }
return 0;
error_alg:
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index a4b67d130d11..d70b105dcfa1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -9,16 +9,21 @@
*
* You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
-#include <linux/bottom_half.h>
-#include <linux/dma-mapping.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
+
#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/bottom_half.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "sun8i-ss.h"
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
@@ -60,14 +65,11 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
- struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
int digestsize, i;
int bs = crypto_ahash_blocksize(ahash);
int ret;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- digestsize = algt->alg.hash.halg.digestsize;
+ digestsize = crypto_ahash_digestsize(ahash);
if (keylen > bs) {
ret = sun8i_ss_hashkey(tfmctx, key, keylen);
@@ -107,38 +109,33 @@ err_opad:
return ret;
}
-int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+ struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
int err;
- memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));
-
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
op->ss = algt->ss;
- op->enginectx.op.do_one_request = sun8i_ss_hash_run;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
/* FALLBACK */
- op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+ op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(algt->ss->dev, "Fallback driver could no be loaded\n");
return PTR_ERR(op->fallback_tfm);
}
- if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
- algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+ crypto_ahash_set_statesize(tfm,
+ crypto_ahash_statesize(op->fallback_tfm));
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ss_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
- memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);
+ memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
+ CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
@@ -150,9 +147,9 @@ error_pm:
return err;
}
-void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+ struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
kfree_sensitive(tfmctx->ipad);
kfree_sensitive(tfmctx->opad);
@@ -204,20 +201,23 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = areq->result;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.hash.base);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_final(&rctx->fallback_req);
}
@@ -242,10 +242,6 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -254,10 +250,18 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.hash.base);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_finup(&rctx->fallback_req);
}
@@ -267,10 +271,6 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ss_alg_template *algt;
-#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@@ -279,10 +279,18 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct sun8i_ss_alg_template *algt __maybe_unused;
+
+ algt = container_of(alg, struct sun8i_ss_alg_template,
+ alg.hash.base);
+
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
- algt->stat_fb++;
+ algt->stat_fb++;
#endif
+ }
return crypto_ahash_digest(&rctx->fallback_req);
}
@@ -349,11 +357,11 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct scatterlist *sg;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
algt->stat_fb_len++;
@@ -398,8 +406,8 @@ static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct crypto_engine *engine;
@@ -408,7 +416,7 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
if (sun8i_ss_hash_need_fallback(areq))
return sun8i_ss_hash_digest_fb(areq);
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
e = sun8i_ss_get_engine_number(ss);
@@ -484,8 +492,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct scatterlist *sg;
@@ -504,10 +512,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
*/
int hmac = 0;
- algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+ algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
- digestsize = algt->alg.hash.halg.digestsize;
+ digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
@@ -700,7 +708,7 @@ err_dma_result:
}
if (!err)
- memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+ memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
index df6f08f6092f..ae66eb45fb24 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -201,16 +201,12 @@ struct sun8i_cipher_req_ctx {
/*
* struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ss: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
- *
- * enginectx must be the first element
*/
struct sun8i_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sun8i_ss_dev *ss;
@@ -229,14 +225,10 @@ struct sun8i_ss_rng_tfm_ctx {
/*
* struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
- * @enginectx: crypto_engine used by this TFM
* @fallback_tfm: pointer to the fallback TFM
* @ss: pointer to the private data of driver handling this TFM
- *
- * enginectx must be the first element
*/
struct sun8i_ss_hash_tfm_ctx {
- struct crypto_engine_ctx enginectx;
struct crypto_ahash *fallback_tfm;
struct sun8i_ss_dev *ss;
u8 *ipad;
@@ -279,9 +271,9 @@ struct sun8i_ss_alg_template {
u32 ss_blockmode;
struct sun8i_ss_dev *ss;
union {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
struct rng_alg rng;
- struct ahash_alg hash;
+ struct ahash_engine_alg hash;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
@@ -293,14 +285,13 @@ struct sun8i_ss_alg_template {
char fbname[CRYPTO_MAX_ALG_NAME];
};
-int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
-
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq);
int sun8i_ss_skdecrypt(struct skcipher_request *areq);
int sun8i_ss_skencrypt(struct skcipher_request *areq);
@@ -313,8 +304,8 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen
int sun8i_ss_prng_init(struct crypto_tfm *tfm);
void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
-int sun8i_ss_hash_crainit(struct crypto_tfm *tfm);
-void sun8i_ss_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm);
int sun8i_ss_hash_init(struct ahash_request *areq);
int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index af017a087ebf..3308406612fc 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -65,7 +65,7 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
- algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
algt->stat_fb++;
#endif
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -101,7 +101,7 @@ static int meson_cipher(struct skcipher_request *areq)
void *backup_iv = NULL, *bkeyiv;
u32 v;
- algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -258,8 +258,7 @@ theend:
return err;
}
-static int meson_handle_cipher_request(struct crypto_engine *engine,
- void *areq)
+int meson_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -318,7 +317,7 @@ int meson_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
- algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+ algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
op->mc = algt->mc;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -331,10 +330,6 @@ int meson_cipher_init(struct crypto_tfm *tfm)
sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
- op->enginectx.op.do_one_request = meson_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
return 0;
}
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index 937187027ad5..da6dfe0f9ac3 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -6,17 +6,19 @@
*
* Core file which registers crypto algorithms supported by the hardware.
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/dma-mapping.h>
#include "amlogic-gxl.h"
@@ -47,7 +49,7 @@ static struct meson_alg_template mc_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_CBC,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-gxl",
@@ -68,12 +70,15 @@ static struct meson_alg_template mc_algs[] = {
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = meson_handle_cipher_request,
+ },
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_ECB,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-gxl",
@@ -93,33 +98,43 @@ static struct meson_alg_template mc_algs[] = {
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = meson_handle_cipher_request,
+ },
},
};
-#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
static int meson_debugfs_show(struct seq_file *seq, void *v)
{
- struct meson_dev *mc = seq->private;
+ struct meson_dev *mc __maybe_unused = seq->private;
int i;
for (i = 0; i < MAXFLOW; i++)
- seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
+ seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+ mc->chanlist[i].stat_req);
+#else
+ 0ul);
+#endif
for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s %lu %lu\n",
- mc_algs[i].alg.skcipher.base.cra_driver_name,
- mc_algs[i].alg.skcipher.base.cra_name,
+ mc_algs[i].alg.skcipher.base.base.cra_driver_name,
+ mc_algs[i].alg.skcipher.base.base.cra_name,
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc_algs[i].stat_req, mc_algs[i].stat_fb);
+#else
+ 0ul, 0ul);
+#endif
break;
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
-#endif
static void meson_free_chanlist(struct meson_dev *mc, int i)
{
@@ -183,10 +198,10 @@ static int meson_register_algs(struct meson_dev *mc)
mc_algs[i].mc = mc;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
+ err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
if (err) {
dev_err(mc->dev, "Fail to register %s\n",
- mc_algs[i].alg.skcipher.base.cra_name);
+ mc_algs[i].alg.skcipher.base.base.cra_name);
mc_algs[i].mc = NULL;
return err;
}
@@ -206,7 +221,7 @@ static void meson_unregister_algs(struct meson_dev *mc)
continue;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
- crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
+ crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);
break;
}
}
@@ -264,10 +279,16 @@ static int meson_crypto_probe(struct platform_device *pdev)
if (err)
goto error_alg;
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) {
+ struct dentry *dbgfs_dir;
+
+ dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
+ debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops);
+
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
- mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
- debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
+ mc->dbgfs_dir = dbgfs_dir;
#endif
+ }
return 0;
error_alg:
diff --git a/drivers/crypto/amlogic/amlogic-gxl.h b/drivers/crypto/amlogic/amlogic-gxl.h
index 8c0746a1d6d4..1013a666c932 100644
--- a/drivers/crypto/amlogic/amlogic-gxl.h
+++ b/drivers/crypto/amlogic/amlogic-gxl.h
@@ -114,7 +114,6 @@ struct meson_cipher_req_ctx {
/*
* struct meson_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @keymode: The keymode(type and size of key) associated with this TFM
@@ -122,7 +121,6 @@ struct meson_cipher_req_ctx {
* @fallback_tfm: pointer to the fallback TFM
*/
struct meson_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
u32 keymode;
@@ -143,7 +141,7 @@ struct meson_alg_template {
u32 type;
u32 blockmode;
union {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
} alg;
struct meson_dev *mc;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
@@ -160,3 +158,4 @@ int meson_cipher_init(struct crypto_tfm *tfm);
void meson_cipher_exit(struct crypto_tfm *tfm);
int meson_skdecrypt(struct skcipher_request *areq);
int meson_skencrypt(struct skcipher_request *areq);
+int meson_handle_cipher_request(struct crypto_engine *engine, void *areq);
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 470122c87fea..247c568aa8df 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -2,25 +2,23 @@
/*
* Copyright 2021 Aspeed Technology Inc.
*/
-#include <crypto/akcipher.h>
-#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/interrupt.h>
#include <linux/count_zeros.h>
-#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...) \
@@ -112,7 +110,6 @@ struct aspeed_acry_dev {
};
struct aspeed_acry_ctx {
- struct crypto_engine_ctx enginectx;
struct aspeed_acry_dev *acry_dev;
struct rsa_key key;
@@ -131,7 +128,7 @@ struct aspeed_acry_ctx {
struct aspeed_acry_alg {
struct aspeed_acry_dev *acry_dev;
- struct akcipher_alg akcipher;
+ struct akcipher_engine_alg akcipher;
};
enum aspeed_rsa_key_mode {
@@ -577,7 +574,7 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
const char *name = crypto_tfm_alg_name(&tfm->base);
struct aspeed_acry_alg *acry_alg;
- acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);
+ acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
ctx->acry_dev = acry_alg->acry_dev;
@@ -589,10 +586,6 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->fallback_tfm);
}
- ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -605,7 +598,7 @@ static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
{
- .akcipher = {
+ .akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
.sign = aspeed_acry_rsa_dec,
@@ -627,6 +620,9 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
},
},
+ .akcipher.op = {
+ .do_one_request = aspeed_acry_do_request,
+ },
},
};
@@ -636,10 +632,10 @@ static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
- rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+ rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
if (rc) {
ACRY_DBG(acry_dev, "Failed to register %s\n",
- aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
+ aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
}
}
}
@@ -649,7 +645,7 @@ static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
- crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+ crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
}
/* ACRY interrupt service routine. */
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index ef73b0028b4d..f0eddb7854e5 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -4,6 +4,17 @@
*/
#include "aspeed-hace.h"
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
#define CIPHER_DBG(h, fmt, ...) \
@@ -696,7 +707,7 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
struct aspeed_hace_alg *crypto_alg;
- crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+ crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base);
ctx->hace_dev = crypto_alg->hace_dev;
ctx->start = aspeed_hace_skcipher_trigger;
@@ -713,10 +724,6 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm));
- ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -731,7 +738,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
static struct aspeed_hace_alg aspeed_crypto_algs[] = {
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = aspeed_aes_setkey,
@@ -751,10 +758,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -775,10 +785,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -799,10 +812,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -823,10 +839,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -846,10 +865,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -870,10 +892,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -894,10 +919,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -918,10 +946,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -941,10 +972,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -965,10 +999,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -989,10 +1026,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -1013,13 +1053,16 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
};
static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -1039,10 +1082,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@@ -1062,10 +1108,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
{
- .alg.skcipher = {
+ .alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@@ -1085,7 +1134,10 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = aspeed_crypto_do_request,
+ },
},
};
@@ -1095,13 +1147,13 @@ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
- crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
if (hace_dev->version != AST2600_VERSION)
return;
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
- crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
}
void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
@@ -1112,10 +1164,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
aspeed_crypto_algs[i].hace_dev = hace_dev;
- rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+ rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
if (rc) {
CIPHER_DBG(hace_dev, "Failed to register %s\n",
- aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+ aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name);
}
}
@@ -1124,10 +1176,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
- rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
if (rc) {
CIPHER_DBG(hace_dev, "Failed to register %s\n",
- aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+ aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name);
}
}
}
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 935135229ebd..0b6e49c06eff 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -4,6 +4,17 @@
*/
#include "aspeed-hace.h"
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define AHASH_DBG(h, fmt, ...) \
@@ -48,28 +59,6 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
-static const __be32 sha512_224_iv[16] = {
- cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
- cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
- cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
- cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
- cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
- cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
- cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
- cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
-};
-
-static const __be32 sha512_256_iv[16] = {
- cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
- cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
- cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
- cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
- cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
- cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
- cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
- cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
-};
-
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@@ -565,8 +554,8 @@ static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
return 0;
}
-static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
- void *areq)
+static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -581,8 +570,12 @@ static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
else
hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+}
- return 0;
+static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
+{
+ aspeed_ahash_prepare_request(engine, areq);
+ return aspeed_ahash_do_request(engine, areq);
}
static int aspeed_sham_update(struct ahash_request *req)
@@ -750,62 +743,6 @@ static int aspeed_sham_init(struct ahash_request *req)
return 0;
}
-static int aspeed_sha512s_init(struct ahash_request *req)
-{
- struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
- struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
- struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
- AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
-
- rctx->cmd = HASH_CMD_ACC_MODE;
- rctx->flags = 0;
-
- switch (crypto_ahash_digestsize(tfm)) {
- case SHA224_DIGEST_SIZE:
- rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
- HASH_CMD_SHA_SWAP;
- rctx->flags |= SHA_FLAGS_SHA512_224;
- rctx->digsize = SHA224_DIGEST_SIZE;
- rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = sha512_224_iv;
- rctx->ivsize = 64;
- memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
- break;
- case SHA256_DIGEST_SIZE:
- rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
- HASH_CMD_SHA_SWAP;
- rctx->flags |= SHA_FLAGS_SHA512_256;
- rctx->digsize = SHA256_DIGEST_SIZE;
- rctx->block_size = SHA512_BLOCK_SIZE;
- rctx->sha_iv = sha512_256_iv;
- rctx->ivsize = 64;
- memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
- break;
- default:
- dev_warn(tctx->hace_dev->dev, "digest size %d not support\n",
- crypto_ahash_digestsize(tfm));
- return -EINVAL;
- }
-
- rctx->bufcnt = 0;
- rctx->total = 0;
- rctx->digcnt[0] = 0;
- rctx->digcnt[1] = 0;
-
- /* HMAC init */
- if (tctx->flags & SHA_FLAGS_HMAC) {
- rctx->digcnt[0] = rctx->block_size;
- rctx->bufcnt = rctx->block_size;
- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
- rctx->flags |= SHA_FLAGS_HMAC;
- }
-
- return 0;
-}
-
static int aspeed_sham_digest(struct ahash_request *req)
{
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
@@ -854,7 +791,7 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
- ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+ ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
tctx->flags = 0;
@@ -876,10 +813,6 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
}
}
- tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
- tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
- tctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -917,7 +850,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in)
static struct aspeed_hace_alg aspeed_ahash_algs[] = {
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -944,9 +877,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -973,9 +909,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1002,10 +941,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha1",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1034,10 +976,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha224",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1066,10 +1011,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha256",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1098,12 +1046,15 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
};
static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1130,9 +1081,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1159,68 +1113,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
- },
- {
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "sha512_224",
- .cra_driver_name = "aspeed-sha512_224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- },
- {
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "sha512_256",
- .cra_driver_name = "aspeed-sha512_256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha384",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1249,10 +1148,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
+ },
},
{
.alg_base = "sha512",
- .alg.ahash = {
+ .alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@@ -1281,69 +1183,8 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
- },
- {
- .alg_base = "sha512_224",
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512_224)",
- .cra_driver_name = "aspeed-hmac-sha512_224",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
- },
- },
- {
- .alg_base = "sha512_256",
- .alg.ahash = {
- .init = aspeed_sha512s_init,
- .update = aspeed_sham_update,
- .final = aspeed_sham_final,
- .finup = aspeed_sham_finup,
- .digest = aspeed_sham_digest,
- .setkey = aspeed_sham_setkey,
- .export = aspeed_sham_export,
- .import = aspeed_sham_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct aspeed_sham_reqctx),
- .base = {
- .cra_name = "hmac(sha512_256)",
- .cra_driver_name = "aspeed-hmac-sha512_256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
- sizeof(struct aspeed_sha_hmac_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = aspeed_sham_cra_init,
- .cra_exit = aspeed_sham_cra_exit,
- }
- }
+ .alg.ahash.op = {
+ .do_one_request = aspeed_ahash_do_one,
},
},
};
@@ -1353,13 +1194,13 @@ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
- crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
if (hace_dev->version != AST2600_VERSION)
return;
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
- crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
}
void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
@@ -1370,10 +1211,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
aspeed_ahash_algs[i].hace_dev = hace_dev;
- rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+ rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
if (rc) {
AHASH_DBG(hace_dev, "Failed to register %s\n",
- aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+ aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
}
}
@@ -1382,10 +1223,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
- rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+ rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
if (rc) {
AHASH_DBG(hace_dev, "Failed to register %s\n",
- aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+ aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
}
}
}
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index d2871e1de9c2..8f7aab82e1d8 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -3,7 +3,14 @@
* Copyright (c) 2021 Aspeed Technology Inc.
*/
+#include "aspeed-hace.h"
+#include <crypto/engine.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -11,8 +18,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#include "aspeed-hace.h"
-
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define HACE_DBG(d, fmt, ...) \
dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 05d0a15d546d..68f70e01fccb 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -2,25 +2,14 @@
#ifndef __ASPEED_HACE_H__
#define __ASPEED_HACE_H__
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fips.h>
-#include <linux/dma-mapping.h>
#include <crypto/aes.h>
-#include <crypto/des.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/akcipher.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/kpp.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
#include <crypto/engine.h>
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
+#include <crypto/hash.h>
#include <crypto/sha2.h>
+#include <linux/bits.h>
+#include <linux/compiler_attributes.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
/*****************************
* *
@@ -144,6 +133,7 @@
HACE_CMD_OFB | HACE_CMD_CTR)
struct aspeed_hace_dev;
+struct scatterlist;
typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
@@ -178,8 +168,6 @@ struct aspeed_sha_hmac_ctx {
};
struct aspeed_sham_ctx {
- struct crypto_engine_ctx enginectx;
-
struct aspeed_hace_dev *hace_dev;
unsigned long flags; /* hmac flag */
@@ -235,8 +223,6 @@ struct aspeed_engine_crypto {
};
struct aspeed_cipher_ctx {
- struct crypto_engine_ctx enginectx;
-
struct aspeed_hace_dev *hace_dev;
int key_len;
u8 key[AES_MAX_KEYLENGTH];
@@ -275,8 +261,8 @@ struct aspeed_hace_alg {
const char *alg_base;
union {
- struct skcipher_alg skcipher;
- struct ahash_alg ahash;
+ struct skcipher_engine_alg skcipher;
+ struct ahash_engine_alg ahash;
} alg;
};
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 143d33fbb316..55b5f577b01c 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@@ -2533,13 +2533,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
}
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-aes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
@@ -2566,11 +2564,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
- /* Get the base address */
- aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!aes_res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
+ aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
+ if (IS_ERR(aes_dd->io_base)) {
+ err = PTR_ERR(aes_dd->io_base);
goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@@ -2597,13 +2593,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
- aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
- if (IS_ERR(aes_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
- err = PTR_ERR(aes_dd->io_base);
- goto err_tasklet_kill;
- }
-
err = clk_prepare(aes_dd->iclk);
if (err)
goto err_tasklet_kill;
@@ -2687,7 +2676,7 @@ static struct platform_driver atmel_aes_driver = {
.remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
- .of_match_table = of_match_ptr(atmel_aes_dt_ids),
+ .of_match_table = atmel_aes_dt_ids,
},
};
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 432beabd79e6..590ea984c622 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 6bef634d3c86..3622120add62 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@@ -1770,7 +1770,8 @@ static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
size_t bs = ctx->block_size;
size_t i, num_words = bs / sizeof(u32);
- memcpy(hmac->opad, hmac->ipad, bs);
+ unsafe_memcpy(hmac->opad, hmac->ipad, bs,
+ "fortified memcpy causes -Wrestrict warning");
for (i = 0; i < num_words; ++i) {
hmac->ipad[i] ^= 0x36363636;
hmac->opad[i] ^= 0x5c5c5c5c;
@@ -2499,8 +2500,8 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
if (IS_ERR(dd->dma_lch_in.chan)) {
- dev_err(dd->dev, "DMA channel is not available\n");
- return PTR_ERR(dd->dma_lch_in.chan);
+ return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan),
+ "DMA channel is not available\n");
}
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
@@ -2570,14 +2571,12 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
}
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-sha" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
@@ -2604,11 +2603,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
- /* Get the base address */
- sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!sha_res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
+ sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res);
+ if (IS_ERR(sha_dd->io_base)) {
+ err = PTR_ERR(sha_dd->io_base);
goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@@ -2635,13 +2632,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
- sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
- if (IS_ERR(sha_dd->io_base)) {
- dev_err(dev, "can't ioremap\n");
- err = PTR_ERR(sha_dd->io_base);
- goto err_tasklet_kill;
- }
-
err = clk_prepare(sha_dd->iclk);
if (err)
goto err_tasklet_kill;
@@ -2716,7 +2706,7 @@ static struct platform_driver atmel_sha_driver = {
.remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
- .of_match_table = of_match_ptr(atmel_sha_dt_ids),
+ .of_match_table = atmel_sha_dt_ids,
},
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index c9ded8be9c39..099b32a10dd7 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@@ -1139,13 +1139,11 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
}
}
-#if defined(CONFIG_OF)
static const struct of_device_id atmel_tdes_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-tdes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
@@ -1172,11 +1170,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
- /* Get the base address */
- tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!tdes_res) {
- dev_err(dev, "no MEM resource info\n");
- err = -ENODEV;
+ tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
+ if (IS_ERR(tdes_dd->io_base)) {
+ err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@@ -1203,12 +1199,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
- tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
- if (IS_ERR(tdes_dd->io_base)) {
- err = PTR_ERR(tdes_dd->io_base);
- goto err_tasklet_kill;
- }
-
err = atmel_tdes_hw_version_init(tdes_dd);
if (err)
goto err_tasklet_kill;
@@ -1282,7 +1272,7 @@ static struct platform_driver atmel_tdes_driver = {
.remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
- .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
+ .of_match_table = atmel_tdes_dt_ids,
},
};
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 70b911baab26..689be70d69c1 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -15,8 +15,7 @@
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/io.h>
#include <linux/bitops.h>
@@ -2397,7 +2396,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
memset(ctx->ipad + ctx->authkeylen, 0,
blocksize - ctx->authkeylen);
ctx->authkeylen = 0;
- memcpy(ctx->opad, ctx->ipad, blocksize);
+ unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
+ "fortified memcpy causes -Wrestrict warning");
for (index = 0; index < blocksize; index++) {
ctx->ipad[index] ^= HMAC_IPAD_VALUE;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index feb86013dbf6..eba2d750c3b0 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -56,12 +56,15 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamalg_desc.h"
-#include <crypto/engine.h>
-#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -95,13 +98,13 @@ struct caam_alg_entry {
};
struct caam_aead_alg {
- struct aead_alg aead;
+ struct aead_engine_alg aead;
struct caam_alg_entry caam;
bool registered;
};
struct caam_skcipher_alg {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
struct caam_alg_entry caam;
bool registered;
};
@@ -110,7 +113,6 @@ struct caam_skcipher_alg {
* per-session context
*/
struct caam_ctx {
- struct crypto_engine_ctx enginectx;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
@@ -188,7 +190,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
static int aead_set_sh_desc(struct crypto_aead *aead)
{
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- struct caam_aead_alg, aead);
+ struct caam_aead_alg,
+ aead.base);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct device *jrdev = ctx->jrdev;
@@ -738,7 +741,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct caam_skcipher_alg *alg =
container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
- skcipher);
+ skcipher.base);
struct device *jrdev = ctx->jrdev;
unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
@@ -1195,7 +1198,8 @@ static void init_authenc_job(struct aead_request *req,
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- struct caam_aead_alg, aead);
+ struct caam_aead_alg,
+ aead.base);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
@@ -1881,7 +1885,7 @@ static int skcipher_decrypt(struct skcipher_request *req)
static struct caam_skcipher_alg driver_algs[] = {
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-caam",
@@ -1894,10 +1898,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-3des-caam",
@@ -1910,10 +1917,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-caam",
@@ -1926,10 +1936,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-caam",
@@ -1943,11 +1956,14 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
.chunksize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-caam",
@@ -1963,6 +1979,9 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.chunksize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -1970,7 +1989,7 @@ static struct caam_skcipher_alg driver_algs[] = {
},
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-caam",
@@ -1984,10 +2003,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-caam",
@@ -1999,10 +2021,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-caam",
@@ -2014,10 +2039,13 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
},
{
- .skcipher = {
+ .skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-caam",
@@ -2029,13 +2057,16 @@ static struct caam_skcipher_alg driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
},
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
},
};
static struct caam_aead_alg driver_aeads[] = {
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc4106(gcm(aes))",
.cra_driver_name = "rfc4106-gcm-aes-caam",
@@ -2048,13 +2079,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc4543(gcm(aes))",
.cra_driver_name = "rfc4543-gcm-aes-caam",
@@ -2067,6 +2101,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_RFC4543_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
@@ -2074,7 +2111,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
/* Galois Counter Mode */
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-caam",
@@ -2087,6 +2124,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
.nodkp = true,
@@ -2094,7 +2134,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
/* single-pass ipsec_esp descriptor */
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),"
"ecb(cipher_null))",
@@ -2109,13 +2149,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"ecb(cipher_null))",
@@ -2130,13 +2173,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"ecb(cipher_null))",
@@ -2151,13 +2197,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"ecb(cipher_null))",
@@ -2172,13 +2221,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"ecb(cipher_null))",
@@ -2193,13 +2245,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"ecb(cipher_null))",
@@ -2214,13 +2269,16 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = NULL_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(aes))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2234,6 +2292,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2241,7 +2302,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(aes)))",
@@ -2256,6 +2317,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2264,7 +2328,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha1-"
@@ -2278,6 +2342,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2285,7 +2352,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(aes)))",
@@ -2300,6 +2367,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2308,7 +2378,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha224-"
@@ -2322,6 +2392,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2329,7 +2402,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(aes)))",
@@ -2344,6 +2417,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2352,7 +2428,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha256-"
@@ -2366,6 +2442,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2373,7 +2452,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(aes)))",
@@ -2388,6 +2467,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2396,7 +2478,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha384-"
@@ -2410,6 +2492,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2417,7 +2502,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(aes)))",
@@ -2432,6 +2517,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2440,7 +2528,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(aes))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -2454,6 +2542,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2461,7 +2552,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(aes)))",
@@ -2476,6 +2567,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2484,7 +2578,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2498,6 +2592,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2505,7 +2602,7 @@ static struct caam_aead_alg driver_aeads[] = {
}
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(des3_ede)))",
@@ -2520,6 +2617,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2528,7 +2628,7 @@ static struct caam_aead_alg driver_aeads[] = {
}
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"cbc(des3_ede))",
@@ -2543,6 +2643,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2550,7 +2653,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(des3_ede)))",
@@ -2566,6 +2669,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2574,7 +2680,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"cbc(des3_ede))",
@@ -2589,6 +2695,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2596,7 +2705,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(des3_ede)))",
@@ -2612,6 +2721,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2620,7 +2732,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"cbc(des3_ede))",
@@ -2635,6 +2747,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2642,7 +2757,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des3_ede)))",
@@ -2658,6 +2773,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2666,7 +2784,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"cbc(des3_ede))",
@@ -2681,6 +2799,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2688,7 +2809,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(des3_ede)))",
@@ -2704,6 +2825,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2712,7 +2836,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"cbc(des3_ede))",
@@ -2727,6 +2851,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2734,7 +2861,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(des3_ede)))",
@@ -2750,6 +2877,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2758,7 +2888,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-"
@@ -2772,6 +2902,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2779,7 +2912,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(md5),"
"cbc(des)))",
@@ -2794,6 +2927,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2802,7 +2938,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-"
@@ -2816,6 +2952,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2823,7 +2962,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha1),"
"cbc(des)))",
@@ -2838,6 +2977,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2846,7 +2988,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-"
@@ -2860,6 +3002,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2867,7 +3012,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha224),"
"cbc(des)))",
@@ -2882,6 +3027,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2890,7 +3038,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-"
@@ -2904,6 +3052,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2911,7 +3062,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha256),"
"cbc(des)))",
@@ -2926,6 +3077,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2934,7 +3088,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-"
@@ -2948,6 +3102,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2955,7 +3112,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha384),"
"cbc(des)))",
@@ -2970,6 +3127,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2978,7 +3138,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -2992,6 +3152,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2999,7 +3162,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "echainiv(authenc(hmac(sha512),"
"cbc(des)))",
@@ -3014,6 +3177,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -3022,7 +3188,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(md5),"
"rfc3686(ctr(aes)))",
@@ -3037,6 +3203,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3046,7 +3215,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(md5),rfc3686(ctr(aes))))",
@@ -3061,6 +3230,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3071,7 +3243,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha1),"
"rfc3686(ctr(aes)))",
@@ -3086,6 +3258,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3095,7 +3270,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(sha1),rfc3686(ctr(aes))))",
@@ -3110,6 +3285,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3120,7 +3298,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha224),"
"rfc3686(ctr(aes)))",
@@ -3135,6 +3313,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3144,7 +3325,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc("
"hmac(sha224),rfc3686(ctr(aes))))",
@@ -3159,6 +3340,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3169,7 +3353,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha256),"
"rfc3686(ctr(aes)))",
@@ -3184,6 +3368,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3193,7 +3380,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha256),"
"rfc3686(ctr(aes))))",
@@ -3208,6 +3395,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3218,7 +3408,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha384),"
"rfc3686(ctr(aes)))",
@@ -3233,6 +3423,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3242,7 +3435,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha384),"
"rfc3686(ctr(aes))))",
@@ -3257,6 +3450,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3267,7 +3463,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "authenc(hmac(sha512),"
"rfc3686(ctr(aes)))",
@@ -3282,6 +3478,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3291,7 +3490,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "seqiv(authenc(hmac(sha512),"
"rfc3686(ctr(aes))))",
@@ -3306,6 +3505,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES |
OP_ALG_AAI_CTR_MOD128,
@@ -3316,7 +3518,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc7539(chacha20,poly1305)",
.cra_driver_name = "rfc7539-chacha20-poly1305-"
@@ -3330,6 +3532,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = CHACHAPOLY_IV_SIZE,
.maxauthsize = POLY1305_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
OP_ALG_AAI_AEAD,
@@ -3339,7 +3544,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
},
{
- .aead = {
+ .aead.base = {
.base = {
.cra_name = "rfc7539esp(chacha20,poly1305)",
.cra_driver_name = "rfc7539esp-chacha20-"
@@ -3353,6 +3558,9 @@ static struct caam_aead_alg driver_aeads[] = {
.ivsize = 8,
.maxauthsize = POLY1305_DIGEST_SIZE,
},
+ .aead.op = {
+ .do_one_request = aead_do_one_req,
+ },
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
OP_ALG_AAI_AEAD,
@@ -3412,13 +3620,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
{
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct caam_skcipher_alg *caam_alg =
- container_of(alg, typeof(*caam_alg), skcipher);
+ container_of(alg, typeof(*caam_alg), skcipher.base);
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
int ret = 0;
- ctx->enginectx.op.do_one_request = skcipher_do_one_req;
-
if (alg_aai == OP_ALG_AAI_XTS) {
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
struct crypto_skcipher *fallback;
@@ -3449,13 +3655,11 @@ static int caam_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
struct caam_aead_alg *caam_alg =
- container_of(alg, struct caam_aead_alg, aead);
+ container_of(alg, struct caam_aead_alg, aead.base);
struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
- ctx->enginectx.op.do_one_request = aead_do_one_req;
-
return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
@@ -3490,20 +3694,20 @@ void caam_algapi_exit(void)
struct caam_aead_alg *t_alg = driver_aeads + i;
if (t_alg->registered)
- crypto_unregister_aead(&t_alg->aead);
+ crypto_engine_unregister_aead(&t_alg->aead);
}
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
struct caam_skcipher_alg *t_alg = driver_algs + i;
if (t_alg->registered)
- crypto_unregister_skcipher(&t_alg->skcipher);
+ crypto_engine_unregister_skcipher(&t_alg->skcipher);
}
}
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct skcipher_alg *alg = &t_alg->skcipher;
+ struct skcipher_alg *alg = &t_alg->skcipher.base;
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
@@ -3517,7 +3721,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
- struct aead_alg *alg = &t_alg->aead;
+ struct aead_alg *alg = &t_alg->aead.base;
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
@@ -3607,10 +3811,10 @@ int caam_algapi_init(struct device *ctrldev)
caam_skcipher_alg_init(t_alg);
- err = crypto_register_skcipher(&t_alg->skcipher);
+ err = crypto_engine_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->skcipher.base.cra_driver_name);
+ t_alg->skcipher.base.base.cra_driver_name);
continue;
}
@@ -3654,15 +3858,15 @@ int caam_algapi_init(struct device *ctrldev)
* if MD or MD size is not supported by device.
*/
if (is_mdha(c2_alg_sel) &&
- (!md_inst || t_alg->aead.maxauthsize > md_limit))
+ (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
continue;
caam_aead_alg_init(t_alg);
- err = crypto_register_aead(&t_alg->aead);
+ err = crypto_engine_register_aead(&t_alg->aead);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->aead.base.cra_driver_name);
+ t_alg->aead.base.base.cra_driver_name);
continue;
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 80deb003f0a5..290c8500c247 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -65,9 +65,13 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
-#include <crypto/engine.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define CAAM_CRA_PRIORITY 3000
@@ -89,7 +93,6 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
- struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -368,10 +371,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
- if (!desc) {
- dev_err(jrdev, "unable to allocate key input memory\n");
+ if (!desc)
return -ENOMEM;
- }
init_job_desc(desc, 0);
@@ -702,19 +703,14 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
dma_addr_t sh_desc_dma)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct caam_hash_state *state = ahash_request_ctx_dma(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
- unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
- if (!edesc) {
- dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+ edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
+ if (!edesc)
return NULL;
- }
state->edesc = edesc;
@@ -1757,7 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
- struct ahash_alg ahash_alg;
+ struct ahash_engine_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
@@ -1769,7 +1765,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct ahash_alg *alg =
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
- container_of(alg, struct caam_hash_alg, ahash_alg);
+ container_of(alg, struct caam_hash_alg, ahash_alg.base);
struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
@@ -1860,8 +1856,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
sh_desc_digest) -
sh_desc_update_offset;
- ctx->enginectx.op.do_one_request = ahash_do_one_req;
-
crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
@@ -1894,7 +1888,7 @@ void caam_algapi_hash_exit(void)
return;
list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
- crypto_unregister_ahash(&t_alg->ahash_alg);
+ crypto_engine_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
@@ -1909,13 +1903,11 @@ caam_hash_alloc(struct caam_hash_template *template,
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
+ if (!t_alg)
return ERR_PTR(-ENOMEM);
- }
- t_alg->ahash_alg = template->template_ahash;
- halg = &t_alg->ahash_alg;
+ t_alg->ahash_alg.base = template->template_ahash;
+ halg = &t_alg->ahash_alg.base;
alg = &halg->halg.base;
if (keyed) {
@@ -1928,7 +1920,7 @@ caam_hash_alloc(struct caam_hash_template *template,
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
- t_alg->ahash_alg.setkey = NULL;
+ halg->setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
@@ -1940,6 +1932,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
+ t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;
return t_alg;
}
@@ -2001,10 +1994,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
- err = crypto_register_ahash(&t_alg->ahash_alg);
+ err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
- t_alg->ahash_alg.halg.base.cra_driver_name,
+ t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
@@ -2021,10 +2014,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
- err = crypto_register_ahash(&t_alg->ahash_alg);
+ err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
- t_alg->ahash_alg.halg.base.cra_driver_name,
+ t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 72afc249d42f..887a5f2fb927 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -16,8 +16,12 @@
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
+#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
@@ -38,7 +42,7 @@ static u8 *zero_buffer;
static bool init_done;
struct caam_akcipher_alg {
- struct akcipher_alg akcipher;
+ struct akcipher_engine_alg akcipher;
bool registered;
};
@@ -225,7 +229,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
if (len && *buff)
break;
- sg_miter_next(&miter);
+ if (!sg_miter_next(&miter))
+ break;
+
buff = miter.addr;
len = miter.length;
@@ -1121,8 +1127,6 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return -ENOMEM;
}
- ctx->enginectx.op.do_one_request = akcipher_do_one_req;
-
return 0;
}
@@ -1139,7 +1143,7 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
static struct caam_akcipher_alg caam_rsa = {
- .akcipher = {
+ .akcipher.base = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
.set_pub_key = caam_rsa_set_pub_key,
@@ -1155,7 +1159,10 @@ static struct caam_akcipher_alg caam_rsa = {
.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
CRYPTO_DMA_PADDING,
},
- }
+ },
+ .akcipher.op = {
+ .do_one_request = akcipher_do_one_req,
+ },
};
/* Public Key Cryptography module initialization handler */
@@ -1193,12 +1200,12 @@ int caam_pkc_init(struct device *ctrldev)
if (!zero_buffer)
return -ENOMEM;
- err = crypto_register_akcipher(&caam_rsa.akcipher);
+ err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
if (err) {
kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
- caam_rsa.akcipher.base.cra_driver_name);
+ caam_rsa.akcipher.base.base.cra_driver_name);
} else {
init_done = true;
caam_rsa.registered = true;
@@ -1214,7 +1221,7 @@ void caam_pkc_exit(void)
return;
if (caam_rsa.registered)
- crypto_unregister_akcipher(&caam_rsa.akcipher);
+ crypto_engine_unregister_akcipher(&caam_rsa.akcipher);
kfree(zero_buffer);
}
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index cc889a525e2f..96d03704c9be 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -12,7 +12,6 @@
#define _PKC_DESC_H_
#include "compat.h"
#include "pdb.h"
-#include <crypto/engine.h>
/**
* caam_priv_key_form - CAAM RSA private key representation
@@ -88,13 +87,11 @@ struct caam_rsa_key {
/**
* caam_rsa_ctx - per session context.
- * @enginectx : crypto engine context
* @key : RSA key in DMA zone
* @dev : device structure
* @padding_dma : dma address of padding, for adding it to the input
*/
struct caam_rsa_ctx {
- struct crypto_engine_ctx enginectx;
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 68e73775392d..bdf367f3f679 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
@@ -740,6 +741,109 @@ static int caam_ctrl_rng_init(struct device *dev)
return 0;
}
+/* Indicate if the internal state of the CAAM is lost during PM */
+static int caam_off_during_pm(void)
+{
+ bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6qp") ||
+ of_machine_is_compatible("fsl,imx6dl");
+
+ return not_off_during_pm ? 0 : 1;
+}
+
+static void caam_state_save(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ struct caam_ctl_state *state = &ctrlpriv->state;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ u32 deco_inst, jr_inst;
+ int i;
+
+ state->mcr = rd_reg32(&ctrl->mcr);
+ state->scfgr = rd_reg32(&ctrl->scfgr);
+
+ deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+ for (i = 0; i < deco_inst; i++) {
+ state->deco_mid[i].liodn_ms =
+ rd_reg32(&ctrl->deco_mid[i].liodn_ms);
+ state->deco_mid[i].liodn_ls =
+ rd_reg32(&ctrl->deco_mid[i].liodn_ls);
+ }
+
+ jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+ for (i = 0; i < jr_inst; i++) {
+ state->jr_mid[i].liodn_ms =
+ rd_reg32(&ctrl->jr_mid[i].liodn_ms);
+ state->jr_mid[i].liodn_ls =
+ rd_reg32(&ctrl->jr_mid[i].liodn_ls);
+ }
+}
+
+static void caam_state_restore(const struct device *dev)
+{
+ const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ const struct caam_ctl_state *state = &ctrlpriv->state;
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ u32 deco_inst, jr_inst;
+ int i;
+
+ wr_reg32(&ctrl->mcr, state->mcr);
+ wr_reg32(&ctrl->scfgr, state->scfgr);
+
+ deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+ for (i = 0; i < deco_inst; i++) {
+ wr_reg32(&ctrl->deco_mid[i].liodn_ms,
+ state->deco_mid[i].liodn_ms);
+ wr_reg32(&ctrl->deco_mid[i].liodn_ls,
+ state->deco_mid[i].liodn_ls);
+ }
+
+ jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+ CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+ for (i = 0; i < jr_inst; i++) {
+ wr_reg32(&ctrl->jr_mid[i].liodn_ms,
+ state->jr_mid[i].liodn_ms);
+ wr_reg32(&ctrl->jr_mid[i].liodn_ls,
+ state->jr_mid[i].liodn_ls);
+ }
+
+ if (ctrlpriv->virt_en == 1)
+ clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+ JRSTART_JR1_START | JRSTART_JR2_START |
+ JRSTART_JR3_START);
+}
+
+static int caam_ctrl_suspend(struct device *dev)
+{
+ const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+ caam_state_save(dev);
+
+ return 0;
+}
+
+static int caam_ctrl_resume(struct device *dev)
+{
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+ caam_state_restore(dev);
+
+ /* HW and RNG will be reset, so the RNG deinstantiation action can be removed */
+ devm_remove_action(dev, devm_deinstantiate_rng, dev);
+ ret = caam_ctrl_rng_init(dev);
+ }
+
+ return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);
+
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@@ -771,6 +875,8 @@ static int caam_probe(struct platform_device *pdev)
caam_imx = (bool)imx_soc_match;
+ ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();
+
if (imx_soc_match) {
/*
* Until Layerscape and i.MX OP-TEE get in sync,
@@ -1033,6 +1139,7 @@ static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.of_match_table = caam_match,
+ .pm = pm_ptr(&caam_ctrl_pm_ops),
},
.probe = caam_probe,
};
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index b4f7bf77f487..e51320150872 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -4,7 +4,7 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
*/
#ifndef INTERN_H
@@ -47,6 +47,16 @@ struct caam_jrentry_info {
u32 desc_size; /* Stored size for postprocessing, header derived */
};
+struct caam_jr_state {
+ dma_addr_t inpbusaddr;
+ dma_addr_t outbusaddr;
+};
+
+struct caam_jr_dequeue_params {
+ struct device *dev;
+ int enable_itr;
+};
+
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct list_head list_node; /* Job Ring device list */
@@ -54,6 +64,7 @@ struct caam_drv_private_jr {
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
+ struct caam_jr_dequeue_params tasklet_params;
int irq; /* One per queue */
bool hwrng;
@@ -71,6 +82,15 @@ struct caam_drv_private_jr {
int tail; /* entinfo (s/w ring) tail index */
void *outring; /* Base of output ring, DMA-safe */
struct crypto_engine *engine;
+
+ struct caam_jr_state state; /* State of the JR during PM */
+};
+
+struct caam_ctl_state {
+ struct masterid deco_mid[16];
+ struct masterid jr_mid[4];
+ u32 mcr;
+ u32 scfgr;
};
/*
@@ -116,6 +136,9 @@ struct caam_drv_private {
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
+
+ int caam_off_during_pm; /* If the CAAM is reset after suspend */
+ struct caam_ctl_state state; /* State of the CTL during PM */
};
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 96dea5304d22..b1f1b393b98e 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include "compat.h"
#include "ctrl.h"
@@ -117,6 +118,23 @@ static int caam_jr_flush(struct device *dev)
return caam_jr_stop_processing(dev, JRCR_RESET);
}
+/* Resume can be used after a park or a flush if the CAAM has not been reset */
+static int caam_jr_restart_processing(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
+ JRINT_ERR_HALT_MASK;
+
+ /* Check that the flush/park is completed */
+ if (halt_status != JRINT_ERR_HALT_COMPLETE)
+ return -1;
+
+ /* Resume processing of jobs */
+ clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);
+
+ return 0;
+}
+
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -215,7 +233,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
* tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
- if (!irqstate)
+ if (!(irqstate & JRINT_JR_INT))
return IRQ_NONE;
/*
@@ -245,7 +263,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
- struct device *dev = (struct device *)devarg;
+ struct caam_jr_dequeue_params *params = (void *)devarg;
+ struct device *dev = params->dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@@ -319,8 +338,9 @@ static void caam_jr_dequeue(unsigned long devarg)
outring_used--;
}
- /* reenable / unmask IRQs */
- clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+ if (params->enable_itr)
+ /* reenable / unmask IRQs */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
/**
@@ -445,8 +465,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
* Guarantee that the descriptor's DMA address has been written to
* the next slot in the ring before the write index is updated, since
* other cores may update this index independently.
+ *
+ * Under heavy DDR load, smp_wmb() or dma_wmb() do not guarantee that
+ * the input ring is updated before the CAAM starts reading it, so the
+ * CAAM may process a stale descriptor address again and put it in the
+ * output ring. This makes caam_jr_dequeue() fail, since that old
+ * descriptor is no longer in the software ring.
+ * To fix this, use wmb(), which orders writes across the full system
+ * rather than only the inner/outer shareable domains.
*/
- smp_wmb();
+ wmb();
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
@@ -470,6 +498,29 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
+static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
+ dma_addr_t outbusaddr)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+ wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+ wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+ wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+ wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+ /* Select interrupt coalescing parameters */
+ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+}
+
+static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
+{
+ jrp->out_ring_read_index = 0;
+ jrp->head = 0;
+ jrp->tail = 0;
+}
+
/*
* Init JobR independent of platform property detection
*/
@@ -506,25 +557,16 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->out_ring_read_index = 0;
- jrp->head = 0;
- jrp->tail = 0;
-
- wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
- wr_reg64(&jrp->rregs->outring_base, outbusaddr);
- wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
- wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
-
+ caam_jr_reset_index(jrp);
jrp->inpring_avail = JOBR_DEPTH;
+ caam_jr_init_hw(dev, inpbusaddr, outbusaddr);
spin_lock_init(&jrp->inplock);
- /* Select interrupt coalescing parameters */
- clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
- (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
- (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
-
- tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+ jrp->tasklet_params.dev = dev;
+ jrp->tasklet_params.enable_itr = 1;
+ tasklet_init(&jrp->irqtask, caam_jr_dequeue,
+ (unsigned long)&jrp->tasklet_params);
/* Connect job ring interrupt handler. */
error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
@@ -635,11 +677,134 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
+ device_init_wakeup(&pdev->dev, 1);
+ device_set_wakeup_enable(&pdev->dev, false);
+
register_algs(jrpriv, jrdev->parent);
return 0;
}
+static void caam_jr_get_hw_state(struct device *dev)
+{
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+ jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+ jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+}
+
+static int caam_jr_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+ struct caam_jr_dequeue_params suspend_params = {
+ .dev = dev,
+ .enable_itr = 0,
+ };
+
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ if (jrpriv->hwrng)
+ caam_rng_exit(dev->parent);
+
+ if (ctrlpriv->caam_off_during_pm) {
+ int err;
+
+ tasklet_disable(&jrpriv->irqtask);
+
+ /* mask itr to call flush */
+ clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+ /* Invalidate jobs in process */
+ err = caam_jr_flush(dev);
+ if (err) {
+ dev_err(dev, "Failed to flush\n");
+ return err;
+ }
+
+ /* Dequeue the flushed jobs */
+ caam_jr_dequeue((unsigned long)&suspend_params);
+
+ /* Save state */
+ caam_jr_get_hw_state(dev);
+ } else if (device_may_wakeup(&pdev->dev)) {
+ enable_irq_wake(jrpriv->irq);
+ }
+
+ return 0;
+}
+
+static int caam_jr_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+
+ if (ctrlpriv->caam_off_during_pm) {
+ u64 inp_addr;
+ int err;
+
+ /*
+ * Check whether the CAAM has been reset by checking the address
+ * of the input ring
+ */
+ inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
+ if (inp_addr != 0) {
+ /* JR still has some configuration */
+ if (inp_addr == jrpriv->state.inpbusaddr) {
+ /* JR has not been reset */
+ err = caam_jr_restart_processing(dev);
+ if (err) {
+ dev_err(dev,
+ "Restart processing failed\n");
+ return err;
+ }
+
+ tasklet_enable(&jrpriv->irqtask);
+
+ clrsetbits_32(&jrpriv->rregs->rconfig_lo,
+ JRCFG_IMSK, 0);
+
+ goto add_jr;
+ } else if (ctrlpriv->optee_en) {
+ /* JR has been used by OPTEE, reset it */
+ err = caam_reset_hw_jr(dev);
+ if (err) {
+ dev_err(dev, "Failed to reset JR\n");
+ return err;
+ }
+ } else {
+ /* Unexpected state, return an error */
+ return -EIO;
+ }
+ }
+
+ caam_jr_reset_index(jrpriv);
+ caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
+ jrpriv->state.outbusaddr);
+
+ tasklet_enable(&jrpriv->irqtask);
+ } else if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(jrpriv->irq);
+ }
+
+add_jr:
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+ spin_unlock(&driver_data.jr_alloc_lock);
+
+ if (jrpriv->hwrng)
+ jrpriv->hwrng = !caam_rng_init(dev->parent);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);
+
static const struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
@@ -655,6 +820,7 @@ static struct platform_driver caam_jr_driver = {
.driver = {
.name = "caam_jr",
.of_match_table = caam_jr_match,
+ .pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 2ad2c1035856..46a083849a8e 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/fsl/qman.h>
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 189e74c21f0c..873df9de9890 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -459,12 +459,6 @@ struct masterid {
u32 liodn_ls; /* LIODN for non-sequence and seq access */
};
-/* Partition ID for DMA configuration */
-struct partid {
- u32 rsvd1;
- u32 pidr; /* partition ID, DECO */
-};
-
/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
@@ -590,8 +584,7 @@ struct caam_ctrl {
u32 deco_rsr; /* DECORSR - Deco Request Source */
u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
- struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
- u32 rsvd5[22];
+ struct masterid deco_mid[16]; /* DECOxLIODNR - 1 per DECO */
/* DECO Availability/Reset Section 120-3ff */
u32 deco_avail; /* DAR - DECO availability */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index f6196495e862..aa0ba2d17e1e 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -11,7 +11,8 @@ ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
tee-dev.o \
- platform-access.o
+ platform-access.o \
+ dbc.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
new file mode 100644
index 000000000000..839ea14b9a85
--- /dev/null
+++ b/drivers/crypto/ccp/dbc.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Dynamic Boost Control interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include "dbc.h"
+
+struct error_map {
+ u32 psp;
+ int ret;
+};
+
+#define DBC_ERROR_ACCESS_DENIED 0x0001
+#define DBC_ERROR_EXCESS_DATA 0x0004
+#define DBC_ERROR_BAD_PARAMETERS 0x0006
+#define DBC_ERROR_BAD_STATE 0x0007
+#define DBC_ERROR_NOT_IMPLEMENTED 0x0009
+#define DBC_ERROR_BUSY 0x000D
+#define DBC_ERROR_MESSAGE_FAILURE 0x0307
+#define DBC_ERROR_OVERFLOW 0x300F
+#define DBC_ERROR_SIGNATURE_INVALID 0x3072
+
+static struct error_map error_codes[] = {
+ {DBC_ERROR_ACCESS_DENIED, -EACCES},
+ {DBC_ERROR_EXCESS_DATA, -E2BIG},
+ {DBC_ERROR_BAD_PARAMETERS, -EINVAL},
+ {DBC_ERROR_BAD_STATE, -EAGAIN},
+ {DBC_ERROR_MESSAGE_FAILURE, -ENOENT},
+ {DBC_ERROR_NOT_IMPLEMENTED, -ENOENT},
+ {DBC_ERROR_BUSY, -EBUSY},
+ {DBC_ERROR_OVERFLOW, -ENFILE},
+ {DBC_ERROR_SIGNATURE_INVALID, -EPERM},
+ {0x0, 0x0},
+};
+
+static int send_dbc_cmd(struct psp_dbc_device *dbc_dev,
+ enum psp_platform_access_msg msg)
+{
+ int ret;
+
+ dbc_dev->mbox->req.header.status = 0;
+ ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox);
+ if (ret == -EIO) {
+ int i;
+
+ dev_dbg(dbc_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x\n",
+ msg, dbc_dev->mbox->req.header.status);
+
+ for (i = 0; error_codes[i].psp; i++) {
+ if (dbc_dev->mbox->req.header.status == error_codes[i].psp)
+ return error_codes[i].ret;
+ }
+ }
+
+ return ret;
+}
+
+static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
+{
+ int ret;
+
+ dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce);
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+ if (ret == -EAGAIN) {
+ dev_dbg(dbc_dev->dev, "retrying get nonce\n");
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+ }
+
+ return ret;
+}
+
+static int send_dbc_parameter(struct psp_dbc_device *dbc_dev)
+{
+ dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param);
+
+ switch (dbc_dev->mbox->dbc_param.user.msg_index) {
+ case PARAM_SET_FMAX_CAP:
+ case PARAM_SET_PWR_CAP:
+ case PARAM_SET_GFX_MODE:
+ return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER);
+ case PARAM_GET_FMAX_CAP:
+ case PARAM_GET_PWR_CAP:
+ case PARAM_GET_CURR_TEMP:
+ case PARAM_GET_FMAX_MAX:
+ case PARAM_GET_FMAX_MIN:
+ case PARAM_GET_SOC_PWR_MAX:
+ case PARAM_GET_SOC_PWR_MIN:
+ case PARAM_GET_SOC_PWR_CUR:
+ case PARAM_GET_GFX_MODE:
+ return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER);
+ }
+
+ return -EINVAL;
+}
+
+void dbc_dev_destroy(struct psp_device *psp)
+{
+ struct psp_dbc_device *dbc_dev = psp->dbc_data;
+
+ if (!dbc_dev)
+ return;
+
+ misc_deregister(&dbc_dev->char_dev);
+ mutex_destroy(&dbc_dev->ioctl_mutex);
+ psp->dbc_data = NULL;
+}
+
+static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ void __user *argp = (void __user *)arg;
+ struct psp_dbc_device *dbc_dev;
+ int ret;
+
+ if (!psp_master || !psp_master->dbc_data)
+ return -ENODEV;
+ dbc_dev = psp_master->dbc_data;
+
+ mutex_lock(&dbc_dev->ioctl_mutex);
+
+ switch (cmd) {
+ case DBCIOCNONCE:
+ if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp,
+ sizeof(struct dbc_user_nonce))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ ret = send_dbc_nonce(dbc_dev);
+ if (ret)
+ goto unlock;
+
+ if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user,
+ sizeof(struct dbc_user_nonce))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ break;
+ case DBCIOCUID:
+ dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid);
+ if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp,
+ sizeof(struct dbc_user_setuid))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
+ if (ret)
+ goto unlock;
+
+ if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user,
+ sizeof(struct dbc_user_setuid))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ break;
+ case DBCIOCPARAM:
+ if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp,
+ sizeof(struct dbc_user_param))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ ret = send_dbc_parameter(dbc_dev);
+ if (ret)
+ goto unlock;
+
+ if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user,
+ sizeof(struct dbc_user_param))) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+
+ }
+unlock:
+ mutex_unlock(&dbc_dev->ioctl_mutex);
+
+ return ret;
+}
+
+static const struct file_operations dbc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dbc_ioctl,
+};
+
+int dbc_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_dbc_device *dbc_dev;
+ int ret;
+
+ if (!PSP_FEATURE(psp, DBC))
+ return 0;
+
+ dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL);
+ if (!dbc_dev)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
+ dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ if (!dbc_dev->mbox) {
+ ret = -ENOMEM;
+ goto cleanup_dev;
+ }
+
+ psp->dbc_data = dbc_dev;
+ dbc_dev->dev = dev;
+
+ ret = send_dbc_nonce(dbc_dev);
+ if (ret == -EACCES) {
+ dev_dbg(dbc_dev->dev,
+ "dynamic boost control was previously authenticated\n");
+ ret = 0;
+ }
+ dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n",
+ ret ? "un" : "");
+ if (ret) {
+ ret = 0;
+ goto cleanup_mbox;
+ }
+
+ dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR;
+ dbc_dev->char_dev.name = "dbc";
+ dbc_dev->char_dev.fops = &dbc_fops;
+ dbc_dev->char_dev.mode = 0600;
+ ret = misc_register(&dbc_dev->char_dev);
+ if (ret)
+ goto cleanup_mbox;
+
+ mutex_init(&dbc_dev->ioctl_mutex);
+
+ return 0;
+
+cleanup_mbox:
+ devm_free_pages(dev, (unsigned long)dbc_dev->mbox);
+
+cleanup_dev:
+ psp->dbc_data = NULL;
+ devm_kfree(dev, dbc_dev);
+
+ return ret;
+}
diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h
new file mode 100644
index 000000000000..e963099ca38e
--- /dev/null
+++ b/drivers/crypto/ccp/dbc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Dynamic Boost Control support
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __DBC_H__
+#define __DBC_H__
+
+#include <uapi/linux/psp-dbc.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_dbc_device {
+ struct device *dev;
+
+ union dbc_buffer *mbox;
+
+ struct mutex ioctl_mutex;
+
+ struct miscdevice char_dev;
+};
+
+struct dbc_nonce {
+ struct psp_req_buffer_hdr header;
+ struct dbc_user_nonce user;
+} __packed;
+
+struct dbc_set_uid {
+ struct psp_req_buffer_hdr header;
+ struct dbc_user_setuid user;
+} __packed;
+
+struct dbc_param {
+ struct psp_req_buffer_hdr header;
+ struct dbc_user_param user;
+} __packed;
+
+union dbc_buffer {
+ struct psp_request req;
+ struct dbc_nonce dbc_nonce;
+ struct dbc_set_uid dbc_set_uid;
+ struct dbc_param dbc_param;
+};
+
+void dbc_dev_destroy(struct psp_device *psp);
+int dbc_dev_init(struct psp_device *psp);
+
+#endif /* __DBC_H__ */
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index e3d6955d3265..d42d7bc62352 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -15,6 +15,7 @@
#include "sev-dev.h"
#include "tee-dev.h"
#include "platform-access.h"
+#include "dbc.h"
struct psp_device *psp_master;
@@ -112,6 +113,12 @@ static void psp_init_platform_access(struct psp_device *psp)
dev_warn(psp->dev, "platform access init failed: %d\n", ret);
return;
}
+
+ /* dbc must come after platform access as it tests the feature */
+ ret = dbc_dev_init(psp);
+ if (ret)
+ dev_warn(psp->dev, "failed to init dynamic boost control: %d\n",
+ ret);
}
static int psp_init(struct psp_device *psp)
@@ -173,13 +180,14 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
+ /* master device must be set for platform access */
+ if (psp->sp->set_psp_master_device)
+ psp->sp->set_psp_master_device(psp->sp);
+
ret = psp_init(psp);
if (ret)
goto e_irq;
- if (sp->set_psp_master_device)
- sp->set_psp_master_device(sp);
-
/* Enable interrupt */
iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
@@ -188,6 +196,9 @@ int psp_dev_init(struct sp_device *sp)
return 0;
e_irq:
+ if (sp->clear_psp_master_device)
+ sp->clear_psp_master_device(sp);
+
sp_free_psp_irq(psp->sp, psp);
e_err:
sp->psp_data = NULL;
@@ -213,6 +224,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ dbc_dev_destroy(psp);
+
platform_access_dev_destroy(psp);
sp_free_psp_irq(sp, psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 505e4bdeaca8..8a4de69399c5 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -40,6 +40,7 @@ struct psp_device {
void *sev_data;
void *tee_data;
void *platform_access_data;
+ void *dbc_data;
unsigned int capability;
};
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 1253a0217985..2329ad524b49 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -28,6 +28,10 @@
#define CACHE_NONE 0x00
#define CACHE_WB_NO_ALLOC 0xb7
+#define PLATFORM_FEATURE_DBC 0x1
+
+#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
+
/* Structure to hold CCP device data */
struct ccp_device;
struct ccp_vdata {
@@ -51,6 +55,7 @@ struct tee_vdata {
const unsigned int cmdbuff_addr_hi_reg;
const unsigned int ring_wptr_reg;
const unsigned int ring_rptr_reg;
+ const unsigned int info_reg;
};
struct platform_access_vdata {
@@ -69,6 +74,8 @@ struct psp_vdata {
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
+ const unsigned int bootloader_info_reg;
+ const unsigned int platform_features;
};
/* Structure to hold SP device data */
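
The PSP_FEATURE() helper pastes its second argument onto the PLATFORM_FEATURE_ prefix, so callers can test a platform_features bit by name without spelling out the mask. A hedged sketch of how dbc_dev_init() uses it (the expansion shown is approximate, not compiler output):

/* PSP_FEATURE(psp, DBC) expands to roughly:
 *   (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_DBC)
 * Only pspv3 sets .platform_features = PLATFORM_FEATURE_DBC in the hunks
 * below, so on other parts this early check makes dbc_dev_init() a no-op. */
if (!PSP_FEATURE(psp, DBC))
	return 0;
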
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b603ad9b8341..b6ab56abeb68 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -8,6 +8,7 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -24,6 +25,12 @@
#include "ccp-dev.h"
#include "psp-dev.h"
+/* used for version string AA.BB.CC.DD */
+#define AA GENMASK(31, 24)
+#define BB GENMASK(23, 16)
+#define CC GENMASK(15, 8)
+#define DD GENMASK(7, 0)
+
#define MSIX_VECTORS 2
struct sp_pci {
@@ -32,7 +39,7 @@ struct sp_pci {
};
static struct sp_device *sp_dev_master;
-#define attribute_show(name, def) \
+#define security_attribute_show(name, def) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
@@ -42,24 +49,24 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \
}
-attribute_show(fused_part, FUSED_PART)
+security_attribute_show(fused_part, FUSED_PART)
static DEVICE_ATTR_RO(fused_part);
-attribute_show(debug_lock_on, DEBUG_LOCK_ON)
+security_attribute_show(debug_lock_on, DEBUG_LOCK_ON)
static DEVICE_ATTR_RO(debug_lock_on);
-attribute_show(tsme_status, TSME_STATUS)
+security_attribute_show(tsme_status, TSME_STATUS)
static DEVICE_ATTR_RO(tsme_status);
-attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
+security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
static DEVICE_ATTR_RO(anti_rollback_status);
-attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
+security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
static DEVICE_ATTR_RO(rpmc_production_enabled);
-attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
+security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
static DEVICE_ATTR_RO(rpmc_spirom_available);
-attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
+security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
static DEVICE_ATTR_RO(hsp_tpm_available);
-attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
+security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
static DEVICE_ATTR_RO(rom_armor_enforced);
-static struct attribute *psp_attrs[] = {
+static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
@@ -83,13 +90,70 @@ static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *a
return 0;
}
-static struct attribute_group psp_attr_group = {
- .attrs = psp_attrs,
+static struct attribute_group psp_security_attr_group = {
+ .attrs = psp_security_attrs,
.is_visible = psp_security_is_visible,
};
+#define version_attribute_show(name, _offset) \
+static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sp_device *sp = dev_get_drvdata(d); \
+ struct psp_device *psp = sp->psp_data; \
+ unsigned int val = ioread32(psp->io_regs + _offset); \
+ return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n", \
+ FIELD_GET(AA, val), \
+ FIELD_GET(BB, val), \
+ FIELD_GET(CC, val), \
+ FIELD_GET(DD, val)); \
+}
+
+version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg)
+static DEVICE_ATTR_RO(bootloader_version);
+version_attribute_show(tee_version, psp->vdata->tee->info_reg)
+static DEVICE_ATTR_RO(tee_version);
+
+static struct attribute *psp_firmware_attrs[] = {
+ &dev_attr_bootloader_version.attr,
+ &dev_attr_tee_version.attr,
+ NULL,
+};
+
+static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sp_device *sp = dev_get_drvdata(dev);
+ struct psp_device *psp = sp->psp_data;
+ unsigned int val = 0xffffffff;
+
+ if (!psp)
+ return 0;
+
+ if (attr == &dev_attr_bootloader_version.attr &&
+ psp->vdata->bootloader_info_reg)
+ val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
+
+ if (attr == &dev_attr_tee_version.attr &&
+ psp->capability & PSP_CAPABILITY_TEE &&
+ psp->vdata->tee->info_reg)
+ val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
+
+ /* If platform disallows accessing this register it will be all f's */
+ if (val != 0xffffffff)
+ return 0444;
+
+ return 0;
+}
+
+static struct attribute_group psp_firmware_attr_group = {
+ .attrs = psp_firmware_attrs,
+ .is_visible = psp_firmware_is_visible,
+};
+
static const struct attribute_group *psp_groups[] = {
- &psp_attr_group,
+ &psp_security_attr_group,
+ &psp_firmware_attr_group,
NULL,
};
@@ -359,6 +423,7 @@ static const struct tee_vdata teev1 = {
.cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
.ring_wptr_reg = 0x10550, /* C2PMSG_20 */
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct tee_vdata teev2 = {
@@ -384,6 +449,7 @@ static const struct platform_access_vdata pa_v2 = {
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
+ .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */
.feature_reg = 0x105fc, /* C2PMSG_63 */
.inten_reg = 0x10610, /* P2CMSG_INTEN */
.intsts_reg = 0x10614, /* P2CMSG_INTSTS */
@@ -391,6 +457,7 @@ static const struct psp_vdata pspv1 = {
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
@@ -399,14 +466,17 @@ static const struct psp_vdata pspv2 = {
static const struct psp_vdata pspv3 = {
.tee = &teev1,
.platform_access = &pa_v1,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
+ .platform_features = PLATFORM_FEATURE_DBC,
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
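
The new firmware-version attributes in sp-pci.c decode one packed 32-bit register into an "AA.BB.CC.DD" string using the GENMASK()/FIELD_GET() pair defined at the top of the file. A hedged worked example with a made-up register value (0x01020304 is not a real reading):

#include <linux/bitfield.h>
#include <linux/printk.h>
#include <linux/types.h>

/* With AA = GENMASK(31, 24) ... DD = GENMASK(7, 0) as above,
 * FIELD_GET(AA, 0x01020304) == 0x01, FIELD_GET(BB, ...) == 0x02, and so
 * on, so the sysfs attribute would emit "01.02.03.04". */
static void example_print_version(u32 val)
{
	pr_info("%02lx.%02lx.%02lx.%02lx\n",
		FIELD_GET(AA, val), FIELD_GET(BB, val),
		FIELD_GET(CC, val), FIELD_GET(DD, val));
}
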
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index c57f929805d5..0f0694037dd7 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include "cc_driver.h"
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 0eade4fa6695..16298ae4a00b 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2216,7 +2216,8 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
memcpy(hmacctx->ipad, key, keylen);
}
memset(hmacctx->ipad + keylen, 0, bs - keylen);
- memcpy(hmacctx->opad, hmacctx->ipad, bs);
+ unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
+ "fortified memcpy causes -Wrestrict warning");
for (i = 0; i < bs / sizeof(int); i++) {
*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index f7c8bb95a71b..5e9d568131fe 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -133,7 +133,6 @@ int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 7f88ddb08631..1d693b8436e6 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -344,7 +344,6 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
-int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
struct hash_wr_param *param);
int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index cbd8ca6e52ee..5d60a4bcb511 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -15,7 +15,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
@@ -277,7 +277,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
- rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+ rng->type = (uintptr_t)of_device_get_match_data(&pdev->dev);
mutex_init(&rng->lock);
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index 14d0d83d388d..49dce9e0a834 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -8,13 +8,17 @@
* ECB mode.
*/
-#include <linux/crypto.h>
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/pm_runtime.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "sl3516-ce.h"
/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
@@ -105,7 +109,7 @@ static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
struct sl3516_ce_alg_template *algt;
int err;
- algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
algt->stat_fb++;
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -136,7 +140,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq)
int err = 0;
int i;
- algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@@ -258,7 +262,7 @@ theend:
return err;
}
-static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -318,7 +322,7 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
- algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+ algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -335,10 +339,6 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
- op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
-
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index b7524b649068..0f43c6e39bb9 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -6,22 +6,24 @@
*
* Core file which registers crypto algorithms supported by the CryptoEngine
*/
+
+#include <crypto/engine.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"
@@ -217,7 +219,7 @@ static struct sl3516_ce_alg_template ce_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.mode = ECB_AES,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sl3516",
@@ -236,11 +238,13 @@ static struct sl3516_ce_alg_template ce_algs[] = {
.setkey = sl3516_ce_aes_setkey,
.encrypt = sl3516_ce_skencrypt,
.decrypt = sl3516_ce_skdecrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = sl3516_ce_handle_cipher_request,
+ },
},
};
-#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sl3516_ce_dev *ce = seq->private;
@@ -264,8 +268,8 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- ce_algs[i].alg.skcipher.base.cra_driver_name,
- ce_algs[i].alg.skcipher.base.cra_name,
+ ce_algs[i].alg.skcipher.base.base.cra_driver_name,
+ ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
break;
}
@@ -274,7 +278,6 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
-#endif
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
@@ -286,11 +289,11 @@ static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "DEBUG: Register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
- err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "Fail to register %s\n",
- ce_algs[i].alg.skcipher.base.cra_name);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@@ -313,8 +316,8 @@ static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
- ce_algs[i].alg.skcipher.base.cra_name);
- crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+ ce_algs[i].alg.skcipher.base.base.cra_name);
+ crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
}
}
@@ -473,13 +476,20 @@ static int sl3516_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
+ /* Ignore error of debugfs */
+ dbgfs_dir = debugfs_create_dir("sl3516", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444,
+ dbgfs_dir, ce,
+ &sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
- /* Ignore error of debugfs */
- ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
- ce->dbgfs_stats = debugfs_create_file("stats", 0444,
- ce->dbgfs_dir, ce,
- &sl3516_ce_debugfs_fops);
+ ce->dbgfs_dir = dbgfs_dir;
+ ce->dbgfs_stats = dbgfs_stats;
#endif
+ }
return 0;
error_pmuse:
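
The sl3516 probe hunk above replaces an #ifdef block with if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)), keeping only the stores into the config-gated struct members under the preprocessor. A hedged illustration of the pattern (CONFIG_FOO_DEBUG is a placeholder option, not from this driver):

#include <linux/kconfig.h>
#include <linux/printk.h>

/* The branch is always compiled and type-checked; when the option is off,
 * IS_ENABLED() is constant 0 and the compiler discards the dead code that
 * an #ifdef would have hidden from it entirely. */
static void example_debug_setup(void)
{
	if (IS_ENABLED(CONFIG_FOO_DEBUG))
		pr_debug("debugfs support built in\n");
}
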
diff --git a/drivers/crypto/gemini/sl3516-ce.h b/drivers/crypto/gemini/sl3516-ce.h
index 4c0ec6c920d1..9e1a7e7f8961 100644
--- a/drivers/crypto/gemini/sl3516-ce.h
+++ b/drivers/crypto/gemini/sl3516-ce.h
@@ -17,7 +17,6 @@
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
-#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/hw_random.h>
@@ -292,16 +291,12 @@ struct sl3516_ce_cipher_req_ctx {
/*
* struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
- *
- * enginectx must be the first element
*/
struct sl3516_ce_cipher_tfm_ctx {
- struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sl3516_ce_dev *ce;
@@ -324,7 +319,7 @@ struct sl3516_ce_alg_template {
u32 mode;
struct sl3516_ce_dev *ce;
union {
- struct skcipher_alg skcipher;
+ struct skcipher_engine_alg skcipher;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
@@ -345,3 +340,4 @@ int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);
+int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 8ede77310dc5..9a1c61be32cc 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1392,9 +1392,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ unsigned int sz, sz_shift, curve_sz;
struct device *dev = ctx->dev;
char key[HPRE_ECC_MAX_KSZ];
- unsigned int sz, sz_shift;
struct ecdh params;
int ret;
@@ -1406,7 +1406,13 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
/* Use stdrng to generate private key */
if (!params.key || !params.key_size) {
params.key = key;
- params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
+ curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ if (!curve_sz) {
+ dev_err(dev, "Invalid curve size!\n");
+ return -EINVAL;
+ }
+
+ params.key_size = curve_sz - 1;
ret = ecdh_gen_privkey(ctx, &params);
if (ret)
return ret;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 5d0adfb54a34..39297ce70f44 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -209,7 +209,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
- {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+ {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
@@ -276,6 +276,9 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
.int_msk = BIT(23),
.msg = "sva_fsm_timeout_int_set"
}, {
+ .int_msk = BIT(24),
+ .msg = "sva_int_set"
+ }, {
/* sentinel */
}
};
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index edc6fd44e7ca..a99fd589445c 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -88,6 +88,8 @@
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_PAGE_SIZE 0x0034
#define QM_QP_DB_INTERVAL 0x10000
+#define QM_DB_TIMEOUT_CFG 0x100074
+#define QM_DB_TIMEOUT_SET 0x1fffff
#define QM_MEM_START_INIT 0x100040
#define QM_MEM_INIT_DONE 0x100044
@@ -954,6 +956,11 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
if (!val)
return IRQ_NONE;
+ if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
+ dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
+ return IRQ_HANDLED;
+ }
+
schedule_work(&qm->cmd_process);
return IRQ_HANDLED;
@@ -997,7 +1004,7 @@ static void qm_reset_function(struct hisi_qm *qm)
return;
}
- ret = hisi_qm_stop(qm, QM_FLR);
+ ret = hisi_qm_stop(qm, QM_DOWN);
if (ret) {
dev_err(dev, "failed to stop qm when reset function\n");
goto clear_bit;
@@ -2743,6 +2750,9 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
test_bit(QM_RESETTING, &qm->misc_ctl))
msleep(WAIT_PERIOD);
+ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
+ flush_work(&qm->cmd_process);
+
udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
@@ -3243,7 +3253,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
if (qm->status.stop_reason == QM_SOFT_RESET ||
- qm->status.stop_reason == QM_FLR) {
+ qm->status.stop_reason == QM_DOWN) {
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
ret = qm_stop_started_qp(qm);
if (ret < 0) {
@@ -4539,11 +4549,11 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF)
qm_cmd_uninit(qm);
- ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
+ ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
if (ret)
pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
- ret = hisi_qm_stop(qm, QM_FLR);
+ ret = hisi_qm_stop(qm, QM_DOWN);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
@@ -4641,9 +4651,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
- ret = hisi_qm_stop(qm, QM_NORMAL);
+ ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
+
+ hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@@ -4807,7 +4819,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
- qm_pf_reset_vf_process(qm, QM_FLR);
+ qm_pf_reset_vf_process(qm, QM_DOWN);
break;
case QM_PF_SRST_PREPARE:
qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
@@ -5371,6 +5383,8 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_pci_init;
if (qm->fun_type == QM_HW_PF) {
+		/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
+ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
if (ret) {
@@ -5538,6 +5552,8 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
qm_cmd_init(qm);
hisi_qm_dev_err_init(qm);
+	/* Set the doorbell timeout to QM_DB_TIMEOUT_SET ns. */
+ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index e75851326c1e..e1e08993de12 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1107,8 +1107,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
}
queue->task_irq = platform_get_irq(to_platform_device(dev),
queue->queue_id * 2 + 1);
- if (queue->task_irq <= 0) {
- ret = -EINVAL;
+ if (queue->task_irq < 0) {
+ ret = queue->task_irq;
goto err_free_ring_db;
}
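
The sec_drv change above keeps the errno from platform_get_irq() instead of collapsing every failure into -EINVAL. A hedged sketch of the idiom (function and names are placeholders):

#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev, int *irq_out)
{
	int irq = platform_get_irq(pdev, 0);

	/* A negative return is already an errno (possibly -EPROBE_DEFER),
	 * so propagate it rather than rewriting it. */
	if (irq < 0)
		return irq;

	*irq_out = irq;
	return 0;
}
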
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 359aa2b41016..45063693859c 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@@ -1105,7 +1105,7 @@ static struct platform_driver img_hash_driver = {
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
- .of_match_table = of_match_ptr(img_hash_match),
+ .of_match_table = img_hash_match,
}
};
module_platform_driver(img_hash_driver);
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index ae31be00357a..1e2fd9a754ec 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -5,24 +5,23 @@
* Copyright (C) 2018-2020 Intel Corporation
*/
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/crypto.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/types.h>
-
-#include <crypto/aes.h>
-#include <crypto/engine.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
#include "ocs-aes.h"
@@ -38,7 +37,6 @@
/**
* struct ocs_aes_tctx - OCS AES Transform context
- * @engine_ctx: Engine context.
* @aes_dev: The OCS AES device.
* @key: AES/SM4 key.
* @key_len: The length (in bytes) of @key.
@@ -47,7 +45,6 @@
* @use_fallback: Whether or not fallback cipher should be used.
*/
struct ocs_aes_tctx {
- struct crypto_engine_ctx engine_ctx;
struct ocs_aes_dev *aes_dev;
u8 key[OCS_AES_KEYSIZE_256];
unsigned int key_len;
@@ -1148,15 +1145,6 @@ static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
}
-static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
-{
- tctx->engine_ctx.op.prepare_request = NULL;
- tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
- tctx->engine_ctx.op.unprepare_request = NULL;
-
- return 0;
-}
-
static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@@ -1172,16 +1160,14 @@ static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
- return ocs_common_init(tctx);
+ return 0;
}
static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
{
- struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
- return ocs_common_init(tctx);
+ return 0;
}
static inline void clear_key(struct ocs_aes_tctx *tctx)
@@ -1206,15 +1192,6 @@ static void ocs_exit_tfm(struct crypto_skcipher *tfm)
}
}
-static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
-{
- tctx->engine_ctx.op.prepare_request = NULL;
- tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
- tctx->engine_ctx.op.unprepare_request = NULL;
-
- return 0;
-}
-
static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@@ -1233,7 +1210,7 @@ static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
(sizeof(struct aead_request) +
crypto_aead_reqsize(tctx->sw_cipher.aead))));
- return ocs_common_aead_init(tctx);
+ return 0;
}
static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
@@ -1261,11 +1238,9 @@ static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
{
- struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-
crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
- return ocs_common_aead_init(tctx);
+ return 0;
}
static void ocs_aead_cra_exit(struct crypto_aead *tfm)
@@ -1280,182 +1255,190 @@ static void ocs_aead_cra_exit(struct crypto_aead *tfm)
}
}
-static struct skcipher_alg algs[] = {
+static struct skcipher_engine_alg algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_ecb_encrypt,
- .decrypt = kmb_ocs_aes_ecb_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ecb(aes)",
+ .base.base.cra_driver_name = "ecb-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_ecb_encrypt,
+ .base.decrypt = kmb_ocs_aes_ecb_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_cbc_encrypt,
- .decrypt = kmb_ocs_aes_cbc_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cbc(aes)",
+ .base.base.cra_driver_name = "cbc-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_cbc_encrypt,
+ .base.decrypt = kmb_ocs_aes_cbc_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_ctr_encrypt,
- .decrypt = kmb_ocs_aes_ctr_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ctr(aes)",
+ .base.base.cra_driver_name = "ctr-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = 1,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_ctr_encrypt,
+ .base.decrypt = kmb_ocs_aes_ctr_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
- .base.cra_name = "cts(cbc(aes))",
- .base.cra_driver_name = "cts-aes-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_AES_MIN_KEY_SIZE,
- .max_keysize = OCS_AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_aes_set_key,
- .encrypt = kmb_ocs_aes_cts_encrypt,
- .decrypt = kmb_ocs_aes_cts_decrypt,
- .init = ocs_aes_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cts(cbc(aes))",
+ .base.base.cra_driver_name = "cts-aes-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+ .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_aes_set_key,
+ .base.encrypt = kmb_ocs_aes_cts_encrypt,
+ .base.decrypt = kmb_ocs_aes_cts_decrypt,
+ .base.init = ocs_aes_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
- .base.cra_name = "ecb(sm4)",
- .base.cra_driver_name = "ecb-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_ecb_encrypt,
- .decrypt = kmb_ocs_sm4_ecb_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ecb(sm4)",
+ .base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_ecb_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ecb_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
- .base.cra_name = "cbc(sm4)",
- .base.cra_driver_name = "cbc-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_cbc_encrypt,
- .decrypt = kmb_ocs_sm4_cbc_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cbc(sm4)",
+ .base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_cbc_encrypt,
+ .base.decrypt = kmb_ocs_sm4_cbc_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
- .base.cra_name = "ctr(sm4)",
- .base.cra_driver_name = "ctr-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_ctr_encrypt,
- .decrypt = kmb_ocs_sm4_ctr_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "ctr(sm4)",
+ .base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = 1,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_ctr_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ctr_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
- .base.cra_name = "cts(cbc(sm4))",
- .base.cra_driver_name = "cts-sm4-keembay-ocs",
- .base.cra_priority = KMB_OCS_PRIORITY,
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
- .base.cra_module = THIS_MODULE,
- .base.cra_alignmask = 0,
-
- .min_keysize = OCS_SM4_KEY_SIZE,
- .max_keysize = OCS_SM4_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = kmb_ocs_sm4_set_key,
- .encrypt = kmb_ocs_sm4_cts_encrypt,
- .decrypt = kmb_ocs_sm4_cts_decrypt,
- .init = ocs_sm4_init_tfm,
- .exit = ocs_exit_tfm,
+ .base.base.cra_name = "cts(cbc(sm4))",
+ .base.base.cra_driver_name = "cts-sm4-keembay-ocs",
+ .base.base.cra_priority = KMB_OCS_PRIORITY,
+ .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+ .base.base.cra_module = THIS_MODULE,
+ .base.base.cra_alignmask = 0,
+
+ .base.min_keysize = OCS_SM4_KEY_SIZE,
+ .base.max_keysize = OCS_SM4_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.setkey = kmb_ocs_sm4_set_key,
+ .base.encrypt = kmb_ocs_sm4_cts_encrypt,
+ .base.decrypt = kmb_ocs_sm4_cts_decrypt,
+ .base.init = ocs_sm4_init_tfm,
+ .base.exit = ocs_exit_tfm,
+ .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
};
-static struct aead_alg algs_aead[] = {
+static struct aead_engine_alg algs_aead[] = {
{
- .base = {
+ .base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1467,17 +1450,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_aes_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_gcm_setauthsize,
- .setkey = kmb_ocs_aes_aead_set_key,
- .encrypt = kmb_ocs_aes_gcm_encrypt,
- .decrypt = kmb_ocs_aes_gcm_decrypt,
+ .base.init = ocs_aes_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+ .base.setkey = kmb_ocs_aes_aead_set_key,
+ .base.encrypt = kmb_ocs_aes_gcm_encrypt,
+ .base.decrypt = kmb_ocs_aes_gcm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
- .base = {
+ .base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1489,17 +1473,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_aes_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_ccm_setauthsize,
- .setkey = kmb_ocs_aes_aead_set_key,
- .encrypt = kmb_ocs_aes_ccm_encrypt,
- .decrypt = kmb_ocs_aes_ccm_decrypt,
+ .base.init = ocs_aes_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+ .base.setkey = kmb_ocs_aes_aead_set_key,
+ .base.encrypt = kmb_ocs_aes_ccm_encrypt,
+ .base.decrypt = kmb_ocs_aes_ccm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
- .base = {
+ .base.base = {
.cra_name = "gcm(sm4)",
.cra_driver_name = "gcm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1510,17 +1495,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_sm4_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_gcm_setauthsize,
- .setkey = kmb_ocs_sm4_aead_set_key,
- .encrypt = kmb_ocs_sm4_gcm_encrypt,
- .decrypt = kmb_ocs_sm4_gcm_decrypt,
+ .base.init = ocs_sm4_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+ .base.setkey = kmb_ocs_sm4_aead_set_key,
+ .base.encrypt = kmb_ocs_sm4_gcm_encrypt,
+ .base.decrypt = kmb_ocs_sm4_gcm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
- .base = {
+ .base.base = {
.cra_name = "ccm(sm4)",
.cra_driver_name = "ccm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1531,21 +1517,22 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
- .init = ocs_sm4_aead_cra_init,
- .exit = ocs_aead_cra_exit,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setauthsize = kmb_ocs_aead_ccm_setauthsize,
- .setkey = kmb_ocs_sm4_aead_set_key,
- .encrypt = kmb_ocs_sm4_ccm_encrypt,
- .decrypt = kmb_ocs_sm4_ccm_decrypt,
+ .base.init = ocs_sm4_aead_cra_init,
+ .base.exit = ocs_aead_cra_exit,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+ .base.setkey = kmb_ocs_sm4_aead_set_key,
+ .base.encrypt = kmb_ocs_sm4_ccm_encrypt,
+ .base.decrypt = kmb_ocs_sm4_ccm_decrypt,
+ .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
}
};
static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
{
- crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+ crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+ crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
static int register_aes_algs(struct ocs_aes_dev *aes_dev)
@@ -1556,13 +1543,13 @@ static int register_aes_algs(struct ocs_aes_dev *aes_dev)
* If any algorithm fails to register, all preceding algorithms that
* were successfully registered will be automatically unregistered.
*/
- ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+ ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
if (ret)
return ret;
- ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+ ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
if (ret)
- crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
+ crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
return ret;
}
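
The Keem Bay OCS AES/SM4 rework above follows the crypto_engine API change in this cycle: struct crypto_engine_ctx and its prepare/unprepare hooks leave the TFM context, and the do_one_request handler moves into the new *_engine_alg wrappers registered via crypto_engine_register_*(). A hedged, heavily abbreviated sketch of the new shape (names are placeholders and most mandatory skcipher fields are omitted):

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int example_do_one_request(struct crypto_engine *engine, void *areq)
{
	/* Process one queued request, then call the engine's completion
	 * helper; omitted in this sketch. */
	return 0;
}

static struct skcipher_engine_alg example_alg = {
	.base.base.cra_name		= "ecb(aes)",
	.base.base.cra_driver_name	= "ecb-aes-example",
	.base.base.cra_module		= THIS_MODULE,
	/* request handling now sits beside the algorithm, not in the TFM ctx */
	.op.do_one_request		= example_do_one_request,
};

/* Registered and unregistered with:
 *   crypto_engine_register_skcipher(&example_alg);
 *   crypto_engine_unregister_skcipher(&example_alg);
 */
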
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 2269df17514c..fb95deed9057 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -7,30 +7,27 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
#include <linux/clk.h>
#include <linux/completion.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/fips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include <crypto/ecc_curve.h>
-#include <crypto/ecdh.h>
-#include <crypto/engine.h>
-#include <crypto/kpp.h>
-#include <crypto/rng.h>
-
-#include <crypto/internal/ecc.h>
-#include <crypto/internal/kpp.h>
+#include <linux/string.h>
#define DRV_NAME "keembay-ocs-ecc"
@@ -95,13 +92,11 @@ struct ocs_ecc_dev {
/**
* struct ocs_ecc_ctx - Transformation context.
- * @engine_ctx: Crypto engine ctx.
* @ecc_dev: The ECC driver associated with this context.
* @curve: The elliptic curve used by this transformation.
* @private_key: The private key.
*/
struct ocs_ecc_ctx {
- struct crypto_engine_ctx engine_ctx;
struct ocs_ecc_dev *ecc_dev;
const struct ecc_curve *curve;
u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
@@ -794,10 +789,6 @@ static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
if (!tctx->curve)
return -EOPNOTSUPP;
- tctx->engine_ctx.op.prepare_request = NULL;
- tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
- tctx->engine_ctx.op.unprepare_request = NULL;
-
return 0;
}
@@ -830,36 +821,38 @@ static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
return digits_to_bytes(tctx->curve->g.ndigits) * 2;
}
-static struct kpp_alg ocs_ecdh_p256 = {
- .set_secret = kmb_ocs_ecdh_set_secret,
- .generate_public_key = kmb_ocs_ecdh_generate_public_key,
- .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
- .init = kmb_ocs_ecdh_nist_p256_init_tfm,
- .exit = kmb_ocs_ecdh_exit_tfm,
- .max_size = kmb_ocs_ecdh_max_size,
- .base = {
+static struct kpp_engine_alg ocs_ecdh_p256 = {
+ .base.set_secret = kmb_ocs_ecdh_set_secret,
+ .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .base.init = kmb_ocs_ecdh_nist_p256_init_tfm,
+ .base.exit = kmb_ocs_ecdh_exit_tfm,
+ .base.max_size = kmb_ocs_ecdh_max_size,
+ .base.base = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "ecdh-nist-p256-keembay-ocs",
.cra_priority = KMB_OCS_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ocs_ecc_ctx),
},
+ .op.do_one_request = kmb_ocs_ecc_do_one_request,
};
-static struct kpp_alg ocs_ecdh_p384 = {
- .set_secret = kmb_ocs_ecdh_set_secret,
- .generate_public_key = kmb_ocs_ecdh_generate_public_key,
- .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
- .init = kmb_ocs_ecdh_nist_p384_init_tfm,
- .exit = kmb_ocs_ecdh_exit_tfm,
- .max_size = kmb_ocs_ecdh_max_size,
- .base = {
+static struct kpp_engine_alg ocs_ecdh_p384 = {
+ .base.set_secret = kmb_ocs_ecdh_set_secret,
+ .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .base.init = kmb_ocs_ecdh_nist_p384_init_tfm,
+ .base.exit = kmb_ocs_ecdh_exit_tfm,
+ .base.max_size = kmb_ocs_ecdh_max_size,
+ .base.base = {
.cra_name = "ecdh-nist-p384",
.cra_driver_name = "ecdh-nist-p384-keembay-ocs",
.cra_priority = KMB_OCS_ECC_PRIORITY,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ocs_ecc_ctx),
},
+ .op.do_one_request = kmb_ocs_ecc_do_one_request,
};
static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
@@ -941,14 +934,14 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
}
/* Register the KPP algo. */
- rc = crypto_register_kpp(&ocs_ecdh_p256);
+ rc = crypto_engine_register_kpp(&ocs_ecdh_p256);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
goto cleanup;
}
- rc = crypto_register_kpp(&ocs_ecdh_p384);
+ rc = crypto_engine_register_kpp(&ocs_ecdh_p384);
if (rc) {
dev_err(dev,
"Could not register OCS algorithms with Crypto API\n");
@@ -958,7 +951,7 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
return 0;
ocs_ecdh_p384_error:
- crypto_unregister_kpp(&ocs_ecdh_p256);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p256);
cleanup:
crypto_engine_exit(ecc_dev->engine);
@@ -977,8 +970,8 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev)
ecc_dev = platform_get_drvdata(pdev);
- crypto_unregister_kpp(&ocs_ecdh_p384);
- crypto_unregister_kpp(&ocs_ecdh_p256);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p384);
+ crypto_engine_unregister_kpp(&ocs_ecdh_p256);
spin_lock(&ocs_ecc.lock);
list_del(&ecc_dev->list);
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index d4bcbed1f546..daba8ca05dbe 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -5,19 +5,21 @@
* Copyright (C) 2018-2020 Intel Corporation
*/
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-
#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
#include "ocs-hcu.h"
@@ -34,7 +36,6 @@
/**
* struct ocs_hcu_ctx: OCS HCU Transform context.
- * @engine_ctx: Crypto Engine context.
* @hcu_dev: The OCS HCU device used by the transformation.
* @key: The key (used only for HMAC transformations).
* @key_len: The length of the key.
@@ -42,7 +43,6 @@
* @is_hmac_tfm: Whether or not this is a HMAC transformation.
*/
struct ocs_hcu_ctx {
- struct crypto_engine_ctx engine_ctx;
struct ocs_hcu_dev *hcu_dev;
u8 key[SHA512_BLOCK_SIZE];
size_t key_len;
@@ -824,11 +824,6 @@ static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
sizeof(struct ocs_hcu_rctx));
-
- /* Init context to 0. */
- memzero_explicit(ctx, sizeof(*ctx));
- /* Set engine ops. */
- ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
}
static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
@@ -883,17 +878,17 @@ static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->key, sizeof(ctx->key));
}
-static struct ahash_alg ocs_hcu_algs[] = {
+static struct ahash_engine_alg ocs_hcu_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -907,18 +902,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -933,18 +929,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -958,18 +955,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -984,17 +982,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1008,18 +1007,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sm3_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1034,17 +1034,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_sm3_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1058,18 +1059,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1084,17 +1086,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1108,18 +1111,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = kmb_ocs_hcu_sha_cra_init,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
- .init = kmb_ocs_hcu_init,
- .update = kmb_ocs_hcu_update,
- .final = kmb_ocs_hcu_final,
- .finup = kmb_ocs_hcu_finup,
- .digest = kmb_ocs_hcu_digest,
- .export = kmb_ocs_hcu_export,
- .import = kmb_ocs_hcu_import,
- .setkey = kmb_ocs_hcu_setkey,
- .halg = {
+ .base.init = kmb_ocs_hcu_init,
+ .base.update = kmb_ocs_hcu_update,
+ .base.final = kmb_ocs_hcu_final,
+ .base.finup = kmb_ocs_hcu_finup,
+ .base.digest = kmb_ocs_hcu_digest,
+ .base.export = kmb_ocs_hcu_export,
+ .base.import = kmb_ocs_hcu_import,
+ .base.setkey = kmb_ocs_hcu_setkey,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct ocs_hcu_rctx),
.base = {
@@ -1134,7 +1138,8 @@ static struct ahash_alg ocs_hcu_algs[] = {
.cra_init = kmb_ocs_hcu_hmac_cra_init,
.cra_exit = kmb_ocs_hcu_hmac_cra_exit,
}
- }
+ },
+ .op.do_one_request = kmb_ocs_hcu_do_one_request,
},
};
@@ -1155,7 +1160,7 @@ static int kmb_ocs_hcu_remove(struct platform_device *pdev)
if (!hcu_dev)
return -ENODEV;
- crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
rc = crypto_engine_exit(hcu_dev->engine);
@@ -1170,7 +1175,6 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocs_hcu_dev *hcu_dev;
- struct resource *hcu_mem;
int rc;
hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
@@ -1184,14 +1188,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
if (rc)
return rc;
- /* Get the memory address and remap. */
- hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!hcu_mem) {
- dev_err(dev, "Could not retrieve io mem resource.\n");
- return -ENODEV;
- }
-
- hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
+ hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hcu_dev->io_base))
return PTR_ERR(hcu_dev->io_base);
@@ -1231,7 +1228,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
/* Security infrastructure guarantees OCS clock is enabled. */
- rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+ rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
if (rc) {
dev_err(dev, "Could not register algorithms.\n");
goto cleanup;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index e543a9e24a06..dd4464b7e00b 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -3,11 +3,13 @@
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
+#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
+#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -223,6 +225,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_HKDF |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
@@ -246,12 +250,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ }
+
capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_SM2 |
ICP_ACCEL_CAPABILITIES_ECEDMONT;
if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
@@ -317,6 +328,14 @@ static void get_admin_info(struct admin_info *admin_csrs_info)
admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * 4XXX uses KPT counter for HB
+ */
+ return ADF_4XXX_KPT_COUNTER_FREQ;
+}
+
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
@@ -508,6 +527,10 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->enable_pm = adf_gen4_enable_pm;
hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index e5b314d2b60e..bb3d95a8fb21 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -3,6 +3,7 @@
#ifndef ADF_4XXX_HW_DATA_H_
#define ADF_4XXX_HW_DATA_H_
+#include <linux/units.h>
#include <adf_accel_devices.h>
/* PCIe configuration space */
@@ -64,6 +65,9 @@
#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
+/* Clocks frequency */
+#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
/* qat_4xxx fuse bits are different from old GENs, redefine them */
enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 1a15600361d0..6d4e2e139ffa 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -8,6 +8,7 @@
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
+#include <adf_heartbeat.h>
#include "adf_4xxx_hw_data.h"
#include "qat_compression.h"
@@ -77,6 +78,8 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
if (ret)
return ret;
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 475643654e64..9c00c441b602 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
+#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
/* Worker thread to service arbiter mappings */
@@ -50,6 +52,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for c3xxx.
+ */
+ return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+ u32 frequency;
+ int ret;
+
+ ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C3XXX_MIN_AE_FREQ,
+ ADF_C3XXX_MAX_AE_FREQ);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device->clock_frequency = frequency;
+ return 0;
+}
+
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_PMISC_BAR;
@@ -127,6 +151,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->measure_clock = measure_clock;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 336a06f11dbd..690c6a1aa172 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -3,6 +3,8 @@
#ifndef ADF_C3XXX_HW_DATA_H_
#define ADF_C3XXX_HW_DATA_H_
+#include <linux/units.h>
+
/* PCIe configuration space */
#define ADF_C3XXX_PMISC_BAR 0
#define ADF_C3XXX_ETR_BAR 1
@@ -19,6 +21,11 @@
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
#define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
+/* Clocks frequency */
+#define ADF_C3XXX_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C3XXX_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C3XXX_MAX_AE_FREQ (685 * HZ_PER_MHZ)
+
/* Firmware Binary */
#define ADF_C3XXX_FW "qat_c3xxx.bin"
#define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index e14270703670..355a781693eb 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
+#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
/* Worker thread to service arbiter mappings */
@@ -50,6 +52,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for c62x.
+ */
+ return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+ u32 frequency;
+ int ret;
+
+ ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ,
+ ADF_C62X_MAX_AE_FREQ);
+ if (ret)
+ return ret;
+
+ accel_dev->hw_device->clock_frequency = frequency;
+ return 0;
+}
+
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
@@ -129,6 +153,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->measure_clock = measure_clock;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
index 008c0a3a9769..13e6ebf6fd91 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
@@ -3,6 +3,8 @@
#ifndef ADF_C62X_HW_DATA_H_
#define ADF_C62X_HW_DATA_H_
+#include <linux/units.h>
+
/* PCIe configuration space */
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
@@ -19,6 +21,11 @@
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
#define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
+/* Clocks frequency */
+#define ADF_C62X_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C62X_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C62X_MAX_AE_FREQ (800 * HZ_PER_MHZ)
+
/* Firmware Binary */
#define ADF_C62X_FW "qat_c62x.bin"
#define ADF_C62X_MMP "qat_c62x_mmp.bin"
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 38de3aba6e8c..43622c7fca71 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -17,6 +17,8 @@ intel_qat-objs := adf_cfg.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
+ adf_gen4_timer.o \
+ adf_clock.o \
qat_crypto.o \
qat_compression.o \
qat_comp_algs.o \
@@ -28,6 +30,9 @@ intel_qat-objs := adf_cfg.o \
qat_bl.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+ adf_fw_counters.o \
+ adf_heartbeat.o \
+ adf_heartbeat_dbgfs.o \
adf_dbgfs.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 0399417b91fc..e57abde66f4f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -188,6 +188,11 @@ struct adf_hw_device_data {
int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+ int (*start_timer)(struct adf_accel_dev *accel_dev);
+ void (*stop_timer)(struct adf_accel_dev *accel_dev);
+ void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
+ uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
+ int (*measure_clock)(struct adf_accel_dev *accel_dev);
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
@@ -229,6 +234,7 @@ struct adf_hw_device_data {
u8 num_accel;
u8 num_logical_accel;
u8 num_engines;
+ u32 num_hb_ctrs;
};
/* CSR write macro */
@@ -241,6 +247,11 @@ struct adf_hw_device_data {
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
+#define ADF_AE_ADMIN_THREAD 7
+#define ADF_NUM_THREADS_PER_AE 8
+#define ADF_NUM_PKE_STRAND 2
+#define ADF_AE_STRAND0_THREAD 8
+#define ADF_AE_STRAND1_THREAD 9
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
@@ -292,9 +303,12 @@ struct adf_accel_dev {
unsigned long status;
atomic_t ref_count;
struct dentry *debugfs_dir;
+ struct dentry *fw_cntr_dbgfile;
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
+ struct adf_timer *timer;
+ struct adf_heartbeat *heartbeat;
union {
struct {
/* protects VF2PF interrupts access */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 118775ee02f2..ff790823b868 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#define ADF_ADMIN_MAILBOX_STRIDE 0x1000
@@ -15,6 +16,7 @@
#define ADF_CONST_TABLE_SIZE 1024
#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_ONE_AE 1
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -194,6 +196,22 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
+{
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+ unsigned int ae_mask = ADF_ONE_AE;
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_TIMER_GET;
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ *timestamp = resp.timestamp;
+ return 0;
+}
+
static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
u32 *capabilities)
{
@@ -223,6 +241,49 @@ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
return 0;
}
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_COUNTERS_GET;
+
+ ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
+ if (ret || resp.status)
+ return -EFAULT;
+
+ *reqs = resp.req_rec_count;
+ *resps = resp.resp_sent_count;
+
+ return 0;
+}
+
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp = { };
+
+ req.cmd_id = ICP_QAT_FW_SYNC;
+ req.int_timer_ticks = cnt;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
+{
+ u32 ae_mask = accel_dev->hw_device->ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+
+ req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
+ req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr;
+ req.heartbeat_ticks = ticks;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
index 3ae1e5caee0e..6066dc637352 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
@@ -47,4 +47,6 @@
#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
#define ADF_ACCEL_STR "Accelerator%d"
+#define ADF_HEARTBEAT_TIMER "HeartbeatTimer"
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
new file mode 100644
index 000000000000..dc0778691eb0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+
+#define MEASURE_CLOCK_RETRIES 10
+#define MEASURE_CLOCK_DELAY_US 10000
+#define ME_CLK_DIVIDER 16
+#define MEASURE_CLOCK_DELTA_THRESHOLD_US 100
+
+static inline u64 timespec_to_us(const struct timespec64 *ts)
+{
+ return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_USEC);
+}
+
+static inline u64 timespec_to_ms(const struct timespec64 *ts)
+{
+ return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_MSEC);
+}
+
+u64 adf_clock_get_current_time(void)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ return timespec_to_ms(&ts);
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
+{
+ struct timespec64 ts1, ts2, ts3, ts4;
+ u64 timestamp1, timestamp2, temp;
+ u32 delta_us, tries;
+ int ret;
+
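+	/*
+	 * Bracket each firmware timestamp read with host timestamps and
+	 * retry until the bracketing window is small enough to bound the
+	 * sampling error of the measurement.
+	 */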
+ tries = MEASURE_CLOCK_RETRIES;
+ do {
+ ktime_get_real_ts64(&ts1);
+ ret = adf_get_fw_timestamp(accel_dev, &timestamp1);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get fw timestamp\n");
+ return ret;
+ }
+ ktime_get_real_ts64(&ts2);
+ delta_us = timespec_to_us(&ts2) - timespec_to_us(&ts1);
+ } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+ if (!tries) {
+ dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+ return -ETIMEDOUT;
+ }
+
+ fsleep(MEASURE_CLOCK_DELAY_US);
+
+ tries = MEASURE_CLOCK_RETRIES;
+ do {
+ ktime_get_real_ts64(&ts3);
+ if (adf_get_fw_timestamp(accel_dev, &timestamp2)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get fw timestamp\n");
+ return -EIO;
+ }
+ ktime_get_real_ts64(&ts4);
+ delta_us = timespec_to_us(&ts4) - timespec_to_us(&ts3);
+ } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+ if (!tries) {
+ dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+ return -ETIMEDOUT;
+ }
+
+ delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
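+	/*
+	 * The firmware timestamp advances once every ME_CLK_DIVIDER AE clock
+	 * cycles; scale by 10 before the division to retain one extra digit
+	 * of precision, then convert ticks per microsecond to Hz below.
+	 */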
+ temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
+ temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
+ /*
+ * Enclose the division to allow the preprocessor to precalculate it,
+ * and avoid promoting r-value to 64-bit before division.
+ */
+ *frequency = temp * (HZ_PER_MHZ / 10);
+
+ return 0;
+}
+
+/**
+ * adf_dev_measure_clock() - measures device clock frequency
+ * @accel_dev: Pointer to acceleration device.
+ * @frequency: Pointer to variable where result will be stored
+ * @min: Minimal allowed frequency value
+ * @max: Maximal allowed frequency value
+ *
+ * If the measured frequency falls outside the min/max thresholds, the
+ * reported value is clamped to the crossed threshold.
+ *
+ * This algorithm compares the device firmware timestamp with the kernel
+ * timestamp, so high accuracy should not be expected from this measurement.
+ *
+ * Return:
+ * * 0 - measurement succeeded
+ * * -ETIMEDOUT - measurement failed
+ */
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev,
+ u32 *frequency, u32 min, u32 max)
+{
+ int ret;
+ u32 freq;
+
+ ret = measure_clock(accel_dev, &freq);
+ if (ret)
+ return ret;
+
+ *frequency = clamp(freq, min, max);
+
+ if (*frequency != freq)
+ dev_warn(&GET_DEV(accel_dev),
+ "Measured clock %d Hz is out of range, assuming %d\n",
+ freq, *frequency);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_measure_clock);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.h b/drivers/crypto/intel/qat/qat_common/adf_clock.h
new file mode 100644
index 000000000000..e309bc0dc35c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_CLOCK_H
+#define ADF_CLOCK_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency,
+ u32 min, u32 max);
+u64 adf_clock_get_current_time(void);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index b8132eb9bc2a..673b5044c62a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -58,12 +58,6 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
-
-int adf_ctl_dev_register(void);
-void adf_ctl_dev_unregister(void);
-int adf_processes_dev_register(void);
-void adf_processes_dev_unregister(void);
-
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@@ -94,7 +88,11 @@ void adf_exit_aer(void);
int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -178,8 +176,6 @@ int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned long ctx_mask,
unsigned short reg_num, unsigned int regdata);
-int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned short lm_addr, unsigned int value);
void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char mode);
int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
@@ -193,6 +189,8 @@ int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
int adf_init_misc_wq(void);
void adf_exit_misc_wq(void);
bool adf_misc_wq_queue_work(struct work_struct *work);
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ unsigned long delay);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index d0a2f892e6eb..04845f8d72be 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -6,6 +6,8 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
+#include "adf_fw_counters.h"
+#include "adf_heartbeat_dbgfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
@@ -56,6 +58,11 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
{
if (!accel_dev->debugfs_dir)
return;
+
+ if (!accel_dev->is_vf) {
+ adf_fw_counters_dbgfs_add(accel_dev);
+ adf_heartbeat_dbgfs_add(accel_dev);
+ }
}
/**
@@ -66,4 +73,9 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
{
if (!accel_dev->debugfs_dir)
return;
+
+ if (!accel_dev->is_vf) {
+ adf_heartbeat_dbgfs_rm(accel_dev);
+ adf_fw_counters_dbgfs_rm(accel_dev);
+ }
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
new file mode 100644
index 000000000000..cb6e09ef5c9f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_fw_counters.h"
+
+#define ADF_FW_COUNTERS_MAX_PADDING 16
+
+enum adf_fw_counters_types {
+ ADF_FW_REQUESTS,
+ ADF_FW_RESPONSES,
+ ADF_FW_COUNTERS_COUNT
+};
+
+static const char * const adf_fw_counter_names[] = {
+ [ADF_FW_REQUESTS] = "Requests",
+ [ADF_FW_RESPONSES] = "Responses",
+};
+
+static_assert(ARRAY_SIZE(adf_fw_counter_names) == ADF_FW_COUNTERS_COUNT);
+
+struct adf_ae_counters {
+ u16 ae;
+ u64 values[ADF_FW_COUNTERS_COUNT];
+};
+
+struct adf_fw_counters {
+ u16 ae_count;
+ struct adf_ae_counters ae_counters[];
+};
+
+static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
+ u64 req_count, u64 resp_count)
+{
+ ae_counters->ae = ae;
+ ae_counters->values[ADF_FW_REQUESTS] = req_count;
+ ae_counters->values[ADF_FW_RESPONSES] = resp_count;
+}
+
+static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev,
+ struct adf_fw_counters *fw_counters)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ unsigned long ae_mask;
+ unsigned int i;
+ unsigned long ae;
+
+ /* Ignore the admin AEs */
+ ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
+
+ if (hweight_long(ae_mask) > fw_counters->ae_count)
+ return -EINVAL;
+
+ i = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ u64 req_count, resp_count;
+ int ret;
+
+ ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count);
+ if (ret)
+ return ret;
+
+ adf_fw_counters_parse_ae_values(&fw_counters->ae_counters[i++], ae,
+ req_count, resp_count);
+ }
+
+ return 0;
+}
+
+static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count)
+{
+ struct adf_fw_counters *fw_counters;
+
+ if (unlikely(!ae_count))
+ return ERR_PTR(-EINVAL);
+
+ fw_counters = kmalloc(struct_size(fw_counters, ae_counters, ae_count), GFP_KERNEL);
+ if (!fw_counters)
+ return ERR_PTR(-ENOMEM);
+
+ fw_counters->ae_count = ae_count;
+
+ return fw_counters;
+}
+
+/**
+ * adf_fw_counters_get() - Return FW counters for the provided device.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Allocates and returns a table of counters containing execution statistics
+ * for each non-admin AE available through the supplied acceleration device.
+ * The caller becomes the owner of such memory and is responsible for
+ * the deallocation through a call to kfree().
+ *
+ * Returns: a pointer to a dynamically allocated struct adf_fw_counters
+ * on success, or an ERR_PTR-encoded error value on failure.
+ */
+static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_fw_counters *fw_counters;
+ unsigned long ae_count;
+ int ret;
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
+ return ERR_PTR(-EFAULT);
+ }
+
+ /* Ignore the admin AEs */
+ ae_count = hweight_long(hw_data->ae_mask & ~hw_data->admin_ae_mask);
+
+ fw_counters = adf_fw_counters_allocate(ae_count);
+ if (IS_ERR(fw_counters))
+ return fw_counters;
+
+ ret = adf_fw_counters_load_from_device(accel_dev, fw_counters);
+ if (ret) {
+ kfree(fw_counters);
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create QAT fw_counters file table [%d].\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return fw_counters;
+}
+
+static void *qat_fw_counters_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct adf_fw_counters *fw_counters = sfile->private;
+
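+	/* Position 0 emits the table header; AE rows are indexed from 1. */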
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos > fw_counters->ae_count)
+ return NULL;
+
+ return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void *qat_fw_counters_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+ struct adf_fw_counters *fw_counters = sfile->private;
+
+ (*pos)++;
+
+ if (*pos > fw_counters->ae_count)
+ return NULL;
+
+ return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void qat_fw_counters_seq_stop(struct seq_file *sfile, void *v) {}
+
+static int qat_fw_counters_seq_show(struct seq_file *sfile, void *v)
+{
+ int i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(sfile, "AE ");
+ for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+ seq_printf(sfile, " %*s", ADF_FW_COUNTERS_MAX_PADDING,
+ adf_fw_counter_names[i]);
+ } else {
+ struct adf_ae_counters *ae_counters = (struct adf_ae_counters *)v;
+
+ seq_printf(sfile, "%2d:", ae_counters->ae);
+ for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+ seq_printf(sfile, " %*llu", ADF_FW_COUNTERS_MAX_PADDING,
+ ae_counters->values[i]);
+ }
+ seq_putc(sfile, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations qat_fw_counters_sops = {
+ .start = qat_fw_counters_seq_start,
+ .next = qat_fw_counters_seq_next,
+ .stop = qat_fw_counters_seq_stop,
+ .show = qat_fw_counters_seq_show,
+};
+
+static int qat_fw_counters_file_open(struct inode *inode, struct file *file)
+{
+ struct adf_accel_dev *accel_dev = inode->i_private;
+ struct seq_file *fw_counters_seq_file;
+ struct adf_fw_counters *fw_counters;
+ int ret;
+
+ fw_counters = adf_fw_counters_get(accel_dev);
+ if (IS_ERR(fw_counters))
+ return PTR_ERR(fw_counters);
+
+ ret = seq_open(file, &qat_fw_counters_sops);
+ if (unlikely(ret)) {
+ kfree(fw_counters);
+ return ret;
+ }
+
+ fw_counters_seq_file = file->private_data;
+ fw_counters_seq_file->private = fw_counters;
+ return ret;
+}
+
+static int qat_fw_counters_file_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ kfree(seq->private);
+ seq->private = NULL;
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations qat_fw_counters_fops = {
+ .owner = THIS_MODULE,
+ .open = qat_fw_counters_file_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = qat_fw_counters_file_release,
+};
+
+/**
+ * adf_fw_counters_dbgfs_add() - Create a debugfs file containing FW
+ * execution counters.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Function creates a file to display a table with statistics for the given
+ * QAT acceleration device. The table stores device specific execution values
+ * for each AE, such as the number of requests sent to the FW and responses
+ * received from the FW.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->fw_cntr_dbgfile = debugfs_create_file("fw_counters", 0400,
+ accel_dev->debugfs_dir,
+ accel_dev,
+ &qat_fw_counters_fops);
+}
+
+/**
+ * adf_fw_counters_dbgfs_rm() - Remove the debugfs file containing FW counters.
+ * @accel_dev: Pointer to a QAT acceleration device.
+ *
+ * Function removes the file providing the table of statistics for the given
+ * QAT acceleration device.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ debugfs_remove(accel_dev->fw_cntr_dbgfile);
+ accel_dev->fw_cntr_dbgfile = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
new file mode 100644
index 000000000000..91b3b6a95f1f
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_COUNTERS_H
+#define ADF_FW_COUNTERS_H
+
+struct adf_accel_dev;
+
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
index eeb30da7587a..c27ff6d18e11 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
@@ -7,6 +7,7 @@
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "qat_compression.h"
+#include "adf_heartbeat.h"
#include "adf_transport_access_macros.h"
static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
@@ -195,6 +196,12 @@ int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
if (ret)
goto err;
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ goto err;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_DEFAULT_MS);
+
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
index e4bc07529be4..6bd341061de4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
@@ -145,6 +145,9 @@ do { \
#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
/* Interrupts */
#define ADF_GEN2_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
#define ADF_GEN2_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 4fb4b3df5a18..02d7a019ebf8 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -136,6 +136,9 @@ do { \
#define ADF_GEN4_VFLNOTIFY BIT(7)
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index dd112923e006..c2768762cca3 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -35,7 +35,7 @@
#define ADF_GEN4_PM_MSG_PENDING BIT(0)
#define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK GENMASK(28, 1)
-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x0)
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER (0x6)
#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
#define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1)
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
new file mode 100644
index 000000000000..646c57922fcd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_timer.h"
+
+#define ADF_GEN4_TIMER_PERIOD_MS 200
+
+/* This periodic update is used to trigger HB, RL & TL fw events */
+static void work_handler(struct work_struct *work)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_timer *timer_ctx;
+ u32 time_periods;
+
+ timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
+ accel_dev = timer_ctx->accel_dev;
+
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
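+	/*
+	 * Number of ADF_GEN4_TIMER_PERIOD_MS periods elapsed since the timer
+	 * was started, passed to the firmware in the timer sync admin
+	 * message below.
+	 */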
+ time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
+ ADF_GEN4_TIMER_PERIOD_MS);
+
+ if (adf_send_admin_tim_sync(accel_dev, time_periods))
+ dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
+}
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx;
+
+ timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
+ if (!timer_ctx)
+ return -ENOMEM;
+
+ timer_ctx->accel_dev = accel_dev;
+ accel_dev->timer = timer_ctx;
+ timer_ctx->initial_ktime = ktime_get_real();
+
+ INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
+ adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+ msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_timer *timer_ctx = accel_dev->timer;
+
+ if (!timer_ctx)
+ return;
+
+ cancel_delayed_work_sync(&timer_ctx->work_ctx);
+
+ kfree(timer_ctx);
+ accel_dev->timer = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
new file mode 100644
index 000000000000..66a709e7b358
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_GEN4_TIMER_H_
+#define ADF_GEN4_TIMER_H_
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+struct adf_accel_dev;
+
+struct adf_timer {
+ struct adf_accel_dev *accel_dev;
+ struct delayed_work work_ctx;
+ ktime_t initial_ktime;
+};
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_GEN4_TIMER_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
new file mode 100644
index 000000000000..beef9a5f6c75
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_internal.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
+
+/* Heartbeat counter pair */
+struct hb_cnt_pair {
+ __u16 resp_heartbeat_cnt;
+ __u16 req_heartbeat_cnt;
+};
+
+static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
+{
+ u64 curr_time = adf_clock_get_current_time();
+ u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time;
+
+ if (polling_time < accel_dev->heartbeat->hb_timer) {
+ dev_warn(&GET_DEV(accel_dev),
+ "HB polling too frequent. Configured HB timer %d ms\n",
+ accel_dev->heartbeat->hb_timer);
+ return -EINVAL;
+ }
+
+ accel_dev->heartbeat->last_hb_check_time = curr_time;
+ return 0;
+}
+
+/**
+ * validate_hb_ctrs_cnt() - checks if the number of heartbeat counters per AE
+ * should be increased to support the currently loaded firmware.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Return:
+ * * true - hb_ctrs must be increased by ADF_NUM_PKE_STRAND
+ * * false - no changes needed
+ */
+static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev)
+{
+ const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+ const size_t max_aes = accel_dev->hw_device->num_engines;
+ const size_t hb_struct_size = sizeof(struct hb_cnt_pair);
+ const size_t exp_diff_size = array3_size(ADF_NUM_PKE_STRAND, max_aes,
+ hb_struct_size);
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, hb_struct_size);
+ const u32 exp_diff_cnt = exp_diff_size / sizeof(u32);
+ const u32 stats_el_cnt = stats_size / sizeof(u32);
+ struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+ const u32 *mem_to_chk = (u32 *)(hb_stats + dev_ctrs);
+ u32 el_diff_cnt = 0;
+ int i;
+
+ /* count how many 32-bit words differ from the empty-signature pattern */
+ for (i = 0; i < stats_el_cnt; i++) {
+ if (mem_to_chk[i] == ADF_HB_EMPTY_SIG)
+ break;
+
+ el_diff_cnt++;
+ }
+
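+	/*
+	 * The counters count needs adjusting only when the firmware has
+	 * overwritten exactly the extra region expected for
+	 * ADF_NUM_PKE_STRAND additional counter pairs per AE.
+	 */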
+ return el_diff_cnt && el_diff_cnt == exp_diff_cnt;
+}
+
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+ struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+ const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+ const size_t max_aes = accel_dev->hw_device->num_engines;
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair));
+ const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32);
+
+ /* fill hb stats memory with pattern */
+ memset32((uint32_t *)hb_stats, ADF_HB_EMPTY_SIG, mem_items_to_fill);
+ accel_dev->heartbeat->ctrs_cnt_checked = false;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_check_ctrs);
+
+static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ u32 timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+ int cfg_read_status;
+ u32 ticks;
+ int ret;
+
+ cfg_read_status = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_HEARTBEAT_TIMER, timer_str);
+ if (cfg_read_status == 0) {
+ if (kstrtouint(timer_str, 10, &timer_ms))
+ dev_dbg(&GET_DEV(accel_dev),
+ "kstrtouint failed to parse the %s, param value",
+ ADF_HEARTBEAT_TIMER);
+ }
+
+ if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+ dev_err(&GET_DEV(accel_dev), "Timer cannot be less than %u\n",
+ ADF_CFG_HB_TIMER_MIN_MS);
+ return -EINVAL;
+ }
+
+ /*
+ * On 4xxx devices adf_timer is responsible for HB updates and
+ * its period is fixed to 200ms
+ */
+ if (accel_dev->timer)
+ timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+ ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+
+ accel_dev->heartbeat->hb_timer = timer_ms;
+ *value = ticks;
+
+ return 0;
+}
+
+static int check_ae(struct hb_cnt_pair *curr, struct hb_cnt_pair *prev,
+ u16 *count, const size_t hb_ctrs)
+{
+ size_t thr;
+
+ /* loop through all threads in AE */
+ for (thr = 0; thr < hb_ctrs; thr++) {
+ u16 req = curr[thr].req_heartbeat_cnt;
+ u16 resp = curr[thr].resp_heartbeat_cnt;
+ u16 last = prev[thr].resp_heartbeat_cnt;
+
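+		/*
+		 * A thread is counted as potentially hung when its response
+		 * counter has not advanced since the previous poll while
+		 * requests are still outstanding; the admin thread is always
+		 * expected to advance, so it is checked unconditionally.
+		 */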
+ if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) {
+ u16 retry = ++count[thr];
+
+ if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
+ return -EIO;
+
+ } else {
+ count[thr] = 0;
+ }
+ }
+ return 0;
+}
+
+static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+ struct hb_cnt_pair *live_stats, *last_stats, *curr_stats;
+ const size_t hb_ctrs = hw_device->num_hb_ctrs;
+ const unsigned long ae_mask = hw_device->ae_mask;
+ const size_t max_aes = hw_device->num_engines;
+ const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+ const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats));
+ struct hb_cnt_pair *ae_curr_p, *ae_prev_p;
+ u16 *count_fails, *ae_count_p;
+ size_t ae_offset;
+ size_t ae = 0;
+ int ret = 0;
+
+ if (!accel_dev->heartbeat->ctrs_cnt_checked) {
+ if (validate_hb_ctrs_cnt(accel_dev))
+ hw_device->num_hb_ctrs += ADF_NUM_PKE_STRAND;
+
+ accel_dev->heartbeat->ctrs_cnt_checked = true;
+ }
+
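+	/*
+	 * The heartbeat DMA area holds the live counters written by the
+	 * firmware, followed by the snapshot taken at the previous poll and
+	 * the per-thread failure counters.
+	 */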
+ live_stats = accel_dev->heartbeat->dma.virt_addr;
+ last_stats = live_stats + dev_ctrs;
+ count_fails = (u16 *)(last_stats + dev_ctrs);
+
+ curr_stats = kmemdup(live_stats, stats_size, GFP_KERNEL);
+ if (!curr_stats)
+ return -ENOMEM;
+
+ /* loop through active AEs */
+ for_each_set_bit(ae, &ae_mask, max_aes) {
+ ae_offset = size_mul(ae, hb_ctrs);
+ ae_curr_p = curr_stats + ae_offset;
+ ae_prev_p = last_stats + ae_offset;
+ ae_count_p = count_fails + ae_offset;
+
+ ret = check_ae(ae_curr_p, ae_prev_p, ae_count_p, hb_ctrs);
+ if (ret)
+ break;
+ }
+
+ /* Copy current stats for the next iteration */
+ memcpy(last_stats, curr_stats, stats_size);
+ kfree(curr_stats);
+
+ return ret;
+}
+
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+ enum adf_device_heartbeat_status *hb_status)
+{
+ struct adf_heartbeat *hb;
+
+ if (!adf_dev_started(accel_dev) ||
+ test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+ *hb_status = HB_DEV_UNRESPONSIVE;
+ return;
+ }
+
+ if (adf_hb_check_polling_freq(accel_dev) == -EINVAL) {
+ *hb_status = HB_DEV_UNSUPPORTED;
+ return;
+ }
+
+ hb = accel_dev->heartbeat;
+ hb->hb_sent_counter++;
+
+ if (adf_hb_get_status(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Heartbeat ERROR: QAT is not responding.\n");
+ *hb_status = HB_DEV_UNRESPONSIVE;
+ hb->hb_failed_counter++;
+ return;
+ }
+
+ *hb_status = HB_DEV_ALIVE;
+}
+
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+ u32 *value)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 clk_per_sec;
+
+ /* HB clock may be different than AE clock */
+ if (!hw_data->get_hb_clock)
+ return -EINVAL;
+
+ clk_per_sec = hw_data->get_hb_clock(hw_data);
+ *value = time_ms * (clk_per_sec / MSEC_PER_SEC);
+
+ return 0;
+}
+
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+ snprintf(timer_str, sizeof(timer_str), "%u", timer_ms);
+ return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_HEARTBEAT_TIMER, timer_str,
+ ADF_STR);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_save_cfg_param);
+
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb;
+
+ hb = kzalloc(sizeof(*hb), GFP_KERNEL);
+ if (!hb)
+ goto err_ret;
+
+ hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ &hb->dma.phy_addr, GFP_KERNEL);
+ if (!hb->dma.virt_addr)
+ goto err_free;
+
+ /*
+ * Set this flag to true by default to avoid unnecessary checks;
+ * it will be cleared on platforms that need such a check.
+ */
+ hb->ctrs_cnt_checked = true;
+ accel_dev->heartbeat = hb;
+
+ return 0;
+
+err_free:
+ kfree(hb);
+err_ret:
+ return -ENOMEM;
+}
+
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+ unsigned int timer_ticks;
+ int ret;
+
+ if (!accel_dev->heartbeat) {
+ dev_warn(&GET_DEV(accel_dev), "Heartbeat instance not found!");
+ return -EFAULT;
+ }
+
+ if (accel_dev->hw_device->check_hb_ctrs)
+ accel_dev->hw_device->check_hb_ctrs(accel_dev);
+
+ ret = get_timer_ticks(accel_dev, &timer_ticks);
+ if (ret)
+ return ret;
+
+ ret = adf_send_admin_hb_timer(accel_dev, timer_ticks);
+ if (ret)
+ dev_warn(&GET_DEV(accel_dev), "Heartbeat not supported!");
+
+ return ret;
+}
+
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ if (hb->dma.virt_addr)
+ dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+ hb->dma.virt_addr, hb->dma.phy_addr);
+
+ kfree(hb);
+ accel_dev->heartbeat = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
new file mode 100644
index 000000000000..b22e3cb29798
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_H_
+#define ADF_HEARTBEAT_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+struct dentry;
+
+#define ADF_CFG_HB_TIMER_MIN_MS 200
+#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
+#define ADF_CFG_HB_COUNT_THRESHOLD 3
+
+enum adf_device_heartbeat_status {
+ HB_DEV_UNRESPONSIVE = 0,
+ HB_DEV_ALIVE,
+ HB_DEV_UNSUPPORTED,
+};
+
+struct adf_heartbeat {
+ unsigned int hb_sent_counter;
+ unsigned int hb_failed_counter;
+ unsigned int hb_timer;
+ u64 last_hb_check_time;
+ bool ctrs_cnt_checked;
+ struct hb_dma_addr {
+ dma_addr_t phy_addr;
+ void *virt_addr;
+ } dma;
+ struct {
+ struct dentry *base_dir;
+ struct dentry *status;
+ struct dentry *cfg;
+ struct dentry *sent;
+ struct dentry *failed;
+ } dbgfs;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev);
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+ uint32_t *value);
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms);
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+ enum adf_device_heartbeat_status *hb_status);
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+
+#else
+static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+ unsigned int timer_ms)
+{
+ return 0;
+}
+
+static inline void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif /* ADF_HEARTBEAT_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
new file mode 100644
index 000000000000..803cbfd838f0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/types.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_heartbeat_dbgfs.h"
+
+#define HB_OK 0
+#define HB_ERROR -1
+#define HB_STATUS_MAX_STRLEN 4
+#define HB_STATS_MAX_STRLEN 16
+
+static ssize_t adf_hb_stats_read(struct file *file, char __user *user_buffer,
+ size_t count, loff_t *ppos)
+{
+ char buf[HB_STATS_MAX_STRLEN];
+ unsigned int *value;
+ int len;
+
+ if (*ppos > 0)
+ return 0;
+
+ value = file->private_data;
+ len = scnprintf(buf, sizeof(buf), "%u\n", *value);
+
+ return simple_read_from_buffer(user_buffer, count, ppos, buf, len + 1);
+}
+
+static const struct file_operations adf_hb_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_stats_read,
+};
+
+static ssize_t adf_hb_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum adf_device_heartbeat_status hb_status;
+ char ret_str[HB_STATUS_MAX_STRLEN];
+ struct adf_accel_dev *accel_dev;
+ int ret_code;
+ size_t len;
+
+ if (*ppos > 0)
+ return 0;
+
+ accel_dev = file->private_data;
+ ret_code = HB_OK;
+
+ adf_heartbeat_status(accel_dev, &hb_status);
+
+ if (hb_status != HB_DEV_ALIVE)
+ ret_code = HB_ERROR;
+
+ len = scnprintf(ret_str, sizeof(ret_str), "%d\n", ret_code);
+
+ return simple_read_from_buffer(user_buf, count, ppos, ret_str, len + 1);
+}
+
+static const struct file_operations adf_hb_status_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_status_read,
+};
+
+static ssize_t adf_hb_cfg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ struct adf_accel_dev *accel_dev;
+ unsigned int timer_ms;
+ int len;
+
+ if (*ppos > 0)
+ return 0;
+
+ accel_dev = file->private_data;
+ timer_ms = accel_dev->heartbeat->hb_timer;
+ len = scnprintf(timer_str, sizeof(timer_str), "%u\n", timer_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, timer_str,
+ len + 1);
+}
+
+static ssize_t adf_hb_cfg_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char input_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+ struct adf_accel_dev *accel_dev;
+ int ret, written_chars;
+ unsigned int timer_ms;
+ u32 ticks;
+
+ accel_dev = file->private_data;
+ timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+
+ /* leave the last byte for string termination */
+ if (count > sizeof(input_str) - 1)
+ return -EINVAL;
+
+ written_chars = simple_write_to_buffer(input_str, sizeof(input_str) - 1,
+ ppos, user_buf, count);
+ if (written_chars > 0) {
+ ret = kstrtouint(input_str, 10, &timer_ms);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "heartbeat_cfg: Invalid value\n");
+ return ret;
+ }
+
+ if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+ dev_err(&GET_DEV(accel_dev),
+ "heartbeat_cfg: Invalid value\n");
+ return -EINVAL;
+ }
+
+ /*
+ * On 4xxx devices, adf_timer is responsible for HB updates and
+ * its period is fixed to 200 ms
+ */
+ if (accel_dev->timer)
+ timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+ ret = adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+ if (ret)
+ return ret;
+
+ ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+ if (ret)
+ return ret;
+
+ ret = adf_send_admin_hb_timer(accel_dev, ticks);
+ if (ret)
+ return ret;
+
+ accel_dev->heartbeat->hb_timer = timer_ms;
+ }
+
+ return written_chars;
+}
+
+static const struct file_operations adf_hb_cfg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = adf_hb_cfg_read,
+ .write = adf_hb_cfg_write,
+};
+
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ hb->dbgfs.base_dir = debugfs_create_dir("heartbeat", accel_dev->debugfs_dir);
+ hb->dbgfs.status = debugfs_create_file("status", 0400, hb->dbgfs.base_dir,
+ accel_dev, &adf_hb_status_fops);
+ hb->dbgfs.sent = debugfs_create_file("queries_sent", 0400, hb->dbgfs.base_dir,
+ &hb->hb_sent_counter, &adf_hb_stats_fops);
+ hb->dbgfs.failed = debugfs_create_file("queries_failed", 0400, hb->dbgfs.base_dir,
+ &hb->hb_failed_counter, &adf_hb_stats_fops);
+ hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
+ accel_dev, &adf_hb_cfg_fops);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
+
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+ if (!hb)
+ return;
+
+ debugfs_remove(hb->dbgfs.status);
+ hb->dbgfs.status = NULL;
+ debugfs_remove(hb->dbgfs.sent);
+ hb->dbgfs.sent = NULL;
+ debugfs_remove(hb->dbgfs.failed);
+ hb->dbgfs.failed = NULL;
+ debugfs_remove(hb->dbgfs.cfg);
+ hb->dbgfs.cfg = NULL;
+ debugfs_remove(hb->dbgfs.base_dir);
+ hb->dbgfs.base_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_rm);
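The four attributes land under <debugfs>/qat_<device>/heartbeat/. A hedged userspace sketch of driving the config write path implemented above; the device directory name is an example, not taken from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* the BDF-style directory name below is purely illustrative */
	const char *cfg =
		"/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat/config";
	const char *val = "500";	/* ms; adf_hb_cfg_write() rejects values below 200 */
	int fd = open(cfg, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}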
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
new file mode 100644
index 000000000000..84dd29ea6454
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_DBGFS_H_
+#define ADF_HEARTBEAT_DBGFS_H_
+
+struct adf_accel_dev;
+
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_HEARTBEAT_DBGFS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 826179c98524..89001fe92e76 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -8,6 +8,7 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
+#include "adf_heartbeat.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -129,6 +130,8 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ adf_heartbeat_init(accel_dev);
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
@@ -163,6 +166,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
+ int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -177,6 +181,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->measure_clock) {
+ ret = hw_data->measure_clock(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to measure device clock\n");
+ return ret;
+ }
+ }
+
/* Set ssm watch dog timer */
if (hw_data->set_ssm_wdtimer)
hw_data->set_ssm_wdtimer(accel_dev);
@@ -187,6 +199,16 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->start_timer) {
+ ret = hw_data->start_timer(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
+ return ret;
+ }
+ }
+
+ adf_heartbeat_start(accel_dev);
+
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
@@ -235,6 +257,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
*/
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
bool wait = false;
@@ -270,6 +293,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
}
}
+ if (hw_data->stop_timer)
+ hw_data->stop_timer(accel_dev);
+
if (wait)
msleep(100);
@@ -326,6 +352,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, service->init_status);
}
+ adf_heartbeat_shutdown(accel_dev);
+
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
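The adf_dev_start()/adf_dev_stop() hunks above only invoke measure_clock, start_timer and stop_timer when a device provides them. A sketch of how a hw_data setup routine might wire the optional hooks; the my_* callbacks are placeholders and the signatures are inferred from the call sites above:

static int my_measure_clock(struct adf_accel_dev *accel_dev);
static int my_timer_start(struct adf_accel_dev *accel_dev);
static void my_timer_stop(struct adf_accel_dev *accel_dev);

static void my_init_hw_data(struct adf_hw_device_data *hw_data)
{
	/* all three hooks are optional; a NULL pointer simply skips the call */
	hw_data->measure_clock = my_measure_clock;
	hw_data->start_timer = my_timer_start;
	hw_data->stop_timer = my_timer_stop;
}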
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index ad9e135b8560..2aba194a7c29 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -380,3 +380,9 @@ bool adf_misc_wq_queue_work(struct work_struct *work)
{
return queue_work(adf_misc_wq, work);
}
+
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+ unsigned long delay)
+{
+ return queue_delayed_work(adf_misc_wq, work, delay);
+}
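A short sketch of using the new helper to run a periodic job on the shared QAT misc workqueue; the callback and the 200 ms period are illustrative, and the declaration of adf_misc_wq_queue_delayed_work() is assumed to live in adf_common_drv.h:

#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include "adf_common_drv.h"

static void my_periodic_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_periodic_work, my_periodic_fn);

static void my_periodic_fn(struct work_struct *work)
{
	/* ... do the periodic work, e.g. push a heartbeat/timer update ... */

	/* re-arm: run again on adf_misc_wq after ~200 ms */
	adf_misc_wq_queue_delayed_work(&my_periodic_work,
				       msecs_to_jiffies(200));
}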
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 56cb827f93ea..3e968a4bcc9c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -16,6 +16,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+ ICP_QAT_FW_TIMER_GET = 19,
ICP_QAT_FW_PM_STATE_CONFIG = 128,
};
@@ -37,6 +39,12 @@ struct icp_qat_fw_init_admin_req {
__u16 ibuf_size_in_kb;
__u16 resrvd3;
};
+ struct {
+ __u32 int_timer_ticks;
+ };
+ struct {
+ __u32 heartbeat_ticks;
+ };
__u32 idle_filter;
};
@@ -97,19 +105,6 @@ struct icp_qat_fw_init_admin_resp {
};
} __packed;
-#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
- ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index a65059e56248..0c8883e2ccc6 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -97,7 +97,10 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
- /* Bits 18-21 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_SM2 = BIT(18),
+ ICP_ACCEL_CAPABILITIES_SM3 = BIT(19),
+ ICP_ACCEL_CAPABILITIES_SM4 = BIT(20),
+ /* Bit 21 is currently reserved */
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
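The three new bits advertise SM2/SM3/SM4 offload. A minimal sketch of testing one of them, assuming the capability word is the usual accel_capabilities_mask field of adf_hw_device_data:

/* hypothetical helper; ICP_ACCEL_CAPABILITIES_SM4 comes from the enum above */
static bool qat_supports_sm4(struct adf_hw_device_data *hw_data)
{
	return hw_data->accel_capabilities_mask & ICP_ACCEL_CAPABILITIES_SM4;
}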
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 3f1f35283266..7842a9f22178 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -234,8 +234,7 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
DMA_FROM_DEVICE);
- memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
- kfree(dc_data->ovf_buff);
+ kfree_sensitive(dc_data->ovf_buff);
devm_kfree(dev, dc_data);
accel_dev->dc_data = NULL;
}
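kfree_sensitive() zeroes the allocation before freeing it and accepts NULL, so it replaces the open-coded memset()+kfree() pair above without changing behaviour. A trivial sketch of the idiom:

#include <linux/slab.h>

/* hypothetical helper showing the replacement pattern */
static void drop_secret_buffer(void *buf)
{
	kfree_sensitive(buf);	/* clears the memory, then frees it; NULL is a no-op */
}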
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index ce837bcc1cab..4bd150d1441a 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -11,7 +11,7 @@
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"
-#define UWORD_CPYBUF_SIZE 1024
+#define UWORD_CPYBUF_SIZE 1024U
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)
@@ -1986,10 +1986,7 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
uw_relative_addr = 0;
words_num = encap_page->micro_words_num;
while (words_num) {
- if (words_num < UWORD_CPYBUF_SIZE)
- cpylen = words_num;
- else
- cpylen = UWORD_CPYBUF_SIZE;
+ cpylen = min(words_num, UWORD_CPYBUF_SIZE);
/* load the buffer */
for (i = 0; i < cpylen; i++)
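The U suffix matters because the kernel's min() macro type-checks its arguments: words_num is unsigned int, so the limit must be unsigned as well or the build complains. A sketch with the same types as the surrounding code:

#include <linux/minmax.h>

static unsigned int uword_copy_len(unsigned int words_num)
{
	/* a plain 1024 (signed int) would trip min()'s type check here */
	return min(words_num, 1024U);
}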
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 1ebe0b351fae..09551f949126 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -7,6 +7,7 @@
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_dh895xcc_hw_data.h"
+#include "adf_heartbeat.h"
#include "icp_qat_hw.h"
#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF
@@ -44,6 +45,14 @@ static u32 get_misc_bar_id(struct adf_hw_device_data *self)
return ADF_DH895XCC_PMISC_BAR;
}
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * Timestamp update interval is 16 AE clock ticks for dh895xcc.
+ */
+ return self->clock_frequency / 16;
+}
+
static u32 get_etr_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_ETR_BAR;
@@ -237,6 +246,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->reset_device = adf_reset_sbr;
hw_data->disable_iov = adf_disable_sriov;
hw_data->dev_config = adf_gen2_dev_config;
+ hw_data->clock_frequency = ADF_DH895X_AE_FREQ;
+ hw_data->get_hb_clock = get_ts_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
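With ADF_DH895X_AE_FREQ defined as 933 MHz in the companion header change below, get_ts_clock() above evaluates to 933000000 / 16 = 58312500 Hz, i.e. roughly a 58.3 MHz heartbeat timestamp clock. The same arithmetic as a sketch:

#include <linux/types.h>
#include <linux/units.h>

static u32 dh895xcc_ts_clock_hz(void)
{
	/* 933 MHz AE clock, timestamp updated every 16 AE clock ticks */
	return (933 * HZ_PER_MHZ) / 16;		/* 58312500 Hz */
}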
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 7b674bbe4192..cd3a21985455 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -3,6 +3,8 @@
#ifndef ADF_DH895x_HW_DATA_H_
#define ADF_DH895x_HW_DATA_H_
+#include <linux/units.h>
+
/* PCIe configuration space */
#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
@@ -30,6 +32,9 @@
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
+/* Clock frequency */
+#define ADF_DH895X_AE_FREQ (933 * HZ_PER_MHZ)
+
/* FW names */
#define ADF_DH895XCC_FW "qat_895xcc.bin"
#define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 4f6ca229ee5e..d5a32d71a3e9 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
diff --git a/drivers/crypto/omap-aes-gcm.c b/drivers/crypto/omap-aes-gcm.c
index 9f937bdc53a7..c498950402e8 100644
--- a/drivers/crypto/omap-aes-gcm.c
+++ b/drivers/crypto/omap-aes-gcm.c
@@ -7,18 +7,21 @@
* Copyright (c) 2016 Texas Instruments Incorporated
*/
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <linux/errno.h>
-#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
-#include <crypto/internal/aead.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -212,12 +215,10 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
return 0;
}
-static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+static int omap_aes_gcm_prepare_req(struct aead_request *req,
+ struct omap_aes_dev *dd)
{
- struct aead_request *req = container_of(areq, struct aead_request,
- base);
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
- struct omap_aes_dev *dd = rctx->dd;
struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
int err;
@@ -356,17 +357,21 @@ int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
return crypto_rfc4106_check_authsize(authsize);
}
-static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
{
struct aead_request *req = container_of(areq, struct aead_request,
base);
struct omap_aes_reqctx *rctx = aead_request_ctx(req);
struct omap_aes_dev *dd = rctx->dd;
- int ret = 0;
+ int ret;
if (!dd)
return -ENODEV;
+ ret = omap_aes_gcm_prepare_req(req, dd);
+ if (ret)
+ return ret;
+
if (dd->in_sg_len)
ret = omap_aes_crypt_dma_start(dd);
else
@@ -377,12 +382,6 @@ static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
- struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
- ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
-
crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
return 0;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 67a99c760bc4..ed83023dd77a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -13,28 +13,26 @@
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/pm_runtime.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/engine.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/aead.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -426,20 +424,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
return 0;
}
-static int omap_aes_prepare_req(struct crypto_engine *engine,
- void *areq)
+static int omap_aes_prepare_req(struct skcipher_request *req,
+ struct omap_aes_dev *dd)
{
- struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
- struct omap_aes_dev *dd = rctx->dd;
int ret;
u16 flags;
- if (!dd)
- return -ENODEV;
-
/* assign new request to device */
dd->req = req;
dd->total = req->cryptlen;
@@ -491,7 +484,8 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
if (!dd)
return -ENODEV;
- return omap_aes_crypt_dma_start(dd);
+ return omap_aes_prepare_req(req, dd) ?:
+ omap_aes_crypt_dma_start(dd);
}
static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
@@ -629,11 +623,6 @@ static int omap_aes_ctr_decrypt(struct skcipher_request *req)
return omap_aes_crypt(req, FLAGS_CTR);
}
-static int omap_aes_prepare_req(struct crypto_engine *engine,
- void *req);
-static int omap_aes_crypt_req(struct crypto_engine *engine,
- void *req);
-
static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *name = crypto_tfm_alg_name(&tfm->base);
@@ -649,10 +638,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
crypto_skcipher_reqsize(blk));
- ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
-
return 0;
}
@@ -668,68 +653,77 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
/* ********************** ALGS ************************************ */
-static struct skcipher_alg algs_ecb_cbc[] = {
+static struct skcipher_engine_alg algs_ecb_cbc[] = {
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "ecb-aes-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ecb_encrypt,
- .decrypt = omap_aes_ecb_decrypt,
- .init = omap_aes_init_tfm,
- .exit = omap_aes_exit_tfm,
+ .base = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ecb_encrypt,
+ .decrypt = omap_aes_ecb_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+ },
+ .op.do_one_request = omap_aes_crypt_req,
},
{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "cbc-aes-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_cbc_encrypt,
- .decrypt = omap_aes_cbc_decrypt,
- .init = omap_aes_init_tfm,
- .exit = omap_aes_exit_tfm,
+ .base = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_cbc_encrypt,
+ .decrypt = omap_aes_cbc_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+ },
+ .op.do_one_request = omap_aes_crypt_req,
}
};
-static struct skcipher_alg algs_ctr[] = {
+static struct skcipher_engine_alg algs_ctr[] = {
{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "ctr-aes-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_setkey,
- .encrypt = omap_aes_ctr_encrypt,
- .decrypt = omap_aes_ctr_decrypt,
- .init = omap_aes_init_tfm,
- .exit = omap_aes_exit_tfm,
+ .base = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct omap_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_setkey,
+ .encrypt = omap_aes_ctr_encrypt,
+ .decrypt = omap_aes_ctr_decrypt,
+ .init = omap_aes_init_tfm,
+ .exit = omap_aes_exit_tfm,
+ },
+ .op.do_one_request = omap_aes_crypt_req,
}
};
@@ -740,46 +734,52 @@ static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
},
};
-static struct aead_alg algs_aead_gcm[] = {
+static struct aead_engine_alg algs_aead_gcm[] = {
{
.base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "gcm-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-omap",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .init = omap_aes_gcm_cra_init,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = omap_aes_gcm_setkey,
+ .setauthsize = omap_aes_gcm_setauthsize,
+ .encrypt = omap_aes_gcm_encrypt,
+ .decrypt = omap_aes_gcm_decrypt,
},
- .init = omap_aes_gcm_cra_init,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = omap_aes_gcm_setkey,
- .setauthsize = omap_aes_gcm_setauthsize,
- .encrypt = omap_aes_gcm_encrypt,
- .decrypt = omap_aes_gcm_decrypt,
+ .op.do_one_request = omap_aes_gcm_crypt_req,
},
{
.base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aes-omap",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-omap",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct omap_aes_gcm_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .init = omap_aes_gcm_cra_init,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .setkey = omap_aes_4106gcm_setkey,
+ .setauthsize = omap_aes_4106gcm_setauthsize,
+ .encrypt = omap_aes_4106gcm_encrypt,
+ .decrypt = omap_aes_4106gcm_decrypt,
},
- .init = omap_aes_gcm_cra_init,
- .maxauthsize = AES_BLOCK_SIZE,
- .ivsize = GCM_RFC4106_IV_SIZE,
- .setkey = omap_aes_4106gcm_setkey,
- .setauthsize = omap_aes_4106gcm_setauthsize,
- .encrypt = omap_aes_4106gcm_encrypt,
- .decrypt = omap_aes_4106gcm_decrypt,
+ .op.do_one_request = omap_aes_gcm_crypt_req,
},
};
@@ -1101,8 +1101,8 @@ static int omap_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_aes_dev *dd;
- struct skcipher_alg *algp;
- struct aead_alg *aalg;
+ struct skcipher_engine_alg *algp;
+ struct aead_engine_alg *aalg;
struct resource res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -1195,9 +1195,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->base.cra_name);
+ pr_debug("reg alg: %s\n", algp->base.base.cra_name);
- err = crypto_register_skcipher(algp);
+ err = crypto_engine_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1211,9 +1211,9 @@ static int omap_aes_probe(struct platform_device *pdev)
for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- pr_debug("reg alg: %s\n", aalg->base.cra_name);
+ pr_debug("reg alg: %s\n", aalg->base.base.cra_name);
- err = crypto_register_aead(aalg);
+ err = crypto_engine_register_aead(aalg);
if (err)
goto err_aead_algs;
@@ -1231,12 +1231,12 @@ static int omap_aes_probe(struct platform_device *pdev)
err_aead_algs:
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- crypto_unregister_aead(aalg);
+ crypto_engine_unregister_aead(aalg);
}
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1258,7 +1258,7 @@ err_data:
static int omap_aes_remove(struct platform_device *pdev)
{
struct omap_aes_dev *dd = platform_get_drvdata(pdev);
- struct aead_alg *aalg;
+ struct aead_engine_alg *aalg;
int i, j;
spin_lock_bh(&list_lock);
@@ -1267,14 +1267,14 @@ static int omap_aes_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
aalg = &dd->pdata->aead_algs_info->algs_list[i];
- crypto_unregister_aead(aalg);
+ crypto_engine_unregister_aead(aalg);
dd->pdata->aead_algs_info->registered--;
}
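The omap-aes conversion above (and the omap-des, omap-sham and rk3288 changes that follow) all take the same shape: the per-tfm crypto_engine_ctx hooks go away, each algorithm becomes a *_engine_alg with the descriptor under .base and the engine callback under .op, and registration switches to the crypto_engine_register_*() helpers. A minimal hedged sketch with my_* placeholders, includes mirroring the ones added in this series:

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int my_do_one_request(struct crypto_engine *engine, void *areq);
static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen);
static int my_encrypt(struct skcipher_request *req);
static int my_decrypt(struct skcipher_request *req);

static struct skcipher_engine_alg my_alg = {
	.base = {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-mydrv",
		.base.cra_priority	= 100,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_module	= THIS_MODULE,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= my_setkey,
		.encrypt		= my_encrypt,
		.decrypt		= my_decrypt,
	},
	.op.do_one_request	= my_do_one_request,
};

/* register with crypto_engine_register_skcipher(&my_alg),
 * unregister with crypto_engine_unregister_skcipher(&my_alg)
 */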
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 23d073e87bb8..0f35c9164764 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -10,7 +10,6 @@
#define __OMAP_AES_H__
#include <crypto/aes.h>
-#include <crypto/engine.h>
#define DST_MAXBURST 4
#define DMA_MIN (DST_MAXBURST * sizeof(u32))
@@ -93,7 +92,6 @@ struct omap_aes_gcm_result {
};
struct omap_aes_ctx {
- struct crypto_engine_ctx enginectx;
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
@@ -117,15 +115,15 @@ struct omap_aes_reqctx {
#define OMAP_AES_CACHE_SIZE 0
struct omap_aes_algs_info {
- struct skcipher_alg *algs_list;
- unsigned int size;
- unsigned int registered;
+ struct skcipher_engine_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
};
struct omap_aes_aead_algs {
- struct aead_alg *algs_list;
- unsigned int size;
- unsigned int registered;
+ struct aead_engine_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
};
struct omap_aes_pdata {
@@ -218,5 +216,6 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
void omap_aes_gcm_dma_out_callback(void *data);
void omap_aes_clear_copy_flags(struct omap_aes_dev *dd);
+int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq);
#endif
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index f783769ea110..089dd45eaedd 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -16,27 +16,23 @@
#define prx(num) do { } while (0)
#endif
+#include <crypto/engine.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
-#include <crypto/engine.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
#include "omap-crypto.h"
@@ -83,7 +79,6 @@
#define FLAGS_OUT_DATA_ST_SHIFT 10
struct omap_des_ctx {
- struct crypto_engine_ctx enginectx;
struct omap_des_dev *dd;
int keylen;
@@ -99,9 +94,9 @@ struct omap_des_reqctx {
#define OMAP_DES_CACHE_SIZE 0
struct omap_des_algs_info {
- struct skcipher_alg *algs_list;
- unsigned int size;
- unsigned int registered;
+ struct skcipher_engine_alg *algs_list;
+ unsigned int size;
+ unsigned int registered;
};
struct omap_des_pdata {
@@ -522,20 +517,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
return 0;
}
-static int omap_des_prepare_req(struct crypto_engine *engine,
- void *areq)
+static int omap_des_prepare_req(struct skcipher_request *req,
+ struct omap_des_dev *dd)
{
- struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
struct omap_des_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
- struct omap_des_dev *dd = omap_des_find_dev(ctx);
struct omap_des_reqctx *rctx;
int ret;
u16 flags;
- if (!dd)
- return -ENODEV;
-
/* assign new request to device */
dd->req = req;
dd->total = req->cryptlen;
@@ -590,7 +580,8 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
if (!dd)
return -ENODEV;
- return omap_des_crypt_dma_start(dd);
+ return omap_des_prepare_req(req, dd) ?:
+ omap_des_crypt_dma_start(dd);
}
static void omap_des_done_task(unsigned long data)
@@ -709,98 +700,99 @@ static int omap_des_cbc_decrypt(struct skcipher_request *req)
return omap_des_crypt(req, FLAGS_CBC);
}
-static int omap_des_prepare_req(struct crypto_engine *engine,
- void *areq);
-static int omap_des_crypt_req(struct crypto_engine *engine,
- void *areq);
-
static int omap_des_init_tfm(struct crypto_skcipher *tfm)
{
- struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm);
-
pr_debug("enter\n");
crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx));
- ctx->enginectx.op.prepare_request = omap_des_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
- ctx->enginectx.op.do_one_request = omap_des_crypt_req;
-
return 0;
}
/* ********************** ALGS ************************************ */
-static struct skcipher_alg algs_ecb_cbc[] = {
+static struct skcipher_engine_alg algs_ecb_cbc[] = {
{
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
},
{
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = omap_des_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
},
{
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_ecb_encrypt,
- .decrypt = omap_des_ecb_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_ecb_encrypt,
+ .decrypt = omap_des_ecb_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
},
{
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3-omap",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct omap_des_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = omap_des3_setkey,
- .encrypt = omap_des_cbc_encrypt,
- .decrypt = omap_des_cbc_decrypt,
- .init = omap_des_init_tfm,
+ .base = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3-omap",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct omap_des_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = omap_des3_setkey,
+ .encrypt = omap_des_cbc_encrypt,
+ .decrypt = omap_des_cbc_decrypt,
+ .init = omap_des_init_tfm,
+ },
+ .op.do_one_request = omap_des_crypt_req,
}
};
@@ -958,7 +950,7 @@ static int omap_des_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct omap_des_dev *dd;
- struct skcipher_alg *algp;
+ struct skcipher_engine_alg *algp;
struct resource *res;
int err = -ENOMEM, i, j, irq = -1;
u32 reg;
@@ -971,18 +963,12 @@ static int omap_des_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no MEM resource info\n");
- goto err_res;
- }
-
err = (dev->of_node) ? omap_des_get_of(dd, pdev) :
omap_des_get_pdev(dd, pdev);
if (err)
goto err_res;
- dd->io_base = devm_ioremap_resource(dev, res);
+ dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dd->io_base)) {
err = PTR_ERR(dd->io_base);
goto err_res;
@@ -1052,9 +1038,9 @@ static int omap_des_probe(struct platform_device *pdev)
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
algp = &dd->pdata->algs_info[i].algs_list[j];
- pr_debug("reg alg: %s\n", algp->base.cra_name);
+ pr_debug("reg alg: %s\n", algp->base.base.cra_name);
- err = crypto_register_skcipher(algp);
+ err = crypto_engine_register_skcipher(algp);
if (err)
goto err_algs;
@@ -1067,7 +1053,7 @@ static int omap_des_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine:
@@ -1097,7 +1083,7 @@ static int omap_des_remove(struct platform_device *pdev)
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_skcipher(
+ crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index cbeda59c6b19..a6b4a0b3ace3 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -13,34 +13,30 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/device.h>
-#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/engine.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#define MD5_DIGEST_SIZE 16
@@ -168,7 +164,6 @@ struct omap_sham_hmac_ctx {
};
struct omap_sham_ctx {
- struct crypto_engine_ctx enginectx;
unsigned long flags;
/* fallback stuff */
@@ -180,7 +175,7 @@ struct omap_sham_ctx {
#define OMAP_SHAM_QUEUE_LENGTH 10
struct omap_sham_algs_info {
- struct ahash_alg *algs_list;
+ struct ahash_engine_alg *algs_list;
unsigned int size;
unsigned int registered;
};
@@ -1074,6 +1069,10 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
ctx->op, ctx->total, ctx->digcnt, final);
+ err = omap_sham_prepare_request(engine, areq);
+ if (err)
+ return err;
+
err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
dev_err(dd->dev, "failed to get sync: %d\n", err);
@@ -1349,10 +1348,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
- tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
- tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
- tctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -1423,15 +1418,15 @@ static int omap_sham_import(struct ahash_request *req, const void *in)
return 0;
}
-static struct ahash_alg algs_sha1_md5[] = {
+static struct ahash_engine_alg algs_sha1_md5[] = {
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA1_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha1",
.cra_driver_name = "omap-sha1",
.cra_priority = 400,
@@ -1444,16 +1439,17 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = MD5_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = MD5_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "md5",
.cra_driver_name = "omap-md5",
.cra_priority = 400,
@@ -1466,17 +1462,18 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA1_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "omap-hmac-sha1",
.cra_priority = 400,
@@ -1490,17 +1487,18 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha1_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = MD5_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = MD5_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "omap-hmac-md5",
.cra_priority = 400,
@@ -1514,20 +1512,21 @@ static struct ahash_alg algs_sha1_md5[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_md5_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
}
};
/* OMAP4 has some algs in addition to what OMAP2 has */
-static struct ahash_alg algs_sha224_sha256[] = {
-{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.base = {
+static struct ahash_engine_alg algs_sha224_sha256[] = {
+{
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA224_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha224",
.cra_driver_name = "omap-sha224",
.cra_priority = 400,
@@ -1540,16 +1539,17 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA256_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha256",
.cra_driver_name = "omap-sha256",
.cra_priority = 400,
@@ -1562,17 +1562,18 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA224_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA224_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "omap-hmac-sha224",
.cra_priority = 400,
@@ -1586,17 +1587,18 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha224_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA256_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "omap-hmac-sha256",
.cra_priority = 400,
@@ -1610,19 +1612,20 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha256_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
};
-static struct ahash_alg algs_sha384_sha512[] = {
+static struct ahash_engine_alg algs_sha384_sha512[] = {
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA384_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha384",
.cra_driver_name = "omap-sha384",
.cra_priority = 400,
@@ -1635,16 +1638,17 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.halg.digestsize = SHA512_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "sha512",
.cra_driver_name = "omap-sha512",
.cra_priority = 400,
@@ -1657,17 +1661,18 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA384_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA384_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha384)",
.cra_driver_name = "omap-hmac-sha384",
.cra_priority = 400,
@@ -1681,17 +1686,18 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha384_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
{
- .init = omap_sham_init,
- .update = omap_sham_update,
- .final = omap_sham_final,
- .finup = omap_sham_finup,
- .digest = omap_sham_digest,
- .setkey = omap_sham_setkey,
- .halg.digestsize = SHA512_DIGEST_SIZE,
- .halg.base = {
+ .base.init = omap_sham_init,
+ .base.update = omap_sham_update,
+ .base.final = omap_sham_final,
+ .base.finup = omap_sham_finup,
+ .base.digest = omap_sham_digest,
+ .base.setkey = omap_sham_setkey,
+ .base.halg.digestsize = SHA512_DIGEST_SIZE,
+ .base.halg.base = {
.cra_name = "hmac(sha512)",
.cra_driver_name = "omap-hmac-sha512",
.cra_priority = 400,
@@ -1705,7 +1711,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha512_init,
.cra_exit = omap_sham_cra_exit,
- }
+ },
+ .op.do_one_request = omap_sham_hash_one_req,
},
};
@@ -2146,14 +2153,16 @@ static int omap_sham_probe(struct platform_device *pdev)
break;
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+ struct ahash_engine_alg *ealg;
struct ahash_alg *alg;
- alg = &dd->pdata->algs_info[i].algs_list[j];
+ ealg = &dd->pdata->algs_info[i].algs_list[j];
+ alg = &ealg->base;
alg->export = omap_sham_export;
alg->import = omap_sham_import;
alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
BUFLEN;
- err = crypto_register_ahash(alg);
+ err = crypto_engine_register_ahash(ealg);
if (err)
goto err_algs;
@@ -2172,7 +2181,7 @@ static int omap_sham_probe(struct platform_device *pdev)
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
err_engine_start:
crypto_engine_exit(dd->engine);
@@ -2203,7 +2212,7 @@ static int omap_sham_remove(struct platform_device *pdev)
spin_unlock_bh(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 72dd1a4ebac4..825a729f205e 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -173,13 +173,9 @@ static int qcom_rng_probe(struct platform_device *pdev)
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
- /* ACPI systems have clk already on, so skip clk_get */
- if (!has_acpi_companion(&pdev->dev)) {
- rng->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(rng->clk))
- return PTR_ERR(rng->clk);
- }
-
+ rng->clk = devm_clk_get_optional(&pdev->dev, "core");
+ if (IS_ERR(rng->clk))
+ return PTR_ERR(rng->clk);
rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 9f6ba770a90a..77d5705a5d96 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -10,14 +10,21 @@
*/
#include "rk3288_crypto.h"
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/reset.h>
+#include <linux/spinlock.h>
static struct rockchip_ip rocklist = {
.dev_list = LIST_HEAD_INIT(rocklist.dev_list),
@@ -184,7 +191,6 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
&rk_ahash_md5,
};
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
{
struct rk_crypto_info *dd;
@@ -204,8 +210,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
switch (rk_cipher_algs[i]->type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
- rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_name,
rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
seq_printf(seq, "\tfallback due to length: %lu\n",
rk_cipher_algs[i]->stat_fb_len);
@@ -216,8 +222,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
- rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
- rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name,
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name,
rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
break;
}
@@ -226,17 +232,20 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
-#endif
static void register_debugfs(struct rk_crypto_info *crypto_info)
{
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+ struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_stats __maybe_unused;
+
/* Ignore error of debugfs */
- rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
- rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
- rocklist.dbgfs_dir,
- &rocklist,
- &rk_crypto_debugfs_fops);
+ dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+ dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, &rocklist,
+ &rk_crypto_debugfs_fops);
+
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+ rocklist.dbgfs_dir = dbgfs_dir;
+ rocklist.dbgfs_stats = dbgfs_stats;
#endif
}
@@ -250,15 +259,15 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
switch (rk_cipher_algs[i]->type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(crypto_info->dev, "Register %s as %s\n",
- rk_cipher_algs[i]->alg.skcipher.base.cra_name,
- rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name);
- err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_name,
+ rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name);
+ err = crypto_engine_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(crypto_info->dev, "Register %s as %s\n",
- rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
- rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name);
- err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash);
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name,
+ rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name);
+ err = crypto_engine_register_ahash(&rk_cipher_algs[i]->alg.hash);
break;
default:
dev_err(crypto_info->dev, "unknown algorithm\n");
@@ -271,9 +280,9 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
err_cipher_algs:
for (k = 0; k < i; k++) {
if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
- crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
+ crypto_engine_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
else
- crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
return err;
}
@@ -284,9 +293,9 @@ static void rk_crypto_unregister(void)
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
- crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+ crypto_engine_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
else
- crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+ crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
}
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index b2695258cade..3aa03cbfb6be 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -3,21 +3,18 @@
#define __RK3288_CRYPTO_H__
#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/algapi.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
#include <crypto/engine.h>
+#include <crypto/internal/des.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
-
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
#define _SBF(v, f) ((v) << (f))
@@ -231,7 +228,6 @@ struct rk_crypto_info {
/* the private variable of hash */
struct rk_ahash_ctx {
- struct crypto_engine_ctx enginectx;
/* for fallback */
struct crypto_ahash *fallback_tfm;
};
@@ -246,7 +242,6 @@ struct rk_ahash_rctx {
/* the private variable of cipher */
struct rk_cipher_ctx {
- struct crypto_engine_ctx enginectx;
unsigned int keylen;
u8 key[AES_MAX_KEY_SIZE];
u8 iv[AES_BLOCK_SIZE];
@@ -264,8 +259,8 @@ struct rk_crypto_tmp {
u32 type;
struct rk_crypto_info *dev;
union {
- struct skcipher_alg skcipher;
- struct ahash_alg hash;
+ struct skcipher_engine_alg skcipher;
+ struct ahash_engine_alg hash;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index a78ff3dcd0b1..8c143180645e 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -8,9 +8,15 @@
*
* Some ideas are from marvell/cesa.c and s5p-sss.c driver.
*/
-#include <linux/device.h>
+
#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include "rk3288_crypto.h"
/*
@@ -40,8 +46,8 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
algt->stat_fb++;
@@ -240,14 +246,13 @@ static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
return 0;
}
-static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct rk_crypto_info *rkc = rctx->dev;
dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
- return 0;
}
static int rk_hash_run(struct crypto_engine *engine, void *breq)
@@ -255,11 +260,11 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
struct scatterlist *sg = areq->src;
struct rk_crypto_info *rkc = rctx->dev;
- int err = 0;
+ int err;
int i;
u32 v;
@@ -267,6 +272,10 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
if (err)
return err;
+ err = rk_hash_prepare(engine, breq);
+ if (err)
+ goto theend;
+
rctx->mode = 0;
algt->stat_req++;
@@ -327,15 +336,17 @@ theend:
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
+ rk_hash_unprepare(engine, breq);
+
return 0;
}
-static int rk_cra_hash_init(struct crypto_tfm *tfm)
+static int rk_hash_init_tfm(struct crypto_ahash *tfm)
{
- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
- const char *alg_name = crypto_tfm_alg_name(tfm);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+ const char *alg_name = crypto_ahash_alg_name(tfm);
+ struct ahash_alg *alg = crypto_ahash_alg(tfm);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
/* for fallback */
tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
@@ -345,27 +356,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
return PTR_ERR(tctx->fallback_tfm);
}
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ crypto_ahash_set_reqsize(tfm,
sizeof(struct rk_ahash_rctx) +
crypto_ahash_reqsize(tctx->fallback_tfm));
- tctx->enginectx.op.do_one_request = rk_hash_run;
- tctx->enginectx.op.prepare_request = rk_hash_prepare;
- tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
-
return 0;
}
-static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+static void rk_hash_exit_tfm(struct crypto_ahash *tfm)
{
- struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
crypto_free_ahash(tctx->fallback_tfm);
}
struct rk_crypto_tmp rk_ahash_sha1 = {
.type = CRYPTO_ALG_TYPE_AHASH,
- .alg.hash = {
+ .alg.hash.base = {
.init = rk_ahash_init,
.update = rk_ahash_update,
.final = rk_ahash_final,
@@ -373,6 +380,8 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
.export = rk_ahash_export,
.import = rk_ahash_import,
.digest = rk_ahash_digest,
+ .init_tfm = rk_hash_init_tfm,
+ .exit_tfm = rk_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@@ -385,17 +394,18 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
.cra_alignmask = 3,
- .cra_init = rk_cra_hash_init,
- .cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = rk_hash_run,
+ },
};
struct rk_crypto_tmp rk_ahash_sha256 = {
.type = CRYPTO_ALG_TYPE_AHASH,
- .alg.hash = {
+ .alg.hash.base = {
.init = rk_ahash_init,
.update = rk_ahash_update,
.final = rk_ahash_final,
@@ -403,6 +413,8 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
.export = rk_ahash_export,
.import = rk_ahash_import,
.digest = rk_ahash_digest,
+ .init_tfm = rk_hash_init_tfm,
+ .exit_tfm = rk_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@@ -415,17 +427,18 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
.cra_alignmask = 3,
- .cra_init = rk_cra_hash_init,
- .cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = rk_hash_run,
+ },
};
struct rk_crypto_tmp rk_ahash_md5 = {
.type = CRYPTO_ALG_TYPE_AHASH,
- .alg.hash = {
+ .alg.hash.base = {
.init = rk_ahash_init,
.update = rk_ahash_update,
.final = rk_ahash_final,
@@ -433,6 +446,8 @@ struct rk_crypto_tmp rk_ahash_md5 = {
.export = rk_ahash_export,
.import = rk_ahash_import,
.digest = rk_ahash_digest,
+ .init_tfm = rk_hash_init_tfm,
+ .exit_tfm = rk_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@@ -445,10 +460,11 @@ struct rk_crypto_tmp rk_ahash_md5 = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
.cra_alignmask = 3,
- .cra_init = rk_cra_hash_init,
- .cra_exit = rk_cra_hash_exit,
.cra_module = THIS_MODULE,
}
}
- }
+ },
+ .alg.hash.op = {
+ .do_one_request = rk_hash_run,
+ },
};
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 59069457582b..da95747d973f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -8,8 +8,14 @@
*
* Some ideas are from marvell-cesa.c and s5p-sss.c driver.
*/
-#include <linux/device.h>
+
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
#include "rk3288_crypto.h"
#define RK_CRYPTO_DEC BIT(0)
@@ -18,7 +24,7 @@ static int rk_cipher_need_fallback(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
struct scatterlist *sgs, *sgd;
unsigned int stodo, dtodo, len;
unsigned int bs = crypto_skcipher_blocksize(tfm);
@@ -65,7 +71,7 @@ static int rk_cipher_fallback(struct skcipher_request *areq)
struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
int err;
algt->stat_fb++;
@@ -305,7 +311,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
unsigned int len = areq->cryptlen;
unsigned int todo;
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
struct rk_crypto_info *rkc = rctx->dev;
err = pm_runtime_resume_and_get(rkc->dev);
@@ -430,7 +436,7 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
const char *name = crypto_tfm_alg_name(&tfm->base);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+ struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_tfm)) {
@@ -442,8 +448,6 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
tfm->reqsize = sizeof(struct rk_cipher_rctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm);
- ctx->enginectx.op.do_one_request = rk_cipher_run;
-
return 0;
}
@@ -457,7 +461,7 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
struct rk_crypto_tmp rk_ecb_aes_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-rk",
.base.cra_priority = 300,
@@ -474,12 +478,15 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
.setkey = rk_aes_setkey,
.encrypt = rk_aes_ecb_encrypt,
.decrypt = rk_aes_ecb_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_cbc_aes_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-rk",
.base.cra_priority = 300,
@@ -497,12 +504,15 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
.setkey = rk_aes_setkey,
.encrypt = rk_aes_cbc_encrypt,
.decrypt = rk_aes_cbc_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_ecb_des_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-rk",
.base.cra_priority = 300,
@@ -519,12 +529,15 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.setkey = rk_des_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_cbc_des_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-rk",
.base.cra_priority = 300,
@@ -542,12 +555,15 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.setkey = rk_des_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3-ede-rk",
.base.cra_priority = 300,
@@ -564,12 +580,15 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_ecb_encrypt,
.decrypt = rk_des3_ede_ecb_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
+ .alg.skcipher.base = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-ede-rk",
.base.cra_priority = 300,
@@ -587,5 +606,8 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
.setkey = rk_tdes_setkey,
.encrypt = rk_des3_ede_cbc_encrypt,
.decrypt = rk_des3_ede_cbc_decrypt,
- }
+ },
+ .alg.skcipher.op = {
+ .do_one_request = rk_cipher_run,
+ },
};
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1c4d5fb05d69..fe8cf9ba8005 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index df5f9d675c57..6238d34f8db2 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -15,7 +15,8 @@
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 4c799df3e883..62d93526920f 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -27,7 +27,6 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index df745fcb09df..2cb192502c1b 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -12,6 +12,8 @@ config CRYPTO_DEV_JH7110
select CRYPTO_SHA512
select CRYPTO_SM3_GENERIC
select CRYPTO_RSA
+ select CRYPTO_AES
+ select CRYPTO_CCM
help
Support for StarFive JH7110 crypto hardware acceleration engine.
This module provides acceleration for public key algo,
diff --git a/drivers/crypto/starfive/Makefile b/drivers/crypto/starfive/Makefile
index 98b01d2f1ccf..8c137afe58ad 100644
--- a/drivers/crypto/starfive/Makefile
+++ b/drivers/crypto/starfive/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o
-jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o
+jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o jh7110-aes.o
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
new file mode 100644
index 000000000000..9378e6682f0e
--- /dev/null
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive AES acceleration driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ */
+
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include "jh7110-cryp.h"
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define STARFIVE_AES_REGS_OFFSET 0x100
+#define STARFIVE_AES_AESDIO0R (STARFIVE_AES_REGS_OFFSET + 0x0)
+#define STARFIVE_AES_KEY0 (STARFIVE_AES_REGS_OFFSET + 0x4)
+#define STARFIVE_AES_KEY1 (STARFIVE_AES_REGS_OFFSET + 0x8)
+#define STARFIVE_AES_KEY2 (STARFIVE_AES_REGS_OFFSET + 0xC)
+#define STARFIVE_AES_KEY3 (STARFIVE_AES_REGS_OFFSET + 0x10)
+#define STARFIVE_AES_KEY4 (STARFIVE_AES_REGS_OFFSET + 0x14)
+#define STARFIVE_AES_KEY5 (STARFIVE_AES_REGS_OFFSET + 0x18)
+#define STARFIVE_AES_KEY6 (STARFIVE_AES_REGS_OFFSET + 0x1C)
+#define STARFIVE_AES_KEY7 (STARFIVE_AES_REGS_OFFSET + 0x20)
+#define STARFIVE_AES_CSR (STARFIVE_AES_REGS_OFFSET + 0x24)
+#define STARFIVE_AES_IV0 (STARFIVE_AES_REGS_OFFSET + 0x28)
+#define STARFIVE_AES_IV1 (STARFIVE_AES_REGS_OFFSET + 0x2C)
+#define STARFIVE_AES_IV2 (STARFIVE_AES_REGS_OFFSET + 0x30)
+#define STARFIVE_AES_IV3 (STARFIVE_AES_REGS_OFFSET + 0x34)
+#define STARFIVE_AES_NONCE0 (STARFIVE_AES_REGS_OFFSET + 0x3C)
+#define STARFIVE_AES_NONCE1 (STARFIVE_AES_REGS_OFFSET + 0x40)
+#define STARFIVE_AES_NONCE2 (STARFIVE_AES_REGS_OFFSET + 0x44)
+#define STARFIVE_AES_NONCE3 (STARFIVE_AES_REGS_OFFSET + 0x48)
+#define STARFIVE_AES_ALEN0 (STARFIVE_AES_REGS_OFFSET + 0x4C)
+#define STARFIVE_AES_ALEN1 (STARFIVE_AES_REGS_OFFSET + 0x50)
+#define STARFIVE_AES_MLEN0 (STARFIVE_AES_REGS_OFFSET + 0x54)
+#define STARFIVE_AES_MLEN1 (STARFIVE_AES_REGS_OFFSET + 0x58)
+#define STARFIVE_AES_IVLEN (STARFIVE_AES_REGS_OFFSET + 0x5C)
+
+#define FLG_MODE_MASK GENMASK(2, 0)
+#define FLG_ENCRYPT BIT(4)
+
+/* Misc */
+#define CCM_B0_ADATA 0x40
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+
+static inline int starfive_aes_wait_busy(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+ !(status & STARFIVE_AES_BUSY), 10, 100000);
+}
+
+static inline int starfive_aes_wait_keydone(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+ (status & STARFIVE_AES_KEY_DONE), 10, 100000);
+}
+
+static inline int starfive_aes_wait_gcmdone(struct starfive_cryp_dev *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+ (status & STARFIVE_AES_GCM_DONE), 10, 100000);
+}
+
+static inline int is_gcm(struct starfive_cryp_dev *cryp)
+{
+ return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM;
+}
+
+static inline int is_encrypt(struct starfive_cryp_dev *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mode)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ unsigned int value;
+
+ switch (hw_mode) {
+ case STARFIVE_AES_MODE_GCM:
+ value = readl(ctx->cryp->base + STARFIVE_AES_CSR);
+ value |= STARFIVE_AES_GCM_START;
+ writel(value, cryp->base + STARFIVE_AES_CSR);
+ starfive_aes_wait_gcmdone(cryp);
+ break;
+ case STARFIVE_AES_MODE_CCM:
+ value = readl(ctx->cryp->base + STARFIVE_AES_CSR);
+ value |= STARFIVE_AES_CCM_START;
+ writel(value, cryp->base + STARFIVE_AES_CSR);
+ break;
+ }
+}
+
+static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ if (is_gcm(cryp))
+ writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
+ else
+ writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN);
+}
+
+static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(upper_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN0);
+ writel(lower_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN1);
+}
+
+static inline void starfive_aes_set_mlen(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(upper_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN0);
+ writel(lower_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN1);
+}
+
+static inline int starfive_aes_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int starfive_aes_write_iv(struct starfive_cryp_ctx *ctx, u32 *iv)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(iv[0], cryp->base + STARFIVE_AES_IV0);
+ writel(iv[1], cryp->base + STARFIVE_AES_IV1);
+ writel(iv[2], cryp->base + STARFIVE_AES_IV2);
+
+ if (is_gcm(cryp)) {
+ if (starfive_aes_wait_gcmdone(cryp))
+ return -ETIMEDOUT;
+
+ return 0;
+ }
+
+ writel(iv[3], cryp->base + STARFIVE_AES_IV3);
+
+ return 0;
+}
+
+static inline void starfive_aes_get_iv(struct starfive_cryp_dev *cryp, u32 *iv)
+{
+ iv[0] = readl(cryp->base + STARFIVE_AES_IV0);
+ iv[1] = readl(cryp->base + STARFIVE_AES_IV1);
+ iv[2] = readl(cryp->base + STARFIVE_AES_IV2);
+ iv[3] = readl(cryp->base + STARFIVE_AES_IV3);
+}
+
+static inline void starfive_aes_write_nonce(struct starfive_cryp_ctx *ctx, u32 *nonce)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ writel(nonce[0], cryp->base + STARFIVE_AES_NONCE0);
+ writel(nonce[1], cryp->base + STARFIVE_AES_NONCE1);
+ writel(nonce[2], cryp->base + STARFIVE_AES_NONCE2);
+ writel(nonce[3], cryp->base + STARFIVE_AES_NONCE3);
+}
+
+static int starfive_aes_write_key(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 *key = (u32 *)ctx->key;
+
+ if (ctx->keylen >= AES_KEYSIZE_128) {
+ writel(key[0], cryp->base + STARFIVE_AES_KEY0);
+ writel(key[1], cryp->base + STARFIVE_AES_KEY1);
+ writel(key[2], cryp->base + STARFIVE_AES_KEY2);
+ writel(key[3], cryp->base + STARFIVE_AES_KEY3);
+ }
+
+ if (ctx->keylen >= AES_KEYSIZE_192) {
+ writel(key[4], cryp->base + STARFIVE_AES_KEY4);
+ writel(key[5], cryp->base + STARFIVE_AES_KEY5);
+ }
+
+ if (ctx->keylen >= AES_KEYSIZE_256) {
+ writel(key[6], cryp->base + STARFIVE_AES_KEY6);
+ writel(key[7], cryp->base + STARFIVE_AES_KEY7);
+ }
+
+ if (starfive_aes_wait_keydone(cryp))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int starfive_aes_ccm_init(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+ unsigned int textlen;
+
+ memcpy(iv, cryp->req.areq->iv, AES_BLOCK_SIZE);
+ memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+
+ /* Build B0 */
+ memcpy(b0, iv, AES_BLOCK_SIZE);
+
+ b0[0] |= (8 * ((cryp->authsize - 2) / 2));
+
+ if (cryp->assoclen)
+ b0[0] |= CCM_B0_ADATA;
+
+ textlen = cryp->total_in;
+
+ b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+ b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+ starfive_aes_write_nonce(ctx, (u32 *)b0);
+
+ return 0;
+}
+
+static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 hw_mode;
+
+ /* reset */
+ rctx->csr.aes.v = 0;
+ rctx->csr.aes.aesrst = 1;
+ writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR);
+
+ /* csr setup */
+ hw_mode = cryp->flags & FLG_MODE_MASK;
+
+ rctx->csr.aes.v = 0;
+
+ switch (ctx->keylen) {
+ case AES_KEYSIZE_128:
+ rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_128;
+ break;
+ case AES_KEYSIZE_192:
+ rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_192;
+ break;
+ case AES_KEYSIZE_256:
+ rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_256;
+ break;
+ }
+
+ rctx->csr.aes.mode = hw_mode;
+ rctx->csr.aes.cmode = !is_encrypt(cryp);
+ rctx->csr.aes.ie = 1;
+
+ if (hw_mode == STARFIVE_AES_MODE_CFB ||
+ hw_mode == STARFIVE_AES_MODE_OFB)
+ rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128;
+ else
+ rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
+
+ if (cryp->side_chan) {
+ rctx->csr.aes.delay_aes = 1;
+ rctx->csr.aes.vaes_start = 1;
+ }
+
+ writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR);
+
+ cryp->err = starfive_aes_write_key(ctx);
+ if (cryp->err)
+ return cryp->err;
+
+ switch (hw_mode) {
+ case STARFIVE_AES_MODE_GCM:
+ starfive_aes_set_alen(ctx);
+ starfive_aes_set_mlen(ctx);
+ starfive_aes_set_ivlen(ctx);
+ starfive_aes_aead_hw_start(ctx, hw_mode);
+ starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv);
+ break;
+ case STARFIVE_AES_MODE_CCM:
+ starfive_aes_set_alen(ctx);
+ starfive_aes_set_mlen(ctx);
+ starfive_aes_ccm_init(ctx);
+ starfive_aes_aead_hw_start(ctx, hw_mode);
+ break;
+ case STARFIVE_AES_MODE_OFB:
+ case STARFIVE_AES_MODE_CFB:
+ case STARFIVE_AES_MODE_CBC:
+ case STARFIVE_AES_MODE_CTR:
+ starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv);
+ break;
+ default:
+ break;
+ }
+
+ return cryp->err;
+}
+
+static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp)
+{
+ int i, start_addr;
+
+ if (starfive_aes_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT,
+ "Timeout waiting for tag generation.");
+
+ start_addr = STARFIVE_AES_NONCE0;
+
+ if (is_gcm(cryp))
+ for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4)
+ cryp->tag_out[i] = readl(cryp->base + start_addr);
+ else
+ for (i = 0; i < AES_BLOCK_32; i++)
+ cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+
+ if (is_encrypt(cryp)) {
+ scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1);
+ } else {
+ scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0);
+
+ if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize))
+ return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n");
+ }
+
+ return 0;
+}
+
+static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
+{
+ union starfive_aes_csr csr;
+ int err = cryp->err;
+
+ if (!err && cryp->authsize)
+ err = starfive_aes_read_authtag(cryp);
+
+ if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC ||
+ (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR))
+ starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv);
+
+ /* reset irq flags*/
+ csr.v = 0;
+ csr.aesrst = 1;
+ writel(csr.v, cryp->base + STARFIVE_AES_CSR);
+
+ if (cryp->authsize)
+ crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err);
+ else
+ crypto_finalize_skcipher_request(cryp->engine, cryp->req.sreq,
+ err);
+}
+
+void starfive_aes_done_task(unsigned long param)
+{
+ struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
+ u32 block[AES_BLOCK_32];
+ u32 stat;
+ int i;
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+
+ scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_out), 1);
+
+ cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out);
+
+ if (!cryp->total_out) {
+ starfive_aes_finish_req(cryp);
+ return;
+ }
+
+ memset(block, 0, AES_BLOCK_SIZE);
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_in), 0);
+ cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_AES_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+}
+
+static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ u32 *buffer;
+ int total_len, loop;
+
+ total_len = ALIGN(cryp->assoclen, AES_BLOCK_SIZE) / sizeof(unsigned int);
+ buffer = (u32 *)rctx->adata;
+
+ for (loop = 0; loop < total_len; loop += 4) {
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE0);
+ buffer++;
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE1);
+ buffer++;
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE2);
+ buffer++;
+ writel(*buffer, cryp->base + STARFIVE_AES_NONCE3);
+ buffer++;
+ }
+
+ if (starfive_aes_wait_gcmdone(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT,
+ "Timeout processing gcm aad block");
+
+ return 0;
+}
+
+static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx)
+{
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ u32 *buffer;
+ u8 *ci;
+ int total_len, loop;
+
+ total_len = cryp->assoclen;
+
+ ci = rctx->adata;
+ writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R);
+ ci++;
+ writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R);
+ ci++;
+ total_len -= 2;
+ buffer = (u32 *)ci;
+
+ for (loop = 0; loop < 3; loop++, buffer++)
+ writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R);
+
+ total_len -= 12;
+
+ while (total_len > 0) {
+ for (loop = 0; loop < AES_BLOCK_32; loop++, buffer++)
+ writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R);
+
+ total_len -= AES_BLOCK_SIZE;
+ }
+
+ if (starfive_aes_wait_busy(cryp))
+ return dev_err_probe(cryp->dev, -ETIMEDOUT,
+ "Timeout processing ccm aad block");
+
+ return 0;
+}
+
+static int starfive_aes_prepare_req(struct skcipher_request *req,
+ struct aead_request *areq)
+{
+ struct starfive_cryp_ctx *ctx;
+ struct starfive_cryp_request_ctx *rctx;
+ struct starfive_cryp_dev *cryp;
+
+ if (!req && !areq)
+ return -EINVAL;
+
+ ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
+ crypto_aead_ctx(crypto_aead_reqtfm(areq));
+
+ cryp = ctx->cryp;
+ rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+
+ if (req) {
+ cryp->req.sreq = req;
+ cryp->total_in = req->cryptlen;
+ cryp->total_out = req->cryptlen;
+ cryp->assoclen = 0;
+ cryp->authsize = 0;
+ } else {
+ cryp->req.areq = areq;
+ cryp->assoclen = areq->assoclen;
+ cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+ if (is_encrypt(cryp)) {
+ cryp->total_in = areq->cryptlen;
+ cryp->total_out = areq->cryptlen;
+ } else {
+ cryp->total_in = areq->cryptlen - cryp->authsize;
+ cryp->total_out = cryp->total_in;
+ }
+ }
+
+ rctx->in_sg = req ? req->src : areq->src;
+ scatterwalk_start(&cryp->in_walk, rctx->in_sg);
+
+ rctx->out_sg = req ? req->dst : areq->dst;
+ scatterwalk_start(&cryp->out_walk, rctx->out_sg);
+
+ if (cryp->assoclen) {
+ rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL);
+ if (!rctx->adata)
+ return dev_err_probe(cryp->dev, -ENOMEM,
+ "Failed to alloc memory for adata");
+
+ scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0);
+ scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2);
+ }
+
+ ctx->rctx = rctx;
+
+ return starfive_aes_hw_init(ctx);
+}
+
+static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req =
+ container_of(areq, struct skcipher_request, base);
+ struct starfive_cryp_ctx *ctx =
+ crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 block[AES_BLOCK_32];
+ u32 stat;
+ int err;
+ int i;
+
+ err = starfive_aes_prepare_req(req, NULL);
+ if (err)
+ return err;
+
+ /*
+ * Write first plain/ciphertext block to start the module
+ * then let irq tasklet handle the rest of the data blocks.
+ */
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_in), 0);
+ cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_AES_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+ return 0;
+}
+
+static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+ sizeof(struct skcipher_request));
+
+ return 0;
+}
+
+static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req =
+ container_of(areq, struct aead_request, base);
+ struct starfive_cryp_ctx *ctx =
+ crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ u32 block[AES_BLOCK_32];
+ u32 stat;
+ int err;
+ int i;
+
+ err = starfive_aes_prepare_req(NULL, req);
+ if (err)
+ return err;
+
+ if (!cryp->assoclen)
+ goto write_text;
+
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM)
+ cryp->err = starfive_aes_ccm_write_adata(ctx);
+ else
+ cryp->err = starfive_aes_gcm_write_adata(ctx);
+
+ kfree(rctx->adata);
+
+ if (cryp->err)
+ return cryp->err;
+
+write_text:
+ if (!cryp->total_in)
+ goto finish_req;
+
+ /*
+ * Write first plain/ciphertext block to start the module
+ * then let irq tasklet handle the rest of the data blocks.
+ */
+ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+ cryp->total_in), 0);
+ cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+ for (i = 0; i < AES_BLOCK_32; i++)
+ writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+ stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+ stat &= ~STARFIVE_IE_MASK_AES_DONE;
+ writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+ return 0;
+
+finish_req:
+ starfive_aes_finish_req(cryp);
+ return 0;
+}
+
+static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ struct crypto_tfm *aead = crypto_aead_tfm(tfm);
+ struct crypto_alg *alg = aead->__crt_alg;
+
+ ctx->cryp = starfive_cryp_find_dev(ctx);
+ if (!ctx->cryp)
+ return -ENODEV;
+
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fbk))
+ return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk),
+ "%s() failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ }
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) +
+ sizeof(struct aead_request));
+
+ return 0;
+}
+
+static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_aead(ctx->aead_fbk);
+}
+
+static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+ unsigned int blocksize_align = crypto_skcipher_blocksize(tfm) - 1;
+
+ cryp->flags = flags;
+
+ if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_ECB ||
+ (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC)
+ if (req->cryptlen & blocksize_align)
+ return -EINVAL;
+
+ return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
+}
+
+static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct starfive_cryp_dev *cryp = ctx->cryp;
+
+ cryp->flags = flags;
+
+ /*
+ * HW engine could not perform CCM tag verification on
+ * non-blocksize aligned text, use fallback algo instead
+ */
+ if (ctx->aead_fbk && !is_encrypt(cryp)) {
+ struct aead_request *subreq = aead_request_ctx(req);
+
+ aead_request_set_tfm(subreq, ctx->aead_fbk);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src,
+ req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return crypto_aead_decrypt(subreq);
+ }
+
+ return crypto_transfer_aead_request_to_engine(cryp->engine, req);
+}
+
+static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (!key || !keylen)
+ return -EINVAL;
+
+ if (keylen != AES_KEYSIZE_128 &&
+ keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ if (ctx->aead_fbk)
+ return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
+
+ return 0;
+}
+
+static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+ switch (authsize) {
+ case 4:
+ case 6:
+ case 8:
+ case 10:
+ case 12:
+ case 14:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return crypto_aead_setauthsize(ctx->aead_fbk, authsize);
+}
+
+static int starfive_aes_ecb_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ecb_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB);
+}
+
+static int starfive_aes_cbc_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC | FLG_ENCRYPT);
+}
+
+static int starfive_aes_cbc_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC);
+}
+
+static int starfive_aes_cfb_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_cfb_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB);
+}
+
+static int starfive_aes_ofb_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ofb_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB);
+}
+
+static int starfive_aes_ctr_encrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ctr_decrypt(struct skcipher_request *req)
+{
+ return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR);
+}
+
+static int starfive_aes_gcm_encrypt(struct aead_request *req)
+{
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM | FLG_ENCRYPT);
+}
+
+static int starfive_aes_gcm_decrypt(struct aead_request *req)
+{
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM);
+}
+
+static int starfive_aes_ccm_encrypt(struct aead_request *req)
+{
+ int ret;
+
+ ret = starfive_aes_ccm_check_iv(req->iv);
+ if (ret)
+ return ret;
+
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ccm_decrypt(struct aead_request *req)
+{
+ int ret;
+
+ ret = starfive_aes_ccm_check_iv(req->iv);
+ if (ret)
+ return ret;
+
+ return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM);
+}
+
+static struct skcipher_engine_alg skcipher_algs[] = {
+{
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_ecb_encrypt,
+ .base.decrypt = starfive_aes_ecb_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "starfive-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_cbc_encrypt,
+ .base.decrypt = starfive_aes_cbc_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "starfive-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_ctr_encrypt,
+ .base.decrypt = starfive_aes_ctr_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "starfive-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_cfb_encrypt,
+ .base.decrypt = starfive_aes_cfb_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "starfive-cfb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+}, {
+ .base.init = starfive_aes_init_tfm,
+ .base.setkey = starfive_aes_setkey,
+ .base.encrypt = starfive_aes_ofb_encrypt,
+ .base.decrypt = starfive_aes_ofb_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "starfive-ofb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_do_one_req,
+ },
+},
+};
+
+static struct aead_engine_alg aead_algs[] = {
+{
+ .base.setkey = starfive_aes_aead_setkey,
+ .base.setauthsize = starfive_aes_gcm_setauthsize,
+ .base.encrypt = starfive_aes_gcm_encrypt,
+ .base.decrypt = starfive_aes_gcm_decrypt,
+ .base.init = starfive_aes_aead_init_tfm,
+ .base.exit = starfive_aes_aead_exit_tfm,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "starfive-gcm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_aead_do_one_req,
+ },
+}, {
+ .base.setkey = starfive_aes_aead_setkey,
+ .base.setauthsize = starfive_aes_ccm_setauthsize,
+ .base.encrypt = starfive_aes_ccm_encrypt,
+ .base.decrypt = starfive_aes_ccm_decrypt,
+ .base.init = starfive_aes_aead_init_tfm,
+ .base.exit = starfive_aes_aead_exit_tfm,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "starfive-ccm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
+ },
+ .op = {
+ .do_one_request = starfive_aes_aead_do_one_req,
+ },
+},
+};
+
+int starfive_aes_register_algs(void)
+{
+ int ret;
+
+ ret = crypto_engine_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+ if (ret)
+ return ret;
+
+ ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+
+ return ret;
+}
+
+void starfive_aes_unregister_algs(void)
+{
+ crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+}
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index cc43556b6c80..08e974e0dd12 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -7,17 +7,20 @@
*
*/
+#include <crypto/engine.h>
+#include "jh7110-cryp.h"
#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-
-#include "jh7110-cryp.h"
+#include <linux/spinlock.h>
#define DRIVER_NAME "jh7110-crypto"
@@ -51,6 +54,13 @@ struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx)
return cryp;
}
+static u16 side_chan;
+module_param(side_chan, ushort, 0);
+MODULE_PARM_DESC(side_chan, "Enable side channel mitigation for AES module.\n"
+ "Enabling this feature will reduce speed performance.\n"
+ " 0 - Disabled\n"
+ " other - Enabled");
+
static int starfive_dma_init(struct starfive_cryp_dev *cryp)
{
dma_cap_mask_t mask;
@@ -82,20 +92,26 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
static irqreturn_t starfive_cryp_irq(int irq, void *priv)
{
u32 status;
+ u32 mask;
struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
+ mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
+ if (status & STARFIVE_IE_FLAG_AES_DONE) {
+ mask |= STARFIVE_IE_MASK_AES_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ tasklet_schedule(&cryp->aes_done);
+ }
+
if (status & STARFIVE_IE_FLAG_HASH_DONE) {
- status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status |= STARFIVE_IE_MASK_HASH_DONE;
- writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ mask |= STARFIVE_IE_MASK_HASH_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
tasklet_schedule(&cryp->hash_done);
}
if (status & STARFIVE_IE_FLAG_PKA_DONE) {
- status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- status |= STARFIVE_IE_MASK_PKA_DONE;
- writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET);
+ mask |= STARFIVE_IE_MASK_PKA_DONE;
+ writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
complete(&cryp->pka_done);
}
@@ -121,10 +137,12 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
"Error remapping memory for platform device\n");
+ tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
cryp->phys_base = res->start;
cryp->dma_maxburst = 32;
+ cryp->side_chan = side_chan;
cryp->hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(cryp->hclk))
@@ -180,6 +198,10 @@ static int starfive_cryp_probe(struct platform_device *pdev)
if (ret)
goto err_engine_start;
+ ret = starfive_aes_register_algs();
+ if (ret)
+ goto err_algs_aes;
+
ret = starfive_hash_register_algs();
if (ret)
goto err_algs_hash;
@@ -193,6 +215,8 @@ static int starfive_cryp_probe(struct platform_device *pdev)
err_algs_rsa:
starfive_hash_unregister_algs();
err_algs_hash:
+ starfive_aes_unregister_algs();
+err_algs_aes:
crypto_engine_stop(cryp->engine);
err_engine_start:
crypto_engine_exit(cryp->engine);
@@ -207,18 +231,21 @@ err_dma_init:
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
+ tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
err_probe_defer:
return ret;
}
-static int starfive_cryp_remove(struct platform_device *pdev)
+static void starfive_cryp_remove(struct platform_device *pdev)
{
struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev);
+ starfive_aes_unregister_algs();
starfive_hash_unregister_algs();
starfive_rsa_unregister_algs();
+ tasklet_kill(&cryp->aes_done);
tasklet_kill(&cryp->hash_done);
crypto_engine_stop(cryp->engine);
@@ -233,8 +260,6 @@ static int starfive_cryp_remove(struct platform_device *pdev)
clk_disable_unprepare(cryp->hclk);
clk_disable_unprepare(cryp->ahb);
reset_control_assert(cryp->rst);
-
- return 0;
}
static const struct of_device_id starfive_dt_ids[] __maybe_unused = {
@@ -245,7 +270,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids);
static struct platform_driver starfive_cryp_driver = {
.probe = starfive_cryp_probe,
- .remove = starfive_cryp_remove,
+ .remove_new = starfive_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = starfive_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index 0cdcffc0d7d4..fe011d50473d 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -2,13 +2,15 @@
#ifndef __STARFIVE_STR_H__
#define __STARFIVE_STR_H__
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-
-#include <crypto/engine.h>
-#include <crypto/sha2.h>
-#include <crypto/sm3.h>
+#include <linux/interrupt.h>
#define STARFIVE_ALG_CR_OFFSET 0x0
#define STARFIVE_ALG_FIFO_OFFSET 0x4
@@ -17,13 +19,56 @@
#define STARFIVE_DMA_IN_LEN_OFFSET 0x10
#define STARFIVE_DMA_OUT_LEN_OFFSET 0x14
+#define STARFIVE_IE_MASK_AES_DONE 0x1
#define STARFIVE_IE_MASK_HASH_DONE 0x4
#define STARFIVE_IE_MASK_PKA_DONE 0x8
+#define STARFIVE_IE_FLAG_AES_DONE 0x1
#define STARFIVE_IE_FLAG_HASH_DONE 0x4
#define STARFIVE_IE_FLAG_PKA_DONE 0x8
#define STARFIVE_MSG_BUFFER_SIZE SZ_16K
#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
+#define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE
+#define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE
+
+union starfive_aes_csr {
+ u32 v;
+ struct {
+ u32 cmode :1;
+#define STARFIVE_AES_KEYMODE_128 0x0
+#define STARFIVE_AES_KEYMODE_192 0x1
+#define STARFIVE_AES_KEYMODE_256 0x2
+ u32 keymode :2;
+#define STARFIVE_AES_BUSY BIT(3)
+ u32 busy :1;
+ u32 done :1;
+#define STARFIVE_AES_KEY_DONE BIT(5)
+ u32 krdy :1;
+ u32 aesrst :1;
+ u32 ie :1;
+#define STARFIVE_AES_CCM_START BIT(8)
+ u32 ccm_start :1;
+#define STARFIVE_AES_MODE_ECB 0x0
+#define STARFIVE_AES_MODE_CBC 0x1
+#define STARFIVE_AES_MODE_CFB 0x2
+#define STARFIVE_AES_MODE_OFB 0x3
+#define STARFIVE_AES_MODE_CTR 0x4
+#define STARFIVE_AES_MODE_CCM 0x5
+#define STARFIVE_AES_MODE_GCM 0x6
+ u32 mode :3;
+#define STARFIVE_AES_GCM_START BIT(12)
+ u32 gcm_start :1;
+#define STARFIVE_AES_GCM_DONE BIT(13)
+ u32 gcm_done :1;
+ u32 delay_aes :1;
+ u32 vaes_start :1;
+ u32 rsvd_0 :8;
+#define STARFIVE_AES_MODE_XFB_1 0x0
+#define STARFIVE_AES_MODE_XFB_128 0x5
+ u32 stmode :3;
+ u32 rsvd_1 :5;
+ };
+};
union starfive_hash_csr {
u32 v;
@@ -105,7 +150,6 @@ union starfive_alg_cr {
};
struct starfive_cryp_ctx {
- struct crypto_engine_ctx enginectx;
struct starfive_cryp_dev *cryp;
struct starfive_cryp_request_ctx *rctx;
@@ -116,6 +160,7 @@ struct starfive_cryp_ctx {
struct starfive_rsa_key rsa_key;
struct crypto_akcipher *akcipher_fbk;
struct crypto_ahash *ahash_fbk;
+ struct crypto_aead *aead_fbk;
};
struct starfive_cryp_dev {
@@ -133,13 +178,26 @@ struct starfive_cryp_dev {
struct dma_chan *rx;
struct dma_slave_config cfg_in;
struct dma_slave_config cfg_out;
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
struct crypto_engine *engine;
+ struct tasklet_struct aes_done;
struct tasklet_struct hash_done;
struct completion pka_done;
+ size_t assoclen;
+ size_t total_in;
+ size_t total_out;
+ u32 tag_in[4];
+ u32 tag_out[4];
+ unsigned int authsize;
+ unsigned long flags;
int err;
+ bool side_chan;
union starfive_alg_cr alg_cr;
union {
struct ahash_request *hreq;
+ struct aead_request *areq;
+ struct skcipher_request *sreq;
} req;
};
@@ -147,6 +205,7 @@ struct starfive_cryp_request_ctx {
union {
union starfive_hash_csr hash;
union starfive_pka_cacr pka;
+ union starfive_aes_csr aes;
} csr;
struct scatterlist *in_sg;
@@ -157,6 +216,7 @@ struct starfive_cryp_request_ctx {
unsigned int blksize;
unsigned int digsize;
unsigned long in_sg_len;
+ unsigned char *adata;
u8 rsa_data[] __aligned(sizeof(u32));
};
@@ -168,5 +228,9 @@ void starfive_hash_unregister_algs(void);
int starfive_rsa_register_algs(void);
void starfive_rsa_unregister_algs(void);
+int starfive_aes_register_algs(void);
+void starfive_aes_unregister_algs(void);
+
void starfive_hash_done_task(unsigned long param);
+void starfive_aes_done_task(unsigned long param);
#endif
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 5064150b8a1c..cc7650198d70 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -6,25 +6,20 @@
*
*/
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include "jh7110-cryp.h"
+#include <linux/amba/pl080.h>
#include <linux/clk.h>
-#include <linux/crypto.h>
#include <linux/dma-direct.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/amba/pl080.h>
-
-#include <crypto/hash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
-
-#include "jh7110-cryp.h"
#define STARFIVE_HASH_REGS_OFFSET 0x300
#define STARFIVE_HASH_SHACSR (STARFIVE_HASH_REGS_OFFSET + 0x0)
@@ -433,10 +428,6 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
ctx->keylen = 0;
ctx->hash_mode = mode;
- ctx->enginectx.op.do_one_request = starfive_hash_one_request;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -445,11 +436,6 @@ static void starfive_hash_exit_tfm(struct crypto_ahash *hash)
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
crypto_free_ahash(ctx->ahash_fbk);
-
- ctx->ahash_fbk = NULL;
- ctx->enginectx.op.do_one_request = NULL;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
}
static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx,
@@ -619,18 +605,18 @@ static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
STARFIVE_HASH_SM3);
}
-static struct ahash_alg algs_sha2_sm3[] = {
+static struct ahash_engine_alg algs_sha2_sm3[] = {
{
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha224_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha224_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -645,19 +631,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha224_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha224_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -672,18 +661,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha256_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha256_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -698,19 +690,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha256_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha256_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
.base = {
@@ -725,18 +720,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha384_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha384_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -751,19 +749,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha384_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha384_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -778,18 +779,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sha512_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sha512_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -804,19 +808,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sha512_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sha512_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
.base = {
@@ -831,18 +838,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_sm3_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_sm3_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct sm3_state),
.base = {
@@ -857,19 +867,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
}, {
- .init = starfive_hash_init,
- .update = starfive_hash_update,
- .final = starfive_hash_final,
- .finup = starfive_hash_finup,
- .digest = starfive_hash_digest,
- .export = starfive_hash_export,
- .import = starfive_hash_import,
- .init_tfm = starfive_hmac_sm3_init_tfm,
- .exit_tfm = starfive_hash_exit_tfm,
- .setkey = starfive_hash_setkey,
- .halg = {
+ .base.init = starfive_hash_init,
+ .base.update = starfive_hash_update,
+ .base.final = starfive_hash_final,
+ .base.finup = starfive_hash_finup,
+ .base.digest = starfive_hash_digest,
+ .base.export = starfive_hash_export,
+ .base.import = starfive_hash_import,
+ .base.init_tfm = starfive_hmac_sm3_init_tfm,
+ .base.exit_tfm = starfive_hash_exit_tfm,
+ .base.setkey = starfive_hash_setkey,
+ .base.halg = {
.digestsize = SM3_DIGEST_SIZE,
.statesize = sizeof(struct sm3_state),
.base = {
@@ -884,16 +897,19 @@ static struct ahash_alg algs_sha2_sm3[] = {
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = starfive_hash_one_request,
+ },
},
};
int starfive_hash_register_algs(void)
{
- return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+ return crypto_engine_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
}
void starfive_hash_unregister_algs(void)
{
- crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+ crypto_engine_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
}
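
For readers skimming the diff, the pattern repeated across the StarFive and stm32 hash hunks below is the crypto_engine API conversion: the per-tfm crypto_engine_ctx callbacks set in init_tfm are dropped, the request handler moves into the algorithm definition itself (struct ahash_engine_alg), and registration switches to the crypto_engine_* helpers. The fragment below is only an illustrative sketch of that pattern, not code from this patch; every example_* name is a hypothetical stand-in for the driver's own symbols.

/* Illustrative sketch of the crypto_engine conversion pattern (assumption:
 * not part of this patch). All example_* identifiers are hypothetical.
 */
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>

static int example_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request, base);

	/* A real driver programs its hardware here before finalizing. */
	crypto_finalize_hash_request(engine, req, 0);
	return 0;
}

static struct ahash_engine_alg example_alg = {
	/* .base is a plain struct ahash_alg; other callbacks omitted for brevity. */
	.base.halg.digestsize			= 32,
	.base.halg.base.cra_name		= "example(hash)",
	.base.halg.base.cra_driver_name		= "example-hash-driver",
	.base.halg.base.cra_module		= THIS_MODULE,
	.op = {
		/* Previously assigned to ctx->enginectx.op.do_one_request in init_tfm. */
		.do_one_request = example_hash_one_request,
	},
};

static int example_register(void)
{
	/* crypto_register_ahashes() becomes crypto_engine_register_ahashes(). */
	return crypto_engine_register_ahashes(&example_alg, 1);
}
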
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 4fc581e9e595..49dfd161e9b9 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -16,6 +16,8 @@ config CRYPTO_DEV_STM32_HASH
select CRYPTO_MD5
select CRYPTO_SHA1
select CRYPTO_SHA256
+ select CRYPTO_SHA512
+ select CRYPTO_SHA3
select CRYPTO_ENGINE
help
This enables support for the HASH hw accelerator which can be found
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 6b8d731092a4..f095f0065428 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -5,22 +5,24 @@
* Ux500 support taken from snippets in the old Ux500 cryp driver
*/
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
+#include <linux/err.h>
#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/engine.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
#define DRIVER_NAME "stm32-cryp"
@@ -156,7 +158,6 @@ struct stm32_cryp_caps {
};
struct stm32_cryp_ctx {
- struct crypto_engine_ctx enginectx;
struct stm32_cryp *cryp;
int keylen;
__be32 key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -825,35 +826,20 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
}
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
-static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
- void *areq);
static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
-
crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
- ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
- ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
- ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
-static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
- void *areq);
static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
{
- struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
-
tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
- ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
- ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
- ctx->enginectx.op.unprepare_request = NULL;
-
return 0;
}
@@ -1180,9 +1166,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
cryp = ctx->cryp;
- if (!cryp)
- return -ENODEV;
-
rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
@@ -1248,16 +1231,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
return ret;
}
-static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
- void *areq)
-{
- struct skcipher_request *req = container_of(areq,
- struct skcipher_request,
- base);
-
- return stm32_cryp_prepare_req(req, NULL);
-}
-
static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *req = container_of(areq,
@@ -1270,15 +1243,8 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
if (!cryp)
return -ENODEV;
- return stm32_cryp_cpu_start(cryp);
-}
-
-static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
-{
- struct aead_request *req = container_of(areq, struct aead_request,
- base);
-
- return stm32_cryp_prepare_req(NULL, req);
+ return stm32_cryp_prepare_req(req, NULL) ?:
+ stm32_cryp_cpu_start(cryp);
}
static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
@@ -1287,10 +1253,15 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
base);
struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
+ int err;
if (!cryp)
return -ENODEV;
+ err = stm32_cryp_prepare_req(NULL, req);
+ if (err)
+ return err;
+
if (unlikely(!cryp->payload_in && !cryp->header_in)) {
/* No input data to process: get tag and finish */
stm32_cryp_finish_req(cryp, 0);
@@ -1709,143 +1680,178 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
-static struct skcipher_alg crypto_algs[] = {
+static struct skcipher_engine_alg crypto_algs[] = {
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "stm32-ecb-aes",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ecb_encrypt,
- .decrypt = stm32_cryp_aes_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "stm32-ecb-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "stm32-cbc-aes",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_cbc_encrypt,
- .decrypt = stm32_cryp_aes_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "stm32-cbc-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "ctr(aes)",
- .base.cra_driver_name = "stm32-ctr-aes",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = stm32_cryp_aes_setkey,
- .encrypt = stm32_cryp_aes_ctr_encrypt,
- .decrypt = stm32_cryp_aes_ctr_decrypt,
+ .base = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "stm32-ctr-aes",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "stm32-ecb-des",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_ecb_encrypt,
- .decrypt = stm32_cryp_des_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "stm32-ecb-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "stm32-cbc-des",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = DES_BLOCK_SIZE,
- .max_keysize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_des_setkey,
- .encrypt = stm32_cryp_des_cbc_encrypt,
- .decrypt = stm32_cryp_des_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "stm32-cbc-des",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "stm32-ecb-des3",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_ecb_encrypt,
- .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "stm32-ecb-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
{
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "stm32-cbc-des3",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_ASYNC,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
- .base.cra_alignmask = 0,
- .base.cra_module = THIS_MODULE,
-
- .init = stm32_cryp_init_tfm,
- .min_keysize = 3 * DES_BLOCK_SIZE,
- .max_keysize = 3 * DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = stm32_cryp_tdes_setkey,
- .encrypt = stm32_cryp_tdes_cbc_encrypt,
- .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "stm32-cbc-des3",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_ASYNC,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .base.cra_alignmask = 0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = stm32_cryp_init_tfm,
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = stm32_cryp_cipher_one_req,
+ },
},
};
-static struct aead_alg aead_algs[] = {
+static struct aead_engine_alg aead_algs[] = {
{
- .setkey = stm32_cryp_aes_aead_setkey,
- .setauthsize = stm32_cryp_aes_gcm_setauthsize,
- .encrypt = stm32_cryp_aes_gcm_encrypt,
- .decrypt = stm32_cryp_aes_gcm_decrypt,
- .init = stm32_cryp_aes_aead_init,
- .ivsize = 12,
- .maxauthsize = AES_BLOCK_SIZE,
+ .base.setkey = stm32_cryp_aes_aead_setkey,
+ .base.setauthsize = stm32_cryp_aes_gcm_setauthsize,
+ .base.encrypt = stm32_cryp_aes_gcm_encrypt,
+ .base.decrypt = stm32_cryp_aes_gcm_decrypt,
+ .base.init = stm32_cryp_aes_aead_init,
+ .base.ivsize = 12,
+ .base.maxauthsize = AES_BLOCK_SIZE,
- .base = {
+ .base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "stm32-gcm-aes",
.cra_priority = 200,
@@ -1855,17 +1861,20 @@ static struct aead_alg aead_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
+ .op = {
+ .do_one_request = stm32_cryp_aead_one_req,
+ },
},
{
- .setkey = stm32_cryp_aes_aead_setkey,
- .setauthsize = stm32_cryp_aes_ccm_setauthsize,
- .encrypt = stm32_cryp_aes_ccm_encrypt,
- .decrypt = stm32_cryp_aes_ccm_decrypt,
- .init = stm32_cryp_aes_aead_init,
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
+ .base.setkey = stm32_cryp_aes_aead_setkey,
+ .base.setauthsize = stm32_cryp_aes_ccm_setauthsize,
+ .base.encrypt = stm32_cryp_aes_ccm_encrypt,
+ .base.decrypt = stm32_cryp_aes_ccm_decrypt,
+ .base.init = stm32_cryp_aes_aead_init,
+ .base.ivsize = AES_BLOCK_SIZE,
+ .base.maxauthsize = AES_BLOCK_SIZE,
- .base = {
+ .base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "stm32-ccm-aes",
.cra_priority = 200,
@@ -1875,6 +1884,9 @@ static struct aead_alg aead_algs[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
+ .op = {
+ .do_one_request = stm32_cryp_aead_one_req,
+ },
},
};
@@ -2036,14 +2048,14 @@ static int stm32_cryp_probe(struct platform_device *pdev)
goto err_engine2;
}
- ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ ret = crypto_engine_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
if (ret) {
dev_err(dev, "Could not register algs\n");
goto err_algs;
}
if (cryp->caps->aeads_support) {
- ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
if (ret)
goto err_aead_algs;
}
@@ -2055,7 +2067,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
return 0;
err_aead_algs:
- crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
err_algs:
err_engine2:
crypto_engine_exit(cryp->engine);
@@ -2085,8 +2097,8 @@ static int stm32_cryp_remove(struct platform_device *pdev)
return ret;
if (cryp->caps->aeads_support)
- crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
- crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+ crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
crypto_engine_exit(cryp->engine);
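
The stm32-cryp hunks above also show the second half of the conversion: with prepare_request/unprepare_request gone from crypto_engine, the preparation step is now called directly from do_one_request (see stm32_cryp_cipher_one_req chaining stm32_cryp_prepare_req with the "?:" idiom). A minimal sketch of that shape, assuming hypothetical example_* helpers rather than the driver's actual functions:

/* Sketch only: example_prepare_req() and example_start_hw() stand in for the
 * driver's own preparation and hardware kick-off routines.
 */
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

static int example_prepare_req(struct skcipher_request *req)
{
	return 0;	/* map buffers, load key/IV, etc. */
}

static int example_start_hw(struct skcipher_request *req)
{
	return 0;	/* start the accelerator */
}

static int example_cipher_one_req(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* What the former prepare_request callback did now runs inline,
	 * followed by what do_one_request did before.
	 */
	return example_prepare_req(req) ?: example_start_hw(req);
}
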
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index f0df32382719..2b2382d4332c 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -6,27 +6,26 @@
* Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
*/
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-
-#include <crypto/engine.h>
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/string.h>
#define HASH_CR 0x00
#define HASH_DIN 0x04
@@ -45,19 +44,11 @@
#define HASH_CR_DMAE BIT(3)
#define HASH_CR_DATATYPE_POS 4
#define HASH_CR_MODE BIT(6)
+#define HASH_CR_ALGO_POS 7
#define HASH_CR_MDMAT BIT(13)
#define HASH_CR_DMAA BIT(14)
#define HASH_CR_LKEY BIT(16)
-#define HASH_CR_ALGO_SHA1 0x0
-#define HASH_CR_ALGO_MD5 0x80
-#define HASH_CR_ALGO_SHA224 0x40000
-#define HASH_CR_ALGO_SHA256 0x40080
-
-#define HASH_CR_UX500_EMPTYMSG BIT(20)
-#define HASH_CR_UX500_ALGO_SHA1 BIT(7)
-#define HASH_CR_UX500_ALGO_SHA256 0x0
-
/* Interrupt */
#define HASH_DINIE BIT(0)
#define HASH_DCIE BIT(1)
@@ -66,9 +57,6 @@
#define HASH_MASK_CALC_COMPLETION BIT(0)
#define HASH_MASK_DATA_INPUT BIT(1)
-/* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER 54
-
/* Status Flags */
#define HASH_SR_DATA_INPUT_READY BIT(0)
#define HASH_SR_OUTPUT_READY BIT(1)
@@ -79,28 +67,39 @@
#define HASH_STR_NBLW_MASK GENMASK(4, 0)
#define HASH_STR_DCAL BIT(8)
+/* HWCFGR Register */
+#define HASH_HWCFG_DMA_MASK GENMASK(3, 0)
+
+/* Context swap register */
+#define HASH_CSR_NB_SHA256_HMAC 54
+#define HASH_CSR_NB_SHA256 38
+#define HASH_CSR_NB_SHA512_HMAC 103
+#define HASH_CSR_NB_SHA512 91
+#define HASH_CSR_NB_SHA3_HMAC 88
+#define HASH_CSR_NB_SHA3 72
+#define HASH_CSR_NB_MAX HASH_CSR_NB_SHA512_HMAC
+
#define HASH_FLAGS_INIT BIT(0)
#define HASH_FLAGS_OUTPUT_READY BIT(1)
#define HASH_FLAGS_CPU BIT(2)
-#define HASH_FLAGS_DMA_READY BIT(3)
-#define HASH_FLAGS_DMA_ACTIVE BIT(4)
-#define HASH_FLAGS_HMAC_INIT BIT(5)
-#define HASH_FLAGS_HMAC_FINAL BIT(6)
-#define HASH_FLAGS_HMAC_KEY BIT(7)
-
+#define HASH_FLAGS_DMA_ACTIVE BIT(3)
+#define HASH_FLAGS_HMAC_INIT BIT(4)
+#define HASH_FLAGS_HMAC_FINAL BIT(5)
+#define HASH_FLAGS_HMAC_KEY BIT(6)
+#define HASH_FLAGS_SHA3_MODE BIT(7)
#define HASH_FLAGS_FINAL BIT(15)
#define HASH_FLAGS_FINUP BIT(16)
-#define HASH_FLAGS_ALGO_MASK GENMASK(21, 18)
-#define HASH_FLAGS_MD5 BIT(18)
-#define HASH_FLAGS_SHA1 BIT(19)
-#define HASH_FLAGS_SHA224 BIT(20)
-#define HASH_FLAGS_SHA256 BIT(21)
+#define HASH_FLAGS_ALGO_MASK GENMASK(20, 17)
+#define HASH_FLAGS_ALGO_SHIFT 17
+#define HASH_FLAGS_ERRORS BIT(21)
#define HASH_FLAGS_EMPTY BIT(22)
#define HASH_FLAGS_HMAC BIT(23)
#define HASH_OP_UPDATE 1
#define HASH_OP_FINAL 2
+#define HASH_BURST_LEVEL 4
+
enum stm32_hash_data_format {
HASH_DATA_32_BITS = 0x0,
HASH_DATA_16_BITS = 0x1,
@@ -108,16 +107,30 @@ enum stm32_hash_data_format {
HASH_DATA_1_BIT = 0x3
};
-#define HASH_BUFLEN 256
-#define HASH_LONG_KEY 64
-#define HASH_MAX_KEY_SIZE (SHA256_BLOCK_SIZE * 8)
-#define HASH_QUEUE_LENGTH 16
-#define HASH_DMA_THRESHOLD 50
+#define HASH_BUFLEN (SHA3_224_BLOCK_SIZE + 4)
+#define HASH_MAX_KEY_SIZE (SHA512_BLOCK_SIZE * 8)
+
+enum stm32_hash_algo {
+ HASH_SHA1 = 0,
+ HASH_MD5 = 1,
+ HASH_SHA224 = 2,
+ HASH_SHA256 = 3,
+ HASH_SHA3_224 = 4,
+ HASH_SHA3_256 = 5,
+ HASH_SHA3_384 = 6,
+ HASH_SHA3_512 = 7,
+ HASH_SHA384 = 12,
+ HASH_SHA512 = 15,
+};
+
+enum ux500_hash_algo {
+ HASH_SHA256_UX500 = 0,
+ HASH_SHA1_UX500 = 1,
+};
#define HASH_AUTOSUSPEND_DELAY 50
struct stm32_hash_ctx {
- struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
struct crypto_shash *xtfm;
unsigned long flags;
@@ -130,19 +143,19 @@ struct stm32_hash_state {
u32 flags;
u16 bufcnt;
- u16 buflen;
+ u16 blocklen;
u8 buffer[HASH_BUFLEN] __aligned(4);
/* hash state */
- u32 hw_context[3 + HASH_CSR_REGISTER_NUMBER];
+ u32 hw_context[3 + HASH_CSR_NB_MAX];
};
struct stm32_hash_request_ctx {
struct stm32_hash_dev *hdev;
unsigned long op;
- u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+ u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
size_t digcnt;
/* DMA */
@@ -161,17 +174,18 @@ struct stm32_hash_request_ctx {
};
struct stm32_hash_algs_info {
- struct ahash_alg *algs_list;
+ struct ahash_engine_alg *algs_list;
size_t size;
};
struct stm32_hash_pdata {
- struct stm32_hash_algs_info *algs_info;
- size_t algs_info_size;
- bool has_sr;
- bool has_mdmat;
- bool broken_emptymsg;
- bool ux500;
+ const int alg_shift;
+ const struct stm32_hash_algs_info *algs_info;
+ size_t algs_info_size;
+ bool has_sr;
+ bool has_mdmat;
+ bool broken_emptymsg;
+ bool ux500;
};
struct stm32_hash_dev {
@@ -182,7 +196,6 @@ struct stm32_hash_dev {
void __iomem *io_base;
phys_addr_t phys_base;
u32 dma_mode;
- u32 dma_maxburst;
bool polled;
struct ahash_request *req;
@@ -269,37 +282,25 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
return 0;
}
-static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct stm32_hash_state *state = &rctx->state;
+ u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
u32 reg = HASH_CR_INIT;
if (!(hdev->flags & HASH_FLAGS_INIT)) {
- switch (state->flags & HASH_FLAGS_ALGO_MASK) {
- case HASH_FLAGS_MD5:
- reg |= HASH_CR_ALGO_MD5;
- break;
- case HASH_FLAGS_SHA1:
- if (hdev->pdata->ux500)
- reg |= HASH_CR_UX500_ALGO_SHA1;
+ if (hdev->pdata->ux500) {
+ reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
+ } else {
+ if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
+ reg |= ((alg & BIT(1)) << 17) |
+ ((alg & BIT(0)) << HASH_CR_ALGO_POS);
else
- reg |= HASH_CR_ALGO_SHA1;
- break;
- case HASH_FLAGS_SHA224:
- reg |= HASH_CR_ALGO_SHA224;
- break;
- case HASH_FLAGS_SHA256:
- if (hdev->pdata->ux500)
- reg |= HASH_CR_UX500_ALGO_SHA256;
- else
- reg |= HASH_CR_ALGO_SHA256;
- break;
- default:
- reg |= HASH_CR_ALGO_MD5;
+ reg |= alg << hdev->pdata->alg_shift;
}
reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
@@ -307,7 +308,7 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
if (state->flags & HASH_FLAGS_HMAC) {
hdev->flags |= HASH_FLAGS_HMAC;
reg |= HASH_CR_MODE;
- if (ctx->keylen > HASH_LONG_KEY)
+ if (ctx->keylen > crypto_ahash_blocksize(tfm))
reg |= HASH_CR_LKEY;
}
@@ -318,6 +319,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
hdev->flags |= HASH_FLAGS_INIT;
+ /*
+ * After the first block + 1 words are filled up,
+ * we only need to fill one block to start partial computation
+ */
+ rctx->state.blocklen -= sizeof(u32);
+
dev_dbg(hdev->dev, "Write Control %x\n", reg);
}
}
@@ -327,9 +334,9 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
struct stm32_hash_state *state = &rctx->state;
size_t count;
- while ((state->bufcnt < state->buflen) && rctx->total) {
+ while ((state->bufcnt < state->blocklen) && rctx->total) {
count = min(rctx->sg->length - rctx->offset, rctx->total);
- count = min_t(size_t, count, state->buflen - state->bufcnt);
+ count = min_t(size_t, count, state->blocklen - state->bufcnt);
if (count <= 0) {
if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -384,7 +391,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
hdev->flags |= HASH_FLAGS_CPU;
- stm32_hash_write_ctrl(hdev, length);
+ stm32_hash_write_ctrl(hdev);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -419,20 +426,59 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
return 0;
}
+static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
+{
+ struct stm32_hash_state *state = &rctx->state;
+
+ switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
+ HASH_FLAGS_ALGO_SHIFT) {
+ case HASH_MD5:
+ case HASH_SHA1:
+ case HASH_SHA224:
+ case HASH_SHA256:
+ if (state->flags & HASH_FLAGS_HMAC)
+ return HASH_CSR_NB_SHA256_HMAC;
+ else
+ return HASH_CSR_NB_SHA256;
+ break;
+
+ case HASH_SHA384:
+ case HASH_SHA512:
+ if (state->flags & HASH_FLAGS_HMAC)
+ return HASH_CSR_NB_SHA512_HMAC;
+ else
+ return HASH_CSR_NB_SHA512;
+ break;
+
+ case HASH_SHA3_224:
+ case HASH_SHA3_256:
+ case HASH_SHA3_384:
+ case HASH_SHA3_512:
+ if (state->flags & HASH_FLAGS_HMAC)
+ return HASH_CSR_NB_SHA3_HMAC;
+ else
+ return HASH_CSR_NB_SHA3;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+}
+
static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
- int i;
+ int i, swap_reg;
dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
final = state->flags & HASH_FLAGS_FINAL;
- while ((rctx->total >= state->buflen) ||
- (state->bufcnt + rctx->total >= state->buflen)) {
+ while ((rctx->total >= state->blocklen) ||
+ (state->bufcnt + rctx->total >= state->blocklen)) {
stm32_hash_append_sg(rctx);
bufcnt = state->bufcnt;
state->bufcnt = 0;
@@ -455,11 +501,13 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
+ swap_reg = hash_swap_reg(rctx);
+
if (!hdev->pdata->ux500)
*preg++ = stm32_hash_read(hdev, HASH_IMR);
*preg++ = stm32_hash_read(hdev, HASH_STR);
*preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ for (i = 0; i < swap_reg; i++)
*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
state->flags |= HASH_FLAGS_INIT;
@@ -492,7 +540,7 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
reg = stm32_hash_read(hdev, HASH_CR);
- if (!hdev->pdata->has_mdmat) {
+ if (hdev->pdata->has_mdmat) {
if (mdma)
reg |= HASH_CR_MDMAT;
else
@@ -533,8 +581,6 @@ static void stm32_hash_dma_callback(void *param)
struct stm32_hash_dev *hdev = param;
complete(&hdev->dma_completion);
-
- hdev->flags |= HASH_FLAGS_DMA_READY;
}
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
@@ -544,7 +590,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
int err;
- if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+ if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) {
err = stm32_hash_write_key(hdev);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -579,8 +625,8 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_conf.src_maxburst = hdev->dma_maxburst;
- dma_conf.dst_maxburst = hdev->dma_maxburst;
+ dma_conf.src_maxburst = HASH_BURST_LEVEL;
+ dma_conf.dst_maxburst = HASH_BURST_LEVEL;
dma_conf.device_fc = false;
chan = dma_request_chan(hdev->dev, "in");
@@ -607,18 +653,18 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
u32 *buffer = (void *)rctx->state.buffer;
struct scatterlist sg[1], *tsg;
- int err = 0, len = 0, reg, ncp = 0;
- unsigned int i;
+ int err = 0, reg, ncp = 0;
+ unsigned int i, len = 0, bufcnt = 0;
+ bool is_last = false;
rctx->sg = hdev->req->src;
rctx->total = hdev->req->nbytes;
rctx->nents = sg_nents(rctx->sg);
-
if (rctx->nents < 0)
return -EINVAL;
- stm32_hash_write_ctrl(hdev, rctx->total);
+ stm32_hash_write_ctrl(hdev);
if (hdev->flags & HASH_FLAGS_HMAC) {
err = stm32_hash_hmac_dma_send(hdev);
@@ -627,10 +673,12 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
}
for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+ sg[0] = *tsg;
len = sg->length;
- sg[0] = *tsg;
- if (sg_is_last(sg)) {
+ if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
+ sg->length = rctx->total - bufcnt;
+ is_last = true;
if (hdev->dma_mode == 1) {
len = (ALIGN(sg->length, 16) - 16);
@@ -656,13 +704,15 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
return -ENOMEM;
}
- err = stm32_hash_xmit_dma(hdev, sg, len,
- !sg_is_last(sg));
+ err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
+ bufcnt += sg[0].length;
dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
if (err == -ENOMEM)
return err;
+ if (is_last)
+ break;
}
if (hdev->dma_mode == 1) {
@@ -718,11 +768,12 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
struct scatterlist *sg;
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
int i;
- if (req->nbytes <= HASH_DMA_THRESHOLD)
+ if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen)
return false;
if (sg_nents(req->src) > 1) {
@@ -748,31 +799,64 @@ static int stm32_hash_init(struct ahash_request *req)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_state *state = &rctx->state;
+ bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
rctx->hdev = hdev;
state->flags = HASH_FLAGS_CPU;
+ if (sha3_mode)
+ state->flags |= HASH_FLAGS_SHA3_MODE;
+
rctx->digcnt = crypto_ahash_digestsize(tfm);
switch (rctx->digcnt) {
case MD5_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_MD5;
+ state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
break;
case SHA1_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_SHA1;
+ if (hdev->pdata->ux500)
+ state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
break;
case SHA224_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_SHA224;
+ if (sha3_mode)
+ state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
break;
case SHA256_DIGEST_SIZE:
- state->flags |= HASH_FLAGS_SHA256;
+ if (sha3_mode) {
+ state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
+ } else {
+ if (hdev->pdata->ux500)
+ state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
+ }
+ break;
+ case SHA384_DIGEST_SIZE:
+ if (sha3_mode)
+ state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
+ break;
+ case SHA512_DIGEST_SIZE:
+ if (sha3_mode)
+ state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
+ else
+ state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
break;
default:
return -EINVAL;
}
rctx->state.bufcnt = 0;
- rctx->state.buflen = HASH_BUFLEN;
+ rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
+ if (rctx->state.blocklen > HASH_BUFLEN) {
+ dev_err(hdev->dev, "Error, block too large");
+ return -EINVAL;
+ }
rctx->total = 0;
rctx->offset = 0;
rctx->data_type = HASH_DATA_8_BITS;
@@ -842,6 +926,7 @@ static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
static void stm32_hash_copy_hash(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_state *state = &rctx->state;
struct stm32_hash_dev *hdev = rctx->hdev;
@@ -851,22 +936,7 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
return stm32_hash_emptymsg_fallback(req);
- switch (state->flags & HASH_FLAGS_ALGO_MASK) {
- case HASH_FLAGS_MD5:
- hashsize = MD5_DIGEST_SIZE;
- break;
- case HASH_FLAGS_SHA1:
- hashsize = SHA1_DIGEST_SIZE;
- break;
- case HASH_FLAGS_SHA224:
- hashsize = SHA224_DIGEST_SIZE;
- break;
- case HASH_FLAGS_SHA256:
- hashsize = SHA256_DIGEST_SIZE;
- break;
- default:
- return;
- }
+ hashsize = crypto_ahash_digestsize(tfm);
for (i = 0; i < hashsize / sizeof(u32); i++) {
if (hdev->pdata->ux500)
@@ -881,6 +951,11 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
static int stm32_hash_finish(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ u32 reg;
+
+ reg = stm32_hash_read(rctx->hdev, HASH_SR);
+ reg &= ~HASH_SR_OUTPUT_READY;
+ stm32_hash_write(rctx->hdev, HASH_SR, reg);
if (!req->result)
return -EINVAL;
@@ -920,6 +995,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
struct stm32_hash_state *state = &rctx->state;
+ int swap_reg;
int err = 0;
if (!hdev)
@@ -932,6 +1008,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
hdev->req = req;
hdev->flags = 0;
+ swap_reg = hash_swap_reg(rctx);
if (state->flags & HASH_FLAGS_INIT) {
u32 *preg = rctx->state.hw_context;
@@ -945,7 +1022,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
reg = *preg++ | HASH_CR_INIT;
stm32_hash_write(hdev, HASH_CR, reg);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ for (i = 0; i < swap_reg; i++)
stm32_hash_write(hdev, HASH_CSR(i), *preg++);
hdev->flags |= HASH_FLAGS_INIT;
@@ -1000,7 +1077,7 @@ static int stm32_hash_update(struct ahash_request *req)
rctx->sg = req->src;
rctx->offset = 0;
- if ((state->bufcnt + rctx->total < state->buflen)) {
+ if ((state->bufcnt + rctx->total < state->blocklen)) {
stm32_hash_append_sg(rctx);
return 0;
}
@@ -1102,8 +1179,7 @@ static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
return 0;
}
-static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
- const char *algs_hmac_name)
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
{
struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1112,38 +1188,33 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
ctx->keylen = 0;
- if (algs_hmac_name)
- ctx->flags |= HASH_FLAGS_HMAC;
-
- ctx->enginectx.op.do_one_request = stm32_hash_one_request;
+ if (algs_flags)
+ ctx->flags |= algs_flags;
return stm32_hash_init_fallback(tfm);
}
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, NULL);
+ return stm32_hash_cra_init_algs(tfm, 0);
}
-static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, "md5");
+ return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
}
-static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, "sha1");
+ return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
}
-static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
{
- return stm32_hash_cra_init_algs(tfm, "sha224");
+ return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
+ HASH_FLAGS_HMAC);
}
-static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
-{
- return stm32_hash_cra_init_algs(tfm, "sha256");
-}
static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
{
@@ -1162,11 +1233,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
goto finish;
}
- } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
- if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
- hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
- goto finish;
- }
+ } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+ hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+ goto finish;
}
return IRQ_HANDLED;
@@ -1185,8 +1254,6 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
reg = stm32_hash_read(hdev, HASH_SR);
if (reg & HASH_SR_OUTPUT_READY) {
- reg &= ~HASH_SR_OUTPUT_READY;
- stm32_hash_write(hdev, HASH_SR, reg);
hdev->flags |= HASH_FLAGS_OUTPUT_READY;
/* Disable IT*/
stm32_hash_write(hdev, HASH_IMR, 0);
@@ -1196,16 +1263,16 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct ahash_alg algs_md5[] = {
+static struct ahash_engine_alg algs_md5[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1221,18 +1288,21 @@ static struct ahash_alg algs_md5[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .setkey = stm32_hash_setkey,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1244,24 +1314,27 @@ static struct ahash_alg algs_md5[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_md5_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
- },
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ }
};
-static struct ahash_alg algs_sha1[] = {
+static struct ahash_engine_alg algs_sha1[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1277,18 +1350,21 @@ static struct ahash_alg algs_sha1[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .setkey = stm32_hash_setkey,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1300,24 +1376,27 @@ static struct ahash_alg algs_sha1[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_sha1_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
};
-static struct ahash_alg algs_sha224[] = {
+static struct ahash_engine_alg algs_sha224[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1333,18 +1412,21 @@ static struct ahash_alg algs_sha224[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .setkey = stm32_hash_setkey,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.setkey = stm32_hash_setkey,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1356,24 +1438,27 @@ static struct ahash_alg algs_sha224[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_sha224_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
};
-static struct ahash_alg algs_sha256[] = {
+static struct ahash_engine_alg algs_sha256[] = {
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1389,18 +1474,21 @@ static struct ahash_alg algs_sha256[] = {
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
{
- .init = stm32_hash_init,
- .update = stm32_hash_update,
- .final = stm32_hash_final,
- .finup = stm32_hash_finup,
- .digest = stm32_hash_digest,
- .export = stm32_hash_export,
- .import = stm32_hash_import,
- .setkey = stm32_hash_setkey,
- .halg = {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct stm32_hash_state),
.base = {
@@ -1412,14 +1500,377 @@ static struct ahash_alg algs_sha256[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
- .cra_init = stm32_hash_cra_sha256_init,
+ .cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
- }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
},
};
+static struct ahash_engine_alg algs_sha384_sha512[] = {
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "stm32-sha384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.setkey = stm32_hash_setkey,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "stm32-hmac-sha384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "stm32-sha512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "stm32-hmac-sha512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+};
+
+static struct ahash_engine_alg algs_sha3[] = {
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-224",
+ .cra_driver_name = "stm32-sha3-224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-224)",
+ .cra_driver_name = "stm32-hmac-sha3-224",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-256",
+ .cra_driver_name = "stm32-sha3-256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-256)",
+ .cra_driver_name = "stm32-hmac-sha3-256",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-384",
+ .cra_driver_name = "stm32-sha3-384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-384)",
+ .cra_driver_name = "stm32-hmac-sha3-384",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "sha3-512",
+ .cra_driver_name = "stm32-sha3-512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ },
+ {
+ .base.init = stm32_hash_init,
+ .base.update = stm32_hash_update,
+ .base.final = stm32_hash_final,
+ .base.finup = stm32_hash_finup,
+ .base.digest = stm32_hash_digest,
+ .base.export = stm32_hash_export,
+ .base.import = stm32_hash_import,
+ .base.setkey = stm32_hash_setkey,
+ .base.halg = {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .statesize = sizeof(struct stm32_hash_state),
+ .base = {
+ .cra_name = "hmac(sha3-512)",
+ .cra_driver_name = "stm32-hmac-sha3-512",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+ .cra_alignmask = 3,
+ .cra_init = stm32_hash_cra_sha3_hmac_init,
+ .cra_exit = stm32_hash_cra_exit,
+ .cra_module = THIS_MODULE,
+ }
+ },
+ .op = {
+ .do_one_request = stm32_hash_one_request,
+ },
+ }
+};
+
static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
unsigned int i, j;
@@ -1427,7 +1878,7 @@ static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
for (i = 0; i < hdev->pdata->algs_info_size; i++) {
for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
- err = crypto_register_ahash(
+ err = crypto_engine_register_ahash(
&hdev->pdata->algs_info[i].algs_list[j]);
if (err)
goto err_algs;
@@ -1439,7 +1890,7 @@ err_algs:
dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
for (; i--; ) {
for (; j--;)
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&hdev->pdata->algs_info[i].algs_list[j]);
}
@@ -1452,7 +1903,7 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
for (i = 0; i < hdev->pdata->algs_info_size; i++) {
for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
- crypto_unregister_ahash(
+ crypto_engine_unregister_ahash(
&hdev->pdata->algs_info[i].algs_list[j]);
}
@@ -1471,6 +1922,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
};
static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
+ .alg_shift = 7,
.algs_info = stm32_hash_algs_info_ux500,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
.broken_emptymsg = true,
@@ -1489,6 +1941,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+ .alg_shift = 7,
.algs_info = stm32_hash_algs_info_stm32f4,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
.has_sr = true,
@@ -1515,25 +1968,49 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+ .alg_shift = 7,
.algs_info = stm32_hash_algs_info_stm32f7,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
.has_sr = true,
.has_mdmat = true,
};
-static const struct of_device_id stm32_hash_of_match[] = {
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
{
- .compatible = "stericsson,ux500-hash",
- .data = &stm32_hash_pdata_ux500,
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
+ },
+ {
+ .algs_list = algs_sha224,
+ .size = ARRAY_SIZE(algs_sha224),
+ },
+ {
+ .algs_list = algs_sha256,
+ .size = ARRAY_SIZE(algs_sha256),
},
{
- .compatible = "st,stm32f456-hash",
- .data = &stm32_hash_pdata_stm32f4,
+ .algs_list = algs_sha384_sha512,
+ .size = ARRAY_SIZE(algs_sha384_sha512),
},
{
- .compatible = "st,stm32f756-hash",
- .data = &stm32_hash_pdata_stm32f7,
+ .algs_list = algs_sha3,
+ .size = ARRAY_SIZE(algs_sha3),
},
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
+ .alg_shift = 17,
+ .algs_info = stm32_hash_algs_info_stm32mp13,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
+ .has_sr = true,
+ .has_mdmat = true,
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+ { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
+ { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
+ { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
+ { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
{},
};
@@ -1548,12 +2025,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
return -EINVAL;
}
- if (of_property_read_u32(dev->of_node, "dma-maxburst",
- &hdev->dma_maxburst)) {
- dev_info(dev, "dma-maxburst not specified, using 0\n");
- hdev->dma_maxburst = 0;
- }
-
return 0;
}
@@ -1663,7 +2134,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
/* FIXME: implement DMA mode for Ux500 */
hdev->dma_mode = 0;
else
- hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
/* Register algos */
ret = stm32_hash_register_algs(hdev);
@@ -1696,18 +2167,12 @@ err_reset:
return ret;
}
-static int stm32_hash_remove(struct platform_device *pdev)
+static void stm32_hash_remove(struct platform_device *pdev)
{
- struct stm32_hash_dev *hdev;
+ struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
int ret;
- hdev = platform_get_drvdata(pdev);
- if (!hdev)
- return -ENODEV;
-
- ret = pm_runtime_resume_and_get(hdev->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(hdev->dev);
stm32_hash_unregister_algs(hdev);
@@ -1723,9 +2188,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
pm_runtime_disable(hdev->dev);
pm_runtime_put_noidle(hdev->dev);
- clk_disable_unprepare(hdev->clk);
-
- return 0;
+ if (ret >= 0)
+ clk_disable_unprepare(hdev->clk);
}
#ifdef CONFIG_PM
@@ -1762,7 +2226,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = {
static struct platform_driver stm32_hash_driver = {
.probe = stm32_hash_probe,
- .remove = stm32_hash_remove,
+ .remove_new = stm32_hash_remove,
.driver = {
.name = "stm32-hash",
.pm = &stm32_hash_pm_ops,
@@ -1772,6 +2236,6 @@ static struct platform_driver stm32_hash_driver = {
module_platform_driver(stm32_hash_driver);
-MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index bb27f011cf31..4ca4fbd227bc 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -19,9 +19,9 @@
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 6963344f6a3a..2621ff8a9376 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -7,15 +7,16 @@
* Copyright 2022 Bytedance CO., LTD.
*/
-#include <linux/mpi.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
-#include <linux/err.h>
#include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
@@ -24,7 +25,6 @@ struct virtio_crypto_rsa_ctx {
};
struct virtio_crypto_akcipher_ctx {
- struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_akcipher *tfm;
bool session_valid;
@@ -47,7 +47,7 @@ struct virtio_crypto_akcipher_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct akcipher_alg algo;
+ struct akcipher_engine_alg algo;
};
static DEFINE_MUTEX(algs_lock);
@@ -475,9 +475,6 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
akcipher_set_reqsize(tfm,
sizeof(struct virtio_crypto_akcipher_request));
@@ -500,7 +497,7 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
- .algo = {
+ .algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
@@ -516,11 +513,14 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
+ .algo.op = {
+ .do_one_request = virtio_crypto_rsa_do_req,
+ },
},
{
.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
- .algo = {
+ .algo.base = {
.encrypt = virtio_crypto_rsa_encrypt,
.decrypt = virtio_crypto_rsa_decrypt,
.sign = virtio_crypto_rsa_sign,
@@ -538,6 +538,9 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
},
},
+ .algo.op = {
+ .do_one_request = virtio_crypto_rsa_do_req,
+ },
},
};
@@ -556,14 +559,14 @@ int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
- ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+ ret = crypto_engine_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_akcipher_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
- virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+ virtio_crypto_akcipher_algs[i].algo.base.base.cra_name);
}
unlock:
@@ -586,7 +589,7 @@ void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_akcipher_algs[i].active_devs == 1)
- crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+ crypto_engine_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
virtio_crypto_akcipher_algs[i].active_devs--;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
index e5876286828b..23c41d87d835 100644
--- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
@@ -6,19 +6,16 @@
* Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
*/
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
#include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
+#include <linux/err.h>
+#include <linux/scatterlist.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_skcipher_ctx {
- struct crypto_engine_ctx enginectx;
struct virtio_crypto *vcrypto;
struct crypto_skcipher *tfm;
@@ -42,7 +39,7 @@ struct virtio_crypto_algo {
uint32_t algonum;
uint32_t service;
unsigned int active_devs;
- struct skcipher_alg algo;
+ struct skcipher_engine_alg algo;
};
/*
@@ -523,9 +520,6 @@ static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
ctx->tfm = tfm;
- ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
- ctx->enginectx.op.prepare_request = NULL;
- ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@@ -580,7 +574,7 @@ static void virtio_crypto_skcipher_finalize_req(
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
- .algo = {
+ .algo.base = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "virtio_crypto_aes_cbc",
.base.cra_priority = 150,
@@ -598,6 +592,9 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
+ .algo.op = {
+ .do_one_request = virtio_crypto_skcipher_crypt_req,
+ },
} };
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
@@ -616,14 +613,14 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 0) {
- ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
+ ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
if (ret)
goto unlock;
}
virtio_crypto_algs[i].active_devs++;
dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
- virtio_crypto_algs[i].algo.base.cra_name);
+ virtio_crypto_algs[i].algo.base.base.cra_name);
}
unlock:
@@ -647,7 +644,7 @@ void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
continue;
if (virtio_crypto_algs[i].active_devs == 1)
- crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
+ crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);
virtio_crypto_algs[i].active_devs--;
}
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index bf1f421e05f2..ce335578b759 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -9,13 +9,14 @@
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
-
#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-
-#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/string.h>
#define ZYNQMP_DMA_BIT_MASK 32U
@@ -43,7 +44,7 @@ enum zynqmp_aead_keysrc {
struct zynqmp_aead_drv_ctx {
union {
- struct aead_alg aead;
+ struct aead_engine_alg aead;
} alg;
struct device *dev;
struct crypto_engine *engine;
@@ -60,7 +61,6 @@ struct zynqmp_aead_hw_req {
};
struct zynqmp_aead_tfm_ctx {
- struct crypto_engine_ctx engine_ctx;
struct device *dev;
u8 key[ZYNQMP_AES_KEY_SIZE];
u8 *iv;
@@ -286,7 +286,7 @@ static int zynqmp_aes_aead_encrypt(struct aead_request *req)
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
rq_ctx->op = ZYNQMP_AES_ENCRYPT;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}
@@ -299,7 +299,7 @@ static int zynqmp_aes_aead_decrypt(struct aead_request *req)
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
rq_ctx->op = ZYNQMP_AES_DECRYPT;
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
}
@@ -312,20 +312,16 @@ static int zynqmp_aes_aead_init(struct crypto_aead *aead)
struct zynqmp_aead_drv_ctx *drv_ctx;
struct aead_alg *alg = crypto_aead_alg(aead);
- drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+ drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
tfm_ctx->dev = drv_ctx->dev;
- tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req;
- tfm_ctx->engine_ctx.op.prepare_request = NULL;
- tfm_ctx->engine_ctx.op.unprepare_request = NULL;
-
- tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name,
+ tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.base.cra_name,
0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(tfm_ctx->fbk_cipher)) {
pr_err("%s() Error: failed to allocate fallback for %s\n",
- __func__, drv_ctx->alg.aead.base.cra_name);
+ __func__, drv_ctx->alg.aead.base.base.cra_name);
return PTR_ERR(tfm_ctx->fbk_cipher);
}
@@ -350,7 +346,7 @@ static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
}
static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
- .alg.aead = {
+ .alg.aead.base = {
.setkey = zynqmp_aes_aead_setkey,
.setauthsize = zynqmp_aes_aead_setauthsize,
.encrypt = zynqmp_aes_aead_encrypt,
@@ -372,7 +368,10 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
.cra_ctxsize = sizeof(struct zynqmp_aead_tfm_ctx),
.cra_module = THIS_MODULE,
}
- }
+ },
+ .alg.aead.op = {
+ .do_one_request = zynqmp_handle_aes_req,
+ },
};
static int zynqmp_aes_aead_probe(struct platform_device *pdev)
@@ -405,7 +404,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
goto err_engine;
}
- err = crypto_register_aead(&aes_drv_ctx.alg.aead);
+ err = crypto_engine_register_aead(&aes_drv_ctx.alg.aead);
if (err < 0) {
dev_err(dev, "Failed to register AEAD alg.\n");
goto err_aead;
@@ -413,7 +412,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
return 0;
err_aead:
- crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+ crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
err_engine:
if (aes_drv_ctx.engine)
@@ -425,7 +424,7 @@ err_engine:
static int zynqmp_aes_aead_remove(struct platform_device *pdev)
{
crypto_engine_exit(aes_drv_ctx.engine);
- crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+ crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
return 0;
}
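
With the aead_alg now nested one level deeper inside aead_engine_alg (hence the .base.base.cra_name accesses above), recovering a driver-private context from a tfm has to name the wrapper's .base member in container_of(). A hedged sketch with invented names:

#include <crypto/aead.h>
#include <crypto/engine.h>
#include <linux/container_of.h>

struct my_drv_ctx {
	struct aead_engine_alg alg;
	void *priv;
};

static struct my_drv_ctx *my_ctx_from_tfm(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);

	/* the aead_alg sits at alg.base, not alg, inside the wrapper */
	return container_of(alg, struct my_drv_ctx, alg.base);
}
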
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 43ff170ff1c2..426bf1a72ba6 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -15,7 +15,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#define ZYNQMP_DMA_BIT_MASK 32U
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index e36cbb920ec8..474d81831ad3 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -472,10 +472,11 @@ static void devfreq_monitor(struct work_struct *work)
* devfreq_monitor_start() - Start load monitoring of devfreq instance
* @devfreq: the devfreq instance.
*
- * Helper function for starting devfreq device load monitoring. By
- * default delayed work based monitoring is supported. Function
- * to be called from governor in response to DEVFREQ_GOV_START
- * event when device is added to devfreq framework.
+ * Helper function for starting devfreq device load monitoring. By default,
+ * deferrable timer is used for load monitoring. But the users can change this
+ * behavior using the "timer" type in devfreq_dev_profile. This function will be
+ * called by devfreq governor in response to the DEVFREQ_GOV_START event
+ * generated while adding a device to the devfreq framework.
*/
void devfreq_monitor_start(struct devfreq *devfreq)
{
@@ -763,6 +764,7 @@ static void devfreq_dev_release(struct device *dev)
dev_pm_opp_put_opp_table(devfreq->opp_table);
mutex_destroy(&devfreq->lock);
+ srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
kfree(devfreq);
}
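
The reworded devfreq_monitor_start() kerneldoc above points at the profile's timer field as the way to change the monitoring behaviour. A hedged sketch of a driver opting into a delayed (rather than the default deferrable) timer; the mandatory callbacks are elided here:

#include <linux/devfreq.h>

static struct devfreq_dev_profile my_profile = {
	.polling_ms = 100,
	.timer      = DEVFREQ_TIMER_DELAYED,	/* default is DEVFREQ_TIMER_DEFERRABLE */
	/* .target and .get_dev_status are required by a real driver */
};
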
diff --git a/drivers/devfreq/imx-bus.c b/drivers/devfreq/imx-bus.c
index a727067980fb..86850b7dea09 100644
--- a/drivers/devfreq/imx-bus.c
+++ b/drivers/devfreq/imx-bus.c
@@ -7,7 +7,7 @@
#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/devfreq/imx8m-ddrc.c b/drivers/devfreq/imx8m-ddrc.c
index 16636973eb10..e1348490c8aa 100644
--- a/drivers/devfreq/imx8m-ddrc.c
+++ b/drivers/devfreq/imx8m-ddrc.c
@@ -3,9 +3,9 @@
* Copyright 2019 NXP
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/devfreq.h>
#include <linux/pm_opp.h>
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 6354622eda65..83a73f0ccd80 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -8,7 +8,6 @@
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 503376b894b6..4a4f0106ab9d 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 63f0aeb66db6..f0a35277fd84 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
*/
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
+ LIST_HEAD(signalled);
struct sync_pt *pt, *next;
trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
if (!timeline_fence_signaled(&pt->base))
break;
- list_del_init(&pt->link);
+ dma_fence_get(&pt->base);
+
+ list_move_tail(&pt->link, &signalled);
rb_erase(&pt->node, &obj->pt_tree);
- /*
- * A signal callback may release the last reference to this
- * fence, causing it to be freed. That operation has to be
- * last to avoid a use after free inside this loop, and must
- * be after we remove the fence from the timeline in order to
- * prevent deadlocking on timeline->lock inside
- * timeline_fence_release().
- */
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
+
+ list_for_each_entry_safe(pt, next, &signalled, link) {
+ list_del_init(&pt->link);
+ dma_fence_put(&pt->base);
+ }
}
/**
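
The sw_sync change above is an instance of a common pattern: detach the entries to a private list while holding the lock, and only drop the references that can trigger a release callback after the lock is gone, so the release path may safely take the same lock. A simplified, generic sketch (item and flush_items are invented names):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
	struct kref ref;
};

static void flush_items(spinlock_t *lock, struct list_head *pending,
			void (*release)(struct kref *))
{
	struct item *it, *next;
	LIST_HEAD(done);

	spin_lock_irq(lock);
	list_splice_init(pending, &done);	/* detach while locked */
	spin_unlock_irq(lock);

	list_for_each_entry_safe(it, next, &done, link) {
		list_del_init(&it->link);
		kref_put(&it->ref, release);	/* release() may take *lock */
	}
}
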
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 597dae7692b1..9b6642d00871 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -4150,6 +4150,20 @@ static int per_family_init(struct amd64_pvt *pvt)
}
break;
+ case 0x1A:
+ switch (pvt->model) {
+ case 0x00 ... 0x1f:
+ pvt->ctl_name = "F1Ah";
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x40 ... 0x4f:
+ pvt->ctl_name = "F1Ah_M40h";
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ }
+ break;
+
default:
amd64_err("Unsupported family!\n");
return -ENODEV;
@@ -4344,6 +4358,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x1A, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index a897b6aff368..5abf997ca7c1 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -906,7 +906,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
- X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SIERRAFOREST_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 713582cc27d1..33f0ba11c6ad 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -60,7 +60,7 @@ static void __init eisa_name_device(struct eisa_device *edev)
int i;
for (i = 0; i < EISA_INFOS; i++) {
if (!strcmp(edev->id.sig, eisa_table[i].id.sig)) {
- strlcpy(edev->pretty_name,
+ strscpy(edev->pretty_name,
eisa_table[i].name,
sizeof(edev->pretty_name));
return;
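
strscpy(), used above in place of the deprecated strlcpy(), always NUL-terminates and returns the number of bytes copied or -E2BIG on truncation, so truncation can be checked directly. A small hedged sketch (copy_name is hypothetical):

#include <linux/string.h>
#include <linux/types.h>

static bool copy_name(char *dst, size_t dst_size, const char *src)
{
	/* a negative return value means the source did not fit */
	return strscpy(dst, src, dst_size) >= 0;
}
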
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index f9040bd61081..285fe7ad490d 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -1095,3 +1095,22 @@ int sdei_event_handler(struct pt_regs *regs,
return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);
+
+void sdei_handler_abort(void)
+{
+ /*
+ * If the crash happened in an SDEI event handler then we need to
+ * finish the handler with the firmware so that we can have working
+ * interrupts in the crash kernel.
+ */
+ if (__this_cpu_read(sdei_active_critical_event)) {
+ pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
+ __sdei_handler_abort();
+ __this_cpu_write(sdei_active_critical_event, NULL);
+ }
+ if (__this_cpu_read(sdei_active_normal_event)) {
+ pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
+ __sdei_handler_abort();
+ __this_cpu_write(sdei_active_normal_event, NULL);
+ }
+}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 16d64a34d1e1..92389a5481ff 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -88,6 +88,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
+lib-$(CONFIG_X86_64) += x86-5lvl.o
lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
@@ -146,7 +147,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# For RISC-V, we don't need anything special other than arm64. Keep all the
# symbols in .init section and make sure that no absolute symbols references
-# doesn't exist.
+# exist.
STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 770b8ecb7398..8c40fc89f5f9 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -106,7 +106,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
- EFI_LOADER_CODE);
+ EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 732984295295..bfa30625f5d0 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -73,6 +73,8 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_loglevel = CONSOLE_LOGLEVEL_QUIET;
} else if (!strcmp(param, "noinitrd")) {
efi_noinitrd = true;
+ } else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
+ efi_no5lvl = true;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
efi_novamap |= parse_option_str(val, "novamap");
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 6aa38a1bf126..9823f6fb3e01 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -33,6 +33,7 @@
#define EFI_ALLOC_LIMIT ULONG_MAX
#endif
+extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
extern int efi_loglevel;
@@ -955,7 +956,7 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed,
- int memory_type);
+ int memory_type, unsigned long alloc_limit);
efi_status_t efi_random_get_seed(void);
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 32c7a54923b4..674a064b8f7a 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -16,7 +16,8 @@
*/
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
- unsigned long align_shift)
+ unsigned long align_shift,
+ u64 alloc_limit)
{
unsigned long align = 1UL << align_shift;
u64 first_slot, last_slot, region_end;
@@ -29,7 +30,7 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
return 0;
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
- (u64)EFI_ALLOC_LIMIT);
+ alloc_limit);
if (region_end < size)
return 0;
@@ -54,7 +55,8 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long align,
unsigned long *addr,
unsigned long random_seed,
- int memory_type)
+ int memory_type,
+ unsigned long alloc_limit)
{
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
@@ -76,7 +78,7 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, ilog2(align));
+ slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
new file mode 100644
index 000000000000..479dd445acdc
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/efi.h>
+
+#include <asm/boot.h>
+#include <asm/desc.h>
+#include <asm/efi.h>
+
+#include "efistub.h"
+#include "x86-stub.h"
+
+bool efi_no5lvl;
+
+static void (*la57_toggle)(void *cr3);
+
+static const struct desc_struct gdt[] = {
+ [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+ [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+};
+
+/*
+ * Enabling (or disabling) 5 level paging is tricky, because it can only be
+ * done from 32-bit mode with paging disabled. This means not only that the
+ * code itself must be running from 32-bit addressable physical memory, but
+ * also that the root page table must be 32-bit addressable, as programming
+ * a 64-bit value into CR3 when running in 32-bit mode is not supported.
+ */
+efi_status_t efi_setup_5level_paging(void)
+{
+ u8 tmpl_size = (u8 *)&trampoline_ljmp_imm_offset - (u8 *)&trampoline_32bit_src;
+ efi_status_t status;
+ u8 *la57_code;
+
+ if (!efi_is_64bit())
+ return EFI_SUCCESS;
+
+ /* check for 5 level paging support */
+ if (native_cpuid_eax(0) < 7 ||
+ !(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31))))
+ return EFI_SUCCESS;
+
+ /* allocate some 32-bit addressable memory for code and a page table */
+ status = efi_allocate_pages(2 * PAGE_SIZE, (unsigned long *)&la57_code,
+ U32_MAX);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ la57_toggle = memcpy(la57_code, trampoline_32bit_src, tmpl_size);
+ memset(la57_code + tmpl_size, 0x90, PAGE_SIZE - tmpl_size);
+
+ /*
+ * To avoid the need to allocate a 32-bit addressable stack, the
+ * trampoline uses a LJMP instruction to switch back to long mode.
+ * LJMP takes an absolute destination address, which needs to be
+ * fixed up at runtime.
+ */
+ *(u32 *)&la57_code[trampoline_ljmp_imm_offset] += (unsigned long)la57_code;
+
+ efi_adjust_memory_range_protection((unsigned long)la57_toggle, PAGE_SIZE);
+
+ return EFI_SUCCESS;
+}
+
+void efi_5level_switch(void)
+{
+ bool want_la57 = IS_ENABLED(CONFIG_X86_5LEVEL) && !efi_no5lvl;
+ bool have_la57 = native_read_cr4() & X86_CR4_LA57;
+ bool need_toggle = want_la57 ^ have_la57;
+ u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
+ u64 *cr3 = (u64 *)__native_read_cr3();
+ u64 *new_cr3;
+
+ if (!la57_toggle || !need_toggle)
+ return;
+
+ if (!have_la57) {
+ /*
+ * 5 level paging will be enabled, so a root level page needs
+ * to be allocated from the 32-bit addressable physical region,
+ * with its first entry referring to the existing hierarchy.
+ */
+ new_cr3 = memset(pgt, 0, PAGE_SIZE);
+ new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
+ } else {
+ /* take the new root table pointer from the current entry #0 */
+ new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
+
+ /* copy the new root table if it is not 32-bit addressable */
+ if ((u64)new_cr3 > U32_MAX)
+ new_cr3 = memcpy(pgt, new_cr3, PAGE_SIZE);
+ }
+
+ native_load_gdt(&(struct desc_ptr){ sizeof(gdt) - 1, (u64)gdt });
+
+ la57_toggle(new_cr3);
+}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 220be75a5cdc..2fee52ed335d 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -15,16 +15,16 @@
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/boot.h>
+#include <asm/kaslr.h>
+#include <asm/sev.h>
#include "efistub.h"
-
-/* Maximum physical address for 64-bit kernel with 4-level paging */
-#define MAXMEM_X86_64_4LEVEL (1ull << 46)
+#include "x86-stub.h"
const efi_system_table_t *efi_system_table;
const efi_dxe_services_table_t *efi_dxe_table;
-u32 image_offset __section(".data");
static efi_loaded_image_t *image = NULL;
+static efi_memory_attribute_protocol_t *memattr;
typedef union sev_memory_acceptance_protocol sev_memory_acceptance_protocol_t;
union sev_memory_acceptance_protocol {
@@ -72,7 +72,7 @@ preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
rom->data.type = SETUP_PCI;
rom->data.len = size - sizeof(struct setup_data);
rom->data.next = 0;
- rom->pcilen = pci->romsize;
+ rom->pcilen = romsize;
*__rom = rom;
status = efi_call_proto(pci, pci.read, EfiPciIoWidthUint16,
@@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
-static void
-adjust_memory_range_protection(unsigned long start, unsigned long size)
+void efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size)
{
efi_status_t status;
efi_gcd_memory_space_desc_t desc;
@@ -232,12 +232,18 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unsigned long rounded_start, rounded_end;
unsigned long unprotect_start, unprotect_size;
- if (efi_dxe_table == NULL)
- return;
-
rounded_start = rounddown(start, EFI_PAGE_SIZE);
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
+ if (memattr != NULL) {
+ efi_call_proto(memattr, clear_memory_attributes, rounded_start,
+ rounded_end - rounded_start, EFI_MEMORY_XP);
+ return;
+ }
+
+ if (efi_dxe_table == NULL)
+ return;
+
/*
* Don't modify memory region attributes, they are
* already suitable, to lower the possibility to
@@ -278,49 +284,6 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
}
}
-/*
- * Trampoline takes 2 pages and can be loaded in first megabyte of memory
- * with its end placed between 128k and 640k where BIOS might start.
- * (see arch/x86/boot/compressed/pgtable_64.c)
- *
- * We cannot find exact trampoline placement since memory map
- * can be modified by UEFI, and it can alter the computed address.
- */
-
-#define TRAMPOLINE_PLACEMENT_BASE ((128 - 8)*1024)
-#define TRAMPOLINE_PLACEMENT_SIZE (640*1024 - (128 - 8)*1024)
-
-void startup_32(struct boot_params *boot_params);
-
-static void
-setup_memory_protection(unsigned long image_base, unsigned long image_size)
-{
- /*
- * Allow execution of possible trampoline used
- * for switching between 4- and 5-level page tables
- * and relocated kernel image.
- */
-
- adjust_memory_range_protection(TRAMPOLINE_PLACEMENT_BASE,
- TRAMPOLINE_PLACEMENT_SIZE);
-
-#ifdef CONFIG_64BIT
- if (image_base != (unsigned long)startup_32)
- adjust_memory_range_protection(image_base, image_size);
-#else
- /*
- * Clear protection flags on a whole range of possible
- * addresses used for KASLR. We don't need to do that
- * on x86_64, since KASLR/extraction is performed after
- * dedicated identity page tables are built and we only
- * need to remove possible protection on relocated image
- * itself disregarding further relocations.
- */
- adjust_memory_range_protection(LOAD_PHYSICAL_ADDR,
- KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR);
-#endif
-}
-
static void setup_unaccepted_memory(void)
{
efi_guid_t mem_acceptance_proto = OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID;
@@ -346,9 +309,7 @@ static void setup_unaccepted_memory(void)
static const efi_char16_t apple[] = L"Apple";
-static void setup_quirks(struct boot_params *boot_params,
- unsigned long image_base,
- unsigned long image_size)
+static void setup_quirks(struct boot_params *boot_params)
{
efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
efi_table_attr(efi_system_table, fw_vendor);
@@ -357,9 +318,6 @@ static void setup_quirks(struct boot_params *boot_params,
if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
retrieve_apple_device_properties(boot_params);
}
-
- if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES))
- setup_memory_protection(image_base, image_size);
}
/*
@@ -512,7 +470,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
}
image_base = efi_table_attr(image, image_base);
- image_offset = (void *)startup_32 - image_base;
status = efi_allocate_pages(sizeof(struct boot_params),
(unsigned long *)&boot_params, ULONG_MAX);
@@ -803,19 +760,96 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
return EFI_SUCCESS;
}
+static bool have_unsupported_snp_features(void)
+{
+ u64 unsupported;
+
+ unsupported = snp_get_unsupported_features(sev_get_status());
+ if (unsupported) {
+ efi_err("Unsupported SEV-SNP features detected: 0x%llx\n",
+ unsupported);
+ return true;
+ }
+ return false;
+}
+
+static void efi_get_seed(void *seed, int size)
+{
+ efi_get_random_bytes(size, seed);
+
+ /*
+ * This only updates seed[0] when running on 32-bit, but in that case,
+ * seed[1] is not used anyway, as there is no virtual KASLR on 32-bit.
+ */
+ *(unsigned long *)seed ^= kaslr_get_random_long("EFI");
+}
+
+static void error(char *str)
+{
+ efi_warn("Decompression failed: %s\n", str);
+}
+
+static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
+{
+ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+ unsigned long addr, alloc_size, entry;
+ efi_status_t status;
+ u32 seed[2] = {};
+
+ /* determine the required size of the allocation */
+ alloc_size = ALIGN(max_t(unsigned long, output_len, kernel_total_size),
+ MIN_KERNEL_ALIGN);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && !efi_nokaslr) {
+ u64 range = KERNEL_IMAGE_SIZE - LOAD_PHYSICAL_ADDR - kernel_total_size;
+
+ efi_get_seed(seed, sizeof(seed));
+
+ virt_addr += (range * seed[1]) >> 32;
+ virt_addr &= ~(CONFIG_PHYSICAL_ALIGN - 1);
+ }
+
+ status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
+ seed[0], EFI_LOADER_CODE,
+ EFI_X86_KERNEL_ALLOC_LIMIT);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ entry = decompress_kernel((void *)addr, virt_addr, error);
+ if (entry == ULONG_MAX) {
+ efi_free(alloc_size, addr);
+ return EFI_LOAD_ERROR;
+ }
+
+ *kernel_entry = addr + entry;
+
+ efi_adjust_memory_range_protection(addr, kernel_total_size);
+
+ return EFI_SUCCESS;
+}
+
+static void __noreturn enter_kernel(unsigned long kernel_addr,
+ struct boot_params *boot_params)
+{
+ /* enter decompressed kernel with boot_params pointer in RSI/ESI */
+ asm("jmp *%0"::"r"(kernel_addr), "S"(boot_params));
+
+ unreachable();
+}
+
/*
- * On success, we return the address of startup_32, which has potentially been
- * relocated by efi_relocate_kernel.
- * On failure, we exit to the firmware via efi_exit instead of returning.
+ * On success, this routine will jump to the relocated image directly and never
+ * return. On failure, it will exit to the firmware via efi_exit() instead of
+ * returning.
*/
-asmlinkage unsigned long efi_main(efi_handle_t handle,
- efi_system_table_t *sys_table_arg,
- struct boot_params *boot_params)
+void __noreturn efi_stub_entry(efi_handle_t handle,
+ efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
{
- unsigned long bzimage_addr = (unsigned long)startup_32;
- unsigned long buffer_start, buffer_end;
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
struct setup_header *hdr = &boot_params->hdr;
const struct linux_efi_initrd *initrd = NULL;
+ unsigned long kernel_entry;
efi_status_t status;
efi_system_table = sys_table_arg;
@@ -823,65 +857,25 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
efi_exit(handle, EFI_INVALID_PARAMETER);
- efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
- if (efi_dxe_table &&
- efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
- efi_warn("Ignoring DXE services table: invalid signature\n");
- efi_dxe_table = NULL;
+ if (have_unsupported_snp_features())
+ efi_exit(handle, EFI_UNSUPPORTED);
+
+ if (IS_ENABLED(CONFIG_EFI_DXE_MEM_ATTRIBUTES)) {
+ efi_dxe_table = get_efi_config_table(EFI_DXE_SERVICES_TABLE_GUID);
+ if (efi_dxe_table &&
+ efi_dxe_table->hdr.signature != EFI_DXE_SERVICES_TABLE_SIGNATURE) {
+ efi_warn("Ignoring DXE services table: invalid signature\n");
+ efi_dxe_table = NULL;
+ }
}
- /*
- * If the kernel isn't already loaded at a suitable address,
- * relocate it.
- *
- * It must be loaded above LOAD_PHYSICAL_ADDR.
- *
- * The maximum address for 64-bit is 1 << 46 for 4-level paging. This
- * is defined as the macro MAXMEM, but unfortunately that is not a
- * compile-time constant if 5-level paging is configured, so we instead
- * define our own macro for use here.
- *
- * For 32-bit, the maximum address is complicated to figure out, for
- * now use KERNEL_IMAGE_SIZE, which will be 512MiB, the same as what
- * KASLR uses.
- *
- * Also relocate it if image_offset is zero, i.e. the kernel wasn't
- * loaded by LoadImage, but rather by a bootloader that called the
- * handover entry. The reason we must always relocate in this case is
- * to handle the case of systemd-boot booting a unified kernel image,
- * which is a PE executable that contains the bzImage and an initrd as
- * COFF sections. The initrd section is placed after the bzImage
- * without ensuring that there are at least init_size bytes available
- * for the bzImage, and thus the compressed kernel's startup code may
- * overwrite the initrd unless it is moved out of the way.
- */
+ /* grab the memory attributes protocol if it exists */
+ efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
- buffer_start = ALIGN(bzimage_addr - image_offset,
- hdr->kernel_alignment);
- buffer_end = buffer_start + hdr->init_size;
-
- if ((buffer_start < LOAD_PHYSICAL_ADDR) ||
- (IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
- (IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
- (image_offset == 0)) {
- extern char _bss[];
-
- status = efi_relocate_kernel(&bzimage_addr,
- (unsigned long)_bss - bzimage_addr,
- hdr->init_size,
- hdr->pref_address,
- hdr->kernel_alignment,
- LOAD_PHYSICAL_ADDR);
- if (status != EFI_SUCCESS) {
- efi_err("efi_relocate_kernel() failed!\n");
- goto fail;
- }
- /*
- * Now that we've copied the kernel elsewhere, we no longer
- * have a set up block before startup_32(), so reset image_offset
- * to zero in case it was set earlier.
- */
- image_offset = 0;
+ status = efi_setup_5level_paging();
+ if (status != EFI_SUCCESS) {
+ efi_err("efi_setup_5level_paging() failed!\n");
+ goto fail;
}
#ifdef CONFIG_CMDLINE_BOOL
@@ -901,6 +895,12 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
}
}
+ status = efi_decompress_kernel(&kernel_entry);
+ if (status != EFI_SUCCESS) {
+ efi_err("Failed to decompress kernel\n");
+ goto fail;
+ }
+
/*
* At this point, an initrd may already have been loaded by the
* bootloader and passed via bootparams. We permit an initrd loaded
@@ -940,7 +940,7 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
setup_efi_pci(boot_params);
- setup_quirks(boot_params, bzimage_addr, buffer_end - buffer_start);
+ setup_quirks(boot_params);
setup_unaccepted_memory();
@@ -950,9 +950,38 @@ asmlinkage unsigned long efi_main(efi_handle_t handle,
goto fail;
}
- return bzimage_addr;
+ /*
+ * Call the SEV init code while still running with the firmware's
+ * GDT/IDT, so #VC exceptions will be handled by EFI.
+ */
+ sev_enable(boot_params);
+
+ efi_5level_switch();
+
+ enter_kernel(kernel_entry, boot_params);
fail:
- efi_err("efi_main() failed!\n");
+ efi_err("efi_stub_entry() failed!\n");
efi_exit(handle, status);
}
+
+#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
+void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params)
+{
+ extern char _bss[], _ebss[];
+
+ memset(_bss, 0, _ebss - _bss);
+ efi_stub_entry(handle, sys_table_arg, boot_params);
+}
+
+#ifndef CONFIG_EFI_MIXED
+extern __alias(efi_handover_entry)
+void efi32_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+
+extern __alias(efi_handover_entry)
+void efi64_stub_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
+ struct boot_params *boot_params);
+#endif
+#endif
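
The efi32/efi64 handover entries above are plain aliases of efi_handover_entry rather than wrappers. A minimal sketch of the same __alias() technique (placeholder names):

#include <linux/compiler.h>

void my_common_entry(void *arg)
{
	/* shared implementation */
}

/* a second linker-visible name for the same code, without an extra call frame */
extern __alias(my_common_entry)
void my_compat_entry(void *arg);
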
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
new file mode 100644
index 000000000000..37c5a36b9d8c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/efi.h>
+
+extern void trampoline_32bit_src(void *, bool);
+extern const u16 trampoline_ljmp_imm_offset;
+
+void efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size);
+
+#ifdef CONFIG_X86_64
+efi_status_t efi_setup_5level_paging(void);
+void efi_5level_switch(void);
+#else
+static inline efi_status_t efi_setup_5level_paging(void) { return EFI_SUCCESS; }
+static inline void efi_5level_switch(void) {}
+#endif
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index e5d7fa1f1d8f..bdb17eac0cb4 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
}
status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
- seed, EFI_LOADER_CODE);
+ seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
goto free_cmdline;
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index d0daacd2c903..09525fb5c240 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -130,14 +130,25 @@ static int __init riscv_enable_runtime_services(void)
}
early_initcall(riscv_enable_runtime_services);
-void efi_virtmap_load(void)
+static void efi_virtmap_load(void)
{
preempt_disable();
switch_mm(current->active_mm, &efi_mm, NULL);
}
-void efi_virtmap_unload(void)
+static void efi_virtmap_unload(void)
{
switch_mm(&efi_mm, current->active_mm, NULL);
preempt_enable();
}
+
+void arch_efi_call_virt_setup(void)
+{
+ sync_kernel_mappings(efi_mm.pgd);
+ efi_virtmap_load();
+}
+
+void arch_efi_call_virt_teardown(void)
+{
+ efi_virtmap_unload();
+}
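
The runtime-wrappers.c hunks below replace the five untyped work arguments with a tagged union plus a token-pasting macro: the service name selects both the EFI_* identifier and the matching union member, and the arguments become a designated initializer for that member inside a compound literal. A standalone, hedged illustration of the same macro shape (plain userspace C, names invented here):

#include <stdio.h>

union demo_args {
	struct { int a, b; } ADD;
	struct { int x;    } NEG;
};

enum demo_op { DEMO_ADD, DEMO_NEG };

static int demo_dispatch(enum demo_op op, const union demo_args *args)
{
	switch (op) {
	case DEMO_ADD: return args->ADD.a + args->ADD.b;
	case DEMO_NEG: return -args->NEG.x;
	}
	return 0;
}

/* paste the tag into both the enum constant and the union member, and wrap
 * the arguments in a compound literal -- the same shape as efi_queue_work() */
#define demo_queue(_op, _args...) \
	demo_dispatch(DEMO_ ## _op, &(union demo_args){ ._op = { _args } })

int main(void)
{
	printf("%d %d\n", demo_queue(ADD, 2, 3), demo_queue(NEG, 7));
	return 0;
}
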
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index a400c4312c82..5d56bc40a79d 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -40,55 +40,97 @@
* code doesn't get too cluttered:
*/
#define efi_call_virt(f, args...) \
- efi_call_virt_pointer(efi.runtime, f, args)
-#define __efi_call_virt(f, args...) \
- __efi_call_virt_pointer(efi.runtime, f, args)
+ arch_efi_call_virt(efi.runtime, f, args)
+
+union efi_rts_args {
+ struct {
+ efi_time_t *time;
+ efi_time_cap_t *capabilities;
+ } GET_TIME;
+
+ struct {
+ efi_time_t *time;
+ } SET_TIME;
+
+ struct {
+ efi_bool_t *enabled;
+ efi_bool_t *pending;
+ efi_time_t *time;
+ } GET_WAKEUP_TIME;
+
+ struct {
+ efi_bool_t enable;
+ efi_time_t *time;
+ } SET_WAKEUP_TIME;
+
+ struct {
+ efi_char16_t *name;
+ efi_guid_t *vendor;
+ u32 *attr;
+ unsigned long *data_size;
+ void *data;
+ } GET_VARIABLE;
+
+ struct {
+ unsigned long *name_size;
+ efi_char16_t *name;
+ efi_guid_t *vendor;
+ } GET_NEXT_VARIABLE;
+
+ struct {
+ efi_char16_t *name;
+ efi_guid_t *vendor;
+ u32 attr;
+ unsigned long data_size;
+ void *data;
+ } SET_VARIABLE;
+
+ struct {
+ u32 attr;
+ u64 *storage_space;
+ u64 *remaining_space;
+ u64 *max_variable_size;
+ } QUERY_VARIABLE_INFO;
+
+ struct {
+ u32 *high_count;
+ } GET_NEXT_HIGH_MONO_COUNT;
+
+ struct {
+ efi_capsule_header_t **capsules;
+ unsigned long count;
+ unsigned long sg_list;
+ } UPDATE_CAPSULE;
+
+ struct {
+ efi_capsule_header_t **capsules;
+ unsigned long count;
+ u64 *max_size;
+ int *reset_type;
+ } QUERY_CAPSULE_CAPS;
+
+ struct {
+ efi_status_t (__efiapi *acpi_prm_handler)(u64, void *);
+ u64 param_buffer_addr;
+ void *context;
+ } ACPI_PRM_HANDLER;
+};
struct efi_runtime_work efi_rts_work;
/*
- * efi_queue_work: Queue efi_runtime_service() and wait until it's done
- * @rts: efi_runtime_service() function identifier
- * @rts_arg<1-5>: efi_runtime_service() function arguments
+ * efi_queue_work: Queue EFI runtime service call and wait for completion
+ * @_rts: EFI runtime service function identifier
+ * @_args: Arguments to pass to the EFI runtime service
*
* Accesses to efi_runtime_services() are serialized by a binary
* semaphore (efi_runtime_lock) and caller waits until the work is
* finished, hence _only_ one work is queued at a time and the caller
* thread waits for completion.
*/
-#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5) \
-({ \
- efi_rts_work.status = EFI_ABORTED; \
- \
- if (!efi_enabled(EFI_RUNTIME_SERVICES)) { \
- pr_warn_once("EFI Runtime Services are disabled!\n"); \
- efi_rts_work.status = EFI_DEVICE_ERROR; \
- goto exit; \
- } \
- \
- init_completion(&efi_rts_work.efi_rts_comp); \
- INIT_WORK(&efi_rts_work.work, efi_call_rts); \
- efi_rts_work.arg1 = _arg1; \
- efi_rts_work.arg2 = _arg2; \
- efi_rts_work.arg3 = _arg3; \
- efi_rts_work.arg4 = _arg4; \
- efi_rts_work.arg5 = _arg5; \
- efi_rts_work.efi_rts_id = _rts; \
- \
- /* \
- * queue_work() returns 0 if work was already on queue, \
- * _ideally_ this should never happen. \
- */ \
- if (queue_work(efi_rts_wq, &efi_rts_work.work)) \
- wait_for_completion(&efi_rts_work.efi_rts_comp); \
- else \
- pr_err("Failed to queue work to efi_rts_wq.\n"); \
- \
- WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED); \
-exit: \
- efi_rts_work.efi_rts_id = EFI_NONE; \
- efi_rts_work.status; \
-})
+#define efi_queue_work(_rts, _args...) \
+ __efi_queue_work(EFI_ ## _rts, \
+ &(union efi_rts_args){ ._rts = { _args }})
#ifndef arch_efi_save_flags
#define arch_efi_save_flags(state_flags) local_save_flags(state_flags)
@@ -103,7 +145,7 @@ unsigned long efi_call_virt_save_flags(void)
return flags;
}
-void efi_call_virt_check_flags(unsigned long flags, const char *call)
+void efi_call_virt_check_flags(unsigned long flags, const void *caller)
{
unsigned long cur_flags, mismatch;
@@ -114,8 +156,8 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
return;
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
- pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
- flags, cur_flags, call);
+ pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI call from %pS\n",
+ flags, cur_flags, caller ?: __builtin_return_address(0));
arch_efi_restore_flags(flags);
}
@@ -170,74 +212,90 @@ extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
/*
* Calls the appropriate efi_runtime_service() with the appropriate
* arguments.
- *
- * Semantics followed by efi_call_rts() to understand efi_runtime_work:
- * 1. If argument was a pointer, recast it from void pointer to original
- * pointer type.
- * 2. If argument was a value, recast it from void pointer to original
- * pointer type and dereference it.
*/
static void efi_call_rts(struct work_struct *work)
{
- void *arg1, *arg2, *arg3, *arg4, *arg5;
+ const union efi_rts_args *args = efi_rts_work.args;
efi_status_t status = EFI_NOT_FOUND;
+ unsigned long flags;
- arg1 = efi_rts_work.arg1;
- arg2 = efi_rts_work.arg2;
- arg3 = efi_rts_work.arg3;
- arg4 = efi_rts_work.arg4;
- arg5 = efi_rts_work.arg5;
+ arch_efi_call_virt_setup();
+ flags = efi_call_virt_save_flags();
switch (efi_rts_work.efi_rts_id) {
case EFI_GET_TIME:
- status = efi_call_virt(get_time, (efi_time_t *)arg1,
- (efi_time_cap_t *)arg2);
+ status = efi_call_virt(get_time,
+ args->GET_TIME.time,
+ args->GET_TIME.capabilities);
break;
case EFI_SET_TIME:
- status = efi_call_virt(set_time, (efi_time_t *)arg1);
+ status = efi_call_virt(set_time,
+ args->SET_TIME.time);
break;
case EFI_GET_WAKEUP_TIME:
- status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
- (efi_bool_t *)arg2, (efi_time_t *)arg3);
+ status = efi_call_virt(get_wakeup_time,
+ args->GET_WAKEUP_TIME.enabled,
+ args->GET_WAKEUP_TIME.pending,
+ args->GET_WAKEUP_TIME.time);
break;
case EFI_SET_WAKEUP_TIME:
- status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
- (efi_time_t *)arg2);
+ status = efi_call_virt(set_wakeup_time,
+ args->SET_WAKEUP_TIME.enable,
+ args->SET_WAKEUP_TIME.time);
break;
case EFI_GET_VARIABLE:
- status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
- (efi_guid_t *)arg2, (u32 *)arg3,
- (unsigned long *)arg4, (void *)arg5);
+ status = efi_call_virt(get_variable,
+ args->GET_VARIABLE.name,
+ args->GET_VARIABLE.vendor,
+ args->GET_VARIABLE.attr,
+ args->GET_VARIABLE.data_size,
+ args->GET_VARIABLE.data);
break;
case EFI_GET_NEXT_VARIABLE:
- status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
- (efi_char16_t *)arg2,
- (efi_guid_t *)arg3);
+ status = efi_call_virt(get_next_variable,
+ args->GET_NEXT_VARIABLE.name_size,
+ args->GET_NEXT_VARIABLE.name,
+ args->GET_NEXT_VARIABLE.vendor);
break;
case EFI_SET_VARIABLE:
- status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
- (efi_guid_t *)arg2, *(u32 *)arg3,
- *(unsigned long *)arg4, (void *)arg5);
+ status = efi_call_virt(set_variable,
+ args->SET_VARIABLE.name,
+ args->SET_VARIABLE.vendor,
+ args->SET_VARIABLE.attr,
+ args->SET_VARIABLE.data_size,
+ args->SET_VARIABLE.data);
break;
case EFI_QUERY_VARIABLE_INFO:
- status = efi_call_virt(query_variable_info, *(u32 *)arg1,
- (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+ status = efi_call_virt(query_variable_info,
+ args->QUERY_VARIABLE_INFO.attr,
+ args->QUERY_VARIABLE_INFO.storage_space,
+ args->QUERY_VARIABLE_INFO.remaining_space,
+ args->QUERY_VARIABLE_INFO.max_variable_size);
break;
case EFI_GET_NEXT_HIGH_MONO_COUNT:
- status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+ status = efi_call_virt(get_next_high_mono_count,
+ args->GET_NEXT_HIGH_MONO_COUNT.high_count);
break;
case EFI_UPDATE_CAPSULE:
status = efi_call_virt(update_capsule,
- (efi_capsule_header_t **)arg1,
- *(unsigned long *)arg2,
- *(unsigned long *)arg3);
+ args->UPDATE_CAPSULE.capsules,
+ args->UPDATE_CAPSULE.count,
+ args->UPDATE_CAPSULE.sg_list);
break;
case EFI_QUERY_CAPSULE_CAPS:
status = efi_call_virt(query_capsule_caps,
- (efi_capsule_header_t **)arg1,
- *(unsigned long *)arg2, (u64 *)arg3,
- (int *)arg4);
+ args->QUERY_CAPSULE_CAPS.capsules,
+ args->QUERY_CAPSULE_CAPS.count,
+ args->QUERY_CAPSULE_CAPS.max_size,
+ args->QUERY_CAPSULE_CAPS.reset_type);
break;
+ case EFI_ACPI_PRM_HANDLER:
+#ifdef CONFIG_ACPI_PRMT
+ status = arch_efi_call_virt(args, ACPI_PRM_HANDLER.acpi_prm_handler,
+ args->ACPI_PRM_HANDLER.param_buffer_addr,
+ args->ACPI_PRM_HANDLER.context);
+ break;
+#endif
default:
/*
* Ideally, we should never reach here because a caller of this
@@ -246,17 +304,53 @@ static void efi_call_rts(struct work_struct *work)
*/
pr_err("Requested executing invalid EFI Runtime Service.\n");
}
+
+ efi_call_virt_check_flags(flags, efi_rts_work.caller);
+ arch_efi_call_virt_teardown();
+
efi_rts_work.status = status;
complete(&efi_rts_work.efi_rts_comp);
}
+static efi_status_t __efi_queue_work(enum efi_rts_ids id,
+ union efi_rts_args *args)
+{
+ efi_rts_work.efi_rts_id = id;
+ efi_rts_work.args = args;
+ efi_rts_work.caller = __builtin_return_address(0);
+ efi_rts_work.status = EFI_ABORTED;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+ pr_warn_once("EFI Runtime Services are disabled!\n");
+ efi_rts_work.status = EFI_DEVICE_ERROR;
+ goto exit;
+ }
+
+ init_completion(&efi_rts_work.efi_rts_comp);
+ INIT_WORK(&efi_rts_work.work, efi_call_rts);
+
+ /*
+ * queue_work() returns 0 if work was already on queue,
+ * _ideally_ this should never happen.
+ */
+ if (queue_work(efi_rts_wq, &efi_rts_work.work))
+ wait_for_completion(&efi_rts_work.efi_rts_comp);
+ else
+ pr_err("Failed to queue work to efi_rts_wq.\n");
+
+ WARN_ON_ONCE(efi_rts_work.status == EFI_ABORTED);
+exit:
+ efi_rts_work.efi_rts_id = EFI_NONE;
+ return efi_rts_work.status;
+}
+
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
efi_status_t status;
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
+ status = efi_queue_work(GET_TIME, tm, tc);
up(&efi_runtime_lock);
return status;
}
@@ -267,7 +361,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
+ status = efi_queue_work(SET_TIME, tm);
up(&efi_runtime_lock);
return status;
}
@@ -280,8 +374,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
- NULL);
+ status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm);
up(&efi_runtime_lock);
return status;
}
@@ -292,8 +385,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
- NULL);
+ status = efi_queue_work(SET_WAKEUP_TIME, enabled, tm);
up(&efi_runtime_lock);
return status;
}
@@ -308,7 +400,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
+ status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
data);
up(&efi_runtime_lock);
return status;
@@ -322,8 +414,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
- NULL, NULL);
+ status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor);
up(&efi_runtime_lock);
return status;
}
@@ -338,24 +429,23 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
+ status = efi_queue_work(SET_VARIABLE, name, vendor, attr, data_size,
data);
up(&efi_runtime_lock);
return status;
}
static efi_status_t
-virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
- u32 attr, unsigned long data_size,
- void *data)
+virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
+ unsigned long data_size, void *data)
{
efi_status_t status;
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
- status = efi_call_virt(set_variable, name, vendor, attr, data_size,
- data);
+ status = efi_call_virt_pointer(efi.runtime, set_variable, name, vendor,
+ attr, data_size, data);
up(&efi_runtime_lock);
return status;
}
@@ -373,17 +463,15 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
- remaining_space, max_variable_size, NULL);
+ status = efi_queue_work(QUERY_VARIABLE_INFO, attr, storage_space,
+ remaining_space, max_variable_size);
up(&efi_runtime_lock);
return status;
}
static efi_status_t
-virt_efi_query_variable_info_nonblocking(u32 attr,
- u64 *storage_space,
- u64 *remaining_space,
- u64 *max_variable_size)
+virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
+ u64 *remaining_space, u64 *max_variable_size)
{
efi_status_t status;
@@ -393,8 +481,9 @@ virt_efi_query_variable_info_nonblocking(u32 attr,
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
- status = efi_call_virt(query_variable_info, attr, storage_space,
- remaining_space, max_variable_size);
+ status = efi_call_virt_pointer(efi.runtime, query_variable_info, attr,
+ storage_space, remaining_space,
+ max_variable_size);
up(&efi_runtime_lock);
return status;
}
@@ -405,8 +494,7 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
- NULL, NULL);
+ status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count);
up(&efi_runtime_lock);
return status;
}
@@ -421,8 +509,13 @@ static void virt_efi_reset_system(int reset_type,
"could not get exclusive access to the firmware\n");
return;
}
+
+ arch_efi_call_virt_setup();
efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
- __efi_call_virt(reset_system, reset_type, status, data_size, data);
+ arch_efi_call_virt(efi.runtime, reset_system, reset_type, status,
+ data_size, data);
+ arch_efi_call_virt_teardown();
+
up(&efi_runtime_lock);
}
@@ -437,8 +530,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
- NULL, NULL);
+ status = efi_queue_work(UPDATE_CAPSULE, capsules, count, sg_list);
up(&efi_runtime_lock);
return status;
}
@@ -455,26 +547,44 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
- max_size, reset_type, NULL);
+ status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, count,
+ max_size, reset_type);
up(&efi_runtime_lock);
return status;
}
-void efi_native_runtime_setup(void)
+void __init efi_native_runtime_setup(void)
{
- efi.get_time = virt_efi_get_time;
- efi.set_time = virt_efi_set_time;
- efi.get_wakeup_time = virt_efi_get_wakeup_time;
- efi.set_wakeup_time = virt_efi_set_wakeup_time;
- efi.get_variable = virt_efi_get_variable;
- efi.get_next_variable = virt_efi_get_next_variable;
- efi.set_variable = virt_efi_set_variable;
- efi.set_variable_nonblocking = virt_efi_set_variable_nonblocking;
- efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
- efi.reset_system = virt_efi_reset_system;
- efi.query_variable_info = virt_efi_query_variable_info;
- efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nonblocking;
- efi.update_capsule = virt_efi_update_capsule;
- efi.query_capsule_caps = virt_efi_query_capsule_caps;
+ efi.get_time = virt_efi_get_time;
+ efi.set_time = virt_efi_set_time;
+ efi.get_wakeup_time = virt_efi_get_wakeup_time;
+ efi.set_wakeup_time = virt_efi_set_wakeup_time;
+ efi.get_variable = virt_efi_get_variable;
+ efi.get_next_variable = virt_efi_get_next_variable;
+ efi.set_variable = virt_efi_set_variable;
+ efi.set_variable_nonblocking = virt_efi_set_variable_nb;
+ efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+ efi.reset_system = virt_efi_reset_system;
+ efi.query_variable_info = virt_efi_query_variable_info;
+ efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nb;
+ efi.update_capsule = virt_efi_update_capsule;
+ efi.query_capsule_caps = virt_efi_query_capsule_caps;
}
+
+#ifdef CONFIG_ACPI_PRMT
+
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context)
+{
+ efi_status_t status;
+
+ if (down_interruptible(&efi_runtime_lock))
+ return EFI_ABORTED;
+ status = efi_queue_work(ACPI_PRM_HANDLER, handler_addr,
+ param_buffer_addr, context);
+ up(&efi_runtime_lock);
+ return status;
+}
+
+#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index e382dfebad7c..673bafb8be58 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -111,6 +111,9 @@ config GPIO_MAX730X
config GPIO_IDIO_16
tristate
+ select REGMAP_IRQ
+ select GPIOLIB_IRQCHIP
+ select GPIO_REGMAP
help
Enables support for the idio-16 library functions. The idio-16 library
provides functions to facilitate communication with devices within the
@@ -191,7 +194,7 @@ config GPIO_RASPBERRYPI_EXP
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
- depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
+ depends on ARCH_BCM_MOBILE || COMPILE_TEST
help
Turn on GPIO support for Broadcom "Kona" chips.
@@ -283,7 +286,7 @@ config GPIO_EXAR
config GPIO_GE_FPGA
bool "GE FPGA based GPIO"
- depends on GE_FPGA
+ depends on GE_FPGA || COMPILE_TEST
select GPIO_GENERIC
help
Support for common GPIO functionality provided on some GE Single Board
@@ -564,7 +567,7 @@ config GPIO_SAMA5D2_PIOBU
maintain their value during backup/self-refresh.
config GPIO_SIFIVE
- bool "SiFive GPIO support"
+ tristate "SiFive GPIO support"
depends on OF_GPIO
select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
@@ -858,6 +861,7 @@ config GPIO_104_DIO_48E
select REGMAP_IRQ
select GPIOLIB_IRQCHIP
select GPIO_I8255
+ select I8254
help
Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
104-DIO-24E). The base port addresses for the devices may be
@@ -868,7 +872,7 @@ config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
depends on PC104
select ISA_BUS_API
- select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
select GPIO_IDIO_16
help
Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -994,7 +998,10 @@ config GPIO_WINBOND
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
select ISA_BUS_API
+ select REGMAP_IRQ
+ select REGMAP_MMIO
select GPIOLIB_IRQCHIP
+ select GPIO_REGMAP
help
Enables GPIO support for the WinSystems WS16C48. The base port
addresses for the devices may be configured via the base module
@@ -1028,6 +1035,17 @@ config GPIO_FXL6408
To compile this driver as a module, choose M here: the module will
be called gpio-fxl6408.
+config GPIO_DS4520
+ tristate "DS4520 I2C GPIO expander"
+ select REGMAP_I2C
+ select GPIO_REGMAP
+ help
+ GPIO driver for ADI DS4520 I2C-based GPIO expander.
+ Say yes here to enable the GPIO driver for the ADI DS4520 chip.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-ds4520.
+
config GPIO_GW_PLD
tristate "Gateworks PLD GPIO Expander"
depends on OF_GPIO
@@ -1640,7 +1658,7 @@ config GPIO_PCH
config GPIO_PCI_IDIO_16
tristate "ACCES PCI-IDIO-16 GPIO support"
- select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
select GPIO_IDIO_16
help
Enables GPIO support for the ACCES PCI-IDIO-16. An interrupt is
@@ -1650,7 +1668,10 @@ config GPIO_PCI_IDIO_16
config GPIO_PCIE_IDIO_24
tristate "ACCES PCIe-IDIO-24 GPIO support"
+ select REGMAP_IRQ
+ select REGMAP_MMIO
select GPIOLIB_IRQCHIP
+ select GPIO_REGMAP
help
Enables GPIO support for the ACCES PCIe-IDIO-24 family (PCIe-IDIO-24,
PCIe-IDI-24, PCIe-IDO-24, PCIe-IDIO-12). An interrupt is generated
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index c3ac51d47aa9..eb73b5d633eb 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_DLN2) += gpio-dln2.o
+obj-$(CONFIG_GPIO_DS4520) += gpio-ds4520.o
obj-$(CONFIG_GPIO_DWAPB) += gpio-dwapb.o
obj-$(CONFIG_GPIO_EIC_SPRD) += gpio-eic-sprd.o
obj-$(CONFIG_GPIO_ELKHARTLAKE) += gpio-elkhartlake.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 8ff5f4ff5958..4df9becaf349 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/i8254.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/isa.h>
@@ -16,6 +17,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/regmap.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include "gpio-i8255.h"
@@ -37,6 +39,8 @@ MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
#define DIO48E_ENABLE_INTERRUPT 0xB
#define DIO48E_DISABLE_INTERRUPT DIO48E_ENABLE_INTERRUPT
+#define DIO48E_ENABLE_COUNTER_TIMER_ADDRESSING 0xD
+#define DIO48E_DISABLE_COUNTER_TIMER_ADDRESSING DIO48E_ENABLE_COUNTER_TIMER_ADDRESSING
#define DIO48E_CLEAR_INTERRUPT 0xF
#define DIO48E_NUM_PPI 2
@@ -75,18 +79,20 @@ static const struct regmap_access_table dio48e_precious_table = {
.yes_ranges = dio48e_precious_ranges,
.n_yes_ranges = ARRAY_SIZE(dio48e_precious_ranges),
};
-static const struct regmap_config dio48e_regmap_config = {
- .reg_bits = 8,
- .reg_stride = 1,
- .val_bits = 8,
- .io_port = true,
- .max_register = 0xF,
- .wr_table = &dio48e_wr_table,
- .rd_table = &dio48e_rd_table,
- .volatile_table = &dio48e_volatile_table,
- .precious_table = &dio48e_precious_table,
- .cache_type = REGCACHE_FLAT,
- .use_raw_spinlock = true,
+
+static const struct regmap_range pit_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x3),
+};
+static const struct regmap_range pit_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x2),
+};
+static const struct regmap_access_table pit_wr_table = {
+ .yes_ranges = pit_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pit_wr_ranges),
+};
+static const struct regmap_access_table pit_rd_table = {
+ .yes_ranges = pit_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(pit_rd_ranges),
};
/* only bit 3 on each respective Port C supports interrupts */
@@ -102,14 +108,56 @@ static const struct regmap_irq dio48e_regmap_irqs[] = {
/**
* struct dio48e_gpio - GPIO device private data structure
+ * @lock: synchronization lock to prevent I/O race conditions
* @map: Regmap for the device
+ * @regs: virtual mapping for device registers
+ * @flags: IRQ flags saved during locking
* @irq_mask: Current IRQ mask state on the device
*/
struct dio48e_gpio {
+ raw_spinlock_t lock;
struct regmap *map;
+ void __iomem *regs;
+ unsigned long flags;
unsigned int irq_mask;
};
+static void dio48e_regmap_lock(void *lock_arg) __acquires(&dio48egpio->lock)
+{
+ struct dio48e_gpio *const dio48egpio = lock_arg;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
+ dio48egpio->flags = flags;
+}
+
+static void dio48e_regmap_unlock(void *lock_arg) __releases(&dio48egpio->lock)
+{
+ struct dio48e_gpio *const dio48egpio = lock_arg;
+
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, dio48egpio->flags);
+}
+
+static void pit_regmap_lock(void *lock_arg) __acquires(&dio48egpio->lock)
+{
+ struct dio48e_gpio *const dio48egpio = lock_arg;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&dio48egpio->lock, flags);
+ dio48egpio->flags = flags;
+
+ iowrite8(0x00, dio48egpio->regs + DIO48E_ENABLE_COUNTER_TIMER_ADDRESSING);
+}
+
+static void pit_regmap_unlock(void *lock_arg) __releases(&dio48egpio->lock)
+{
+ struct dio48e_gpio *const dio48egpio = lock_arg;
+
+ ioread8(dio48egpio->regs + DIO48E_DISABLE_COUNTER_TIMER_ADDRESSING);
+
+ raw_spin_unlock_irqrestore(&dio48egpio->lock, dio48egpio->flags);
+}
+
static int dio48e_handle_mask_sync(const int index,
const unsigned int mask_buf_def,
const unsigned int mask_buf,
@@ -176,6 +224,9 @@ static int dio48e_probe(struct device *dev, unsigned int id)
struct i8255_regmap_config config = {};
void __iomem *regs;
struct regmap *map;
+ struct regmap_config dio48e_regmap_config;
+ struct regmap_config pit_regmap_config;
+ struct i8254_regmap_config pit_config;
int err;
struct regmap_irq_chip *chip;
struct dio48e_gpio *dio48egpio;
@@ -187,21 +238,58 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
+ dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
+ if (!dio48egpio)
+ return -ENOMEM;
+
regs = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
if (!regs)
return -ENOMEM;
+ dio48egpio->regs = regs;
+
+ raw_spin_lock_init(&dio48egpio->lock);
+
+ dio48e_regmap_config = (struct regmap_config) {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .lock = dio48e_regmap_lock,
+ .unlock = dio48e_regmap_unlock,
+ .lock_arg = dio48egpio,
+ .io_port = true,
+ .wr_table = &dio48e_wr_table,
+ .rd_table = &dio48e_rd_table,
+ .volatile_table = &dio48e_volatile_table,
+ .precious_table = &dio48e_precious_table,
+ .cache_type = REGCACHE_FLAT,
+ };
+
map = devm_regmap_init_mmio(dev, regs, &dio48e_regmap_config);
if (IS_ERR(map))
return dev_err_probe(dev, PTR_ERR(map),
"Unable to initialize register map\n");
- dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
- if (!dio48egpio)
- return -ENOMEM;
-
dio48egpio->map = map;
+ pit_regmap_config = (struct regmap_config) {
+ .name = "i8254",
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .lock = pit_regmap_lock,
+ .unlock = pit_regmap_unlock,
+ .lock_arg = dio48egpio,
+ .io_port = true,
+ .wr_table = &pit_wr_table,
+ .rd_table = &pit_rd_table,
+ };
+
+ pit_config.map = devm_regmap_init_mmio(dev, regs, &pit_regmap_config);
+ if (IS_ERR(pit_config.map))
+ return dev_err_probe(dev, PTR_ERR(pit_config.map),
+ "Unable to initialize i8254 register map\n");
+
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -225,6 +313,12 @@ static int dio48e_probe(struct device *dev, unsigned int id)
if (err)
return dev_err_probe(dev, err, "IRQ registration failed\n");
+ pit_config.parent = dev;
+
+ err = devm_i8254_regmap_register(dev, &pit_config);
+ if (err)
+ return err;
+
config.parent = dev;
config.map = map;
config.num_ppi = DIO48E_NUM_PPI;
@@ -245,3 +339,4 @@ module_isa_driver_with_irq(dio48e_driver, num_dio48e, num_irq);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(I8254);
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 098fbefdbe22..f03ccd0f534c 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -6,19 +6,16 @@
* This driver supports the following ACCES devices: 104-IDIO-16,
* 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8.
*/
-#include <linux/bitmap.h>
+#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/err.h>
#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include "gpio-idio-16.h"
@@ -36,187 +33,62 @@ static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
-/**
- * struct idio_16_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @lock: synchronization lock to prevent I/O race conditions
- * @irq_mask: I/O bits affected by interrupts
- * @reg: I/O address offset for the device registers
- * @state: ACCES IDIO-16 device state
- */
-struct idio_16_gpio {
- struct gpio_chip chip;
- raw_spinlock_t lock;
- unsigned long irq_mask;
- struct idio_16 __iomem *reg;
- struct idio_16_state state;
+static const struct regmap_range idio_16_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x4),
};
-
-static int idio_16_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
-{
- if (idio_16_get_direction(offset))
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int idio_16_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return 0;
-}
-
-static int idio_16_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- chip->set(chip, offset, value);
- return 0;
-}
-
-static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- return idio_16_get(idio16gpio->reg, &idio16gpio->state, offset);
-}
-
-static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- idio_16_get_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits);
-
- return 0;
-}
-
-static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- idio_16_set(idio16gpio->reg, &idio16gpio->state, offset, value);
-}
-
-static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- idio_16_set_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits);
-}
-
-static void idio_16_irq_ack(struct irq_data *data)
-{
-}
-
-static void idio_16_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- unsigned long flags;
-
- idio16gpio->irq_mask &= ~BIT(offset);
- gpiochip_disable_irq(chip, offset);
-
- if (!idio16gpio->irq_mask) {
- raw_spin_lock_irqsave(&idio16gpio->lock, flags);
-
- iowrite8(0, &idio16gpio->reg->irq_ctl);
-
- raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
- }
-}
-
-static void idio_16_irq_unmask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- const unsigned long prev_irq_mask = idio16gpio->irq_mask;
- unsigned long flags;
-
- gpiochip_enable_irq(chip, offset);
- idio16gpio->irq_mask |= BIT(offset);
-
- if (!prev_irq_mask) {
- raw_spin_lock_irqsave(&idio16gpio->lock, flags);
-
- ioread8(&idio16gpio->reg->irq_ctl);
-
- raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
- }
-}
-
-static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- /* The only valid irq types are none and both-edges */
- if (flow_type != IRQ_TYPE_NONE &&
- (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip idio_16_irqchip = {
- .name = "104-idio-16",
- .irq_ack = idio_16_irq_ack,
- .irq_mask = idio_16_irq_mask,
- .irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
+static const struct regmap_range idio_16_rd_ranges[] = {
+ regmap_reg_range(0x1, 0x2), regmap_reg_range(0x5, 0x5),
};
-
-static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
-{
- struct idio_16_gpio *const idio16gpio = dev_id;
- struct gpio_chip *const chip = &idio16gpio->chip;
- int gpio;
-
- for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
- generic_handle_domain_irq(chip->irq.domain, gpio);
-
- raw_spin_lock(&idio16gpio->lock);
-
- iowrite8(0, &idio16gpio->reg->in0_7);
-
- raw_spin_unlock(&idio16gpio->lock);
-
- return IRQ_HANDLED;
-}
-
-#define IDIO_16_NGPIO 32
-static const char *idio_16_names[IDIO_16_NGPIO] = {
- "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
- "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
- "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
- "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15"
+static const struct regmap_range idio_16_precious_ranges[] = {
+ regmap_reg_range(0x2, 0x2),
+};
+static const struct regmap_access_table idio_16_wr_table = {
+ .yes_ranges = idio_16_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_16_wr_ranges),
+};
+static const struct regmap_access_table idio_16_rd_table = {
+ .yes_ranges = idio_16_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_16_rd_ranges),
+};
+static const struct regmap_access_table idio_16_precious_table = {
+ .yes_ranges = idio_16_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_16_precious_ranges),
+};
+static const struct regmap_config idio_16_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .wr_table = &idio_16_wr_table,
+ .rd_table = &idio_16_rd_table,
+ .volatile_table = &idio_16_rd_table,
+ .precious_table = &idio_16_precious_table,
+ .cache_type = REGCACHE_FLAT,
+ .use_raw_spinlock = true,
};
-static int idio_16_irq_init_hw(struct gpio_chip *gc)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(gc);
-
- /* Disable IRQ by default */
- iowrite8(0, &idio16gpio->reg->irq_ctl);
- iowrite8(0, &idio16gpio->reg->in0_7);
+/* Only input lines (GPIO 16-31) support interrupts */
+#define IDIO_16_REGMAP_IRQ(_id) \
+ [16 + _id] = { \
+ .mask = BIT(_id), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \
+ }
- return 0;
-}
+static const struct regmap_irq idio_16_regmap_irqs[] = {
+ IDIO_16_REGMAP_IRQ(0), IDIO_16_REGMAP_IRQ(1), IDIO_16_REGMAP_IRQ(2), /* 0-2 */
+ IDIO_16_REGMAP_IRQ(3), IDIO_16_REGMAP_IRQ(4), IDIO_16_REGMAP_IRQ(5), /* 3-5 */
+ IDIO_16_REGMAP_IRQ(6), IDIO_16_REGMAP_IRQ(7), IDIO_16_REGMAP_IRQ(8), /* 6-8 */
+ IDIO_16_REGMAP_IRQ(9), IDIO_16_REGMAP_IRQ(10), IDIO_16_REGMAP_IRQ(11), /* 9-11 */
+ IDIO_16_REGMAP_IRQ(12), IDIO_16_REGMAP_IRQ(13), IDIO_16_REGMAP_IRQ(14), /* 12-14 */
+ IDIO_16_REGMAP_IRQ(15), /* 15 */
+};
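As a sketch of what the helper macro above generates, IDIO_16_REGMAP_IRQ(0) expands to an entry at array index 16 (the first input line), with the mask taken from bit 0 of the single mask/status register:

	[16] = {
		.mask = BIT(0),
		.type = { .types_supported = IRQ_TYPE_EDGE_BOTH },
	},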
static int idio_16_probe(struct device *dev, unsigned int id)
{
- struct idio_16_gpio *idio16gpio;
const char *const name = dev_name(dev);
- struct gpio_irq_chip *girq;
- int err;
-
- idio16gpio = devm_kzalloc(dev, sizeof(*idio16gpio), GFP_KERNEL);
- if (!idio16gpio)
- return -ENOMEM;
+ struct idio_16_regmap_config config = {};
+ void __iomem *regs;
+ struct regmap *map;
if (!devm_request_region(dev, base[id], IDIO_16_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -224,54 +96,22 @@ static int idio_16_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- idio16gpio->reg = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
- if (!idio16gpio->reg)
+ regs = devm_ioport_map(dev, base[id], IDIO_16_EXTENT);
+ if (!regs)
return -ENOMEM;
- idio16gpio->chip.label = name;
- idio16gpio->chip.parent = dev;
- idio16gpio->chip.owner = THIS_MODULE;
- idio16gpio->chip.base = -1;
- idio16gpio->chip.ngpio = IDIO_16_NGPIO;
- idio16gpio->chip.names = idio_16_names;
- idio16gpio->chip.get_direction = idio_16_gpio_get_direction;
- idio16gpio->chip.direction_input = idio_16_gpio_direction_input;
- idio16gpio->chip.direction_output = idio_16_gpio_direction_output;
- idio16gpio->chip.get = idio_16_gpio_get;
- idio16gpio->chip.get_multiple = idio_16_gpio_get_multiple;
- idio16gpio->chip.set = idio_16_gpio_set;
- idio16gpio->chip.set_multiple = idio_16_gpio_set_multiple;
-
- idio_16_state_init(&idio16gpio->state);
- /* FET off states are represented by bit values of "1" */
- bitmap_fill(idio16gpio->state.out_state, IDIO_16_NOUT);
-
- girq = &idio16gpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = idio_16_irq_init_hw;
+ map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n");
- raw_spin_lock_init(&idio16gpio->lock);
-
- err = devm_gpiochip_add_data(dev, &idio16gpio->chip, idio16gpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
-
- err = devm_request_irq(dev, irq[id], idio_16_irq_handler, 0, name,
- idio16gpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- return err;
- }
+ config.parent = dev;
+ config.map = map;
+ config.regmap_irqs = idio_16_regmap_irqs;
+ config.num_regmap_irqs = ARRAY_SIZE(idio_16_regmap_irqs);
+ config.irq = irq[id];
+ config.no_status = true;
- return 0;
+ return devm_idio_16_regmap_register(dev, &config);
}
static struct isa_driver idio_16_driver = {
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 0464f1ecd20d..c7ac5a9ffb1f 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -135,8 +135,6 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
priv->gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
priv->gc.owner = THIS_MODULE;
- platform_set_drvdata(pdev, priv);
-
return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
}
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index be1ed7ee5225..11edf1fe6c90 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -9,6 +9,7 @@
#include <linux/gpio/driver.h>
#include <linux/mfd/altera-a10sr.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/property.h>
@@ -104,7 +105,7 @@ static struct platform_driver altr_a10sr_gpio_driver = {
.probe = altr_a10sr_gpio_probe,
.driver = {
.name = "altr_a10sr_gpio",
- .of_match_table = of_match_ptr(altr_a10sr_gpio_of_match),
+ .of_match_table = altr_a10sr_gpio_of_match,
},
};
module_platform_driver(altr_a10sr_gpio_driver);
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index aa0a954b8392..f0c0c0f77eb0 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -9,8 +9,9 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/gpio-ath79.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 70770429ba48..5321ef98f442 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -8,12 +8,14 @@
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/io.h>
#include <linux/gpio/driver.h>
-#include <linux/of_device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#define BCM_GPIO_PASSWD 0x00a5a501
#define GPIO_PER_BANK 32
@@ -62,7 +64,6 @@ struct bcm_kona_gpio {
struct gpio_chip gpio_chip;
struct irq_domain *irq_domain;
struct bcm_kona_gpio_bank *banks;
- struct platform_device *pdev;
};
struct bcm_kona_gpio_bank {
@@ -556,19 +557,12 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
static int bcm_kona_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
struct bcm_kona_gpio_bank *bank;
struct bcm_kona_gpio *kona_gpio;
struct gpio_chip *chip;
int ret;
int i;
- match = of_match_device(bcm_kona_gpio_of_match, dev);
- if (!match) {
- dev_err(dev, "Failed to find gpio controller\n");
- return -ENODEV;
- }
-
kona_gpio = devm_kzalloc(dev, sizeof(*kona_gpio), GFP_KERNEL);
if (!kona_gpio)
return -ENOMEM;
@@ -596,15 +590,13 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
if (!kona_gpio->banks)
return -ENOMEM;
- kona_gpio->pdev = pdev;
- platform_set_drvdata(pdev, kona_gpio);
chip->parent = dev;
chip->ngpio = kona_gpio->num_bank * GPIO_PER_BANK;
- kona_gpio->irq_domain = irq_domain_add_linear(dev->of_node,
- chip->ngpio,
- &bcm_kona_irq_ops,
- kona_gpio);
+ kona_gpio->irq_domain = irq_domain_create_linear(dev_fwnode(dev),
+ chip->ngpio,
+ &bcm_kona_irq_ops,
+ kona_gpio);
if (!kona_gpio->irq_domain) {
dev_err(dev, "Couldn't allocate IRQ domain\n");
return -ENXIO;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 6566517fe0d8..bccdbfd5ec80 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -3,12 +3,12 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 75f6f8d4323e..d69a24dd4828 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -67,7 +67,7 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
}
-static const struct of_device_id __maybe_unused clps711x_gpio_ids[] = {
+static const struct of_device_id clps711x_gpio_ids[] = {
{ .compatible = "cirrus,ep7209-gpio" },
{ }
};
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(of, clps711x_gpio_ids);
static struct platform_driver clps711x_gpio_driver = {
.driver = {
.name = "clps711x-gpio",
- .of_match_table = of_match_ptr(clps711x_gpio_ids),
+ .of_match_table = clps711x_gpio_ids,
},
.probe = clps711x_gpio_probe,
};
diff --git a/drivers/gpio/gpio-creg-snps.c b/drivers/gpio/gpio-creg-snps.c
index 789384c6e178..4968232f70f2 100644
--- a/drivers/gpio/gpio-creg-snps.c
+++ b/drivers/gpio/gpio-creg-snps.c
@@ -8,7 +8,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#define MAX_GPIO 32
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index fff510d86e31..8db5717bdabe 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -236,7 +236,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
for (i = 0; i < nirq; i++) {
chips->irqs[i] = platform_get_irq(pdev, i);
if (chips->irqs[i] < 0)
- return dev_err_probe(dev, chips->irqs[i], "IRQ not populated\n");
+ return chips->irqs[i];
}
chips->chip.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-ds4520.c b/drivers/gpio/gpio-ds4520.c
new file mode 100644
index 000000000000..1903deaef3e9
--- /dev/null
+++ b/drivers/gpio/gpio-ds4520.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Analog Devices, Inc.
+ * Driver for the DS4520 I/O Expander
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/i2c.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define DS4520_PULLUP0 0xF0
+#define DS4520_IO_CONTROL0 0xF2
+#define DS4520_IO_STATUS0 0xF8
+
+static const struct regmap_config ds4520_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ds4520_gpio_probe(struct i2c_client *client)
+{
+ struct gpio_regmap_config config = { };
+ struct device *dev = &client->dev;
+ struct regmap *regmap;
+ u32 ngpio;
+ u32 base;
+ int ret;
+
+ ret = device_property_read_u32(dev, "reg", &base);
+ if (ret)
+ return dev_err_probe(dev, ret, "Missing 'reg' property.\n");
+
+ ret = device_property_read_u32(dev, "ngpios", &ngpio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Missing 'ngpios' property.\n");
+
+ regmap = devm_regmap_init_i2c(client, &ds4520_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to allocate register map\n");
+
+ config.regmap = regmap;
+ config.parent = dev;
+ config.ngpio = ngpio;
+
+ config.reg_dat_base = base + DS4520_IO_STATUS0;
+ config.reg_set_base = base + DS4520_PULLUP0;
+ config.reg_dir_out_base = base + DS4520_IO_CONTROL0;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
+}
+
+static const struct of_device_id ds4520_gpio_of_match_table[] = {
+ { .compatible = "adi,ds4520-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ds4520_gpio_of_match_table);
+
+static const struct i2c_device_id ds4520_gpio_id_table[] = {
+ { "ds4520-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ds4520_gpio_id_table);
+
+static struct i2c_driver ds4520_gpio_driver = {
+ .driver = {
+ .name = "ds4520-gpio",
+ .of_match_table = ds4520_gpio_of_match_table,
+ },
+ .probe = ds4520_gpio_probe,
+ .id_table = ds4520_gpio_id_table,
+};
+module_i2c_driver(ds4520_gpio_driver);
+
+MODULE_DESCRIPTION("DS4520 I/O Expander");
+MODULE_AUTHOR("Okan Sahin <okan.sahin@analog.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index 84352a6f4973..5320cf1de89c 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -9,7 +9,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -653,7 +653,6 @@ static int sprd_eic_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, sprd_eic);
return 0;
}
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index df1bdaae441c..5170fe7599cd 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -217,8 +217,6 @@ static int gpio_exar_probe(struct platform_device *pdev)
if (ret)
return ret;
- platform_set_drvdata(pdev, exar_gpio);
-
return 0;
}
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 31e26072f6ae..5ce59dcf02e3 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -250,8 +250,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
return PTR_ERR(g->base);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq ? irq : -EINVAL;
+ if (irq < 0)
+ return irq;
g->clk = devm_clk_get(dev, NULL);
if (!IS_ERR(g->clk)) {
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 7bd4c2a4cc11..5dc49648d8e3 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -1,29 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for GE FPGA based GPIO
*
* Author: Martyn Welch <martyn.welch@ge.com>
*
* 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
-/* TODO
+/*
+ * TODO:
*
- * Configuration of output modes (totem-pole/open-drain)
- * Interrupt configuration - interrupts are always generated the FPGA relies on
- * the I/O interrupt controllers mask to stop them propergating
+ * Configuration of output modes (totem-pole/open-drain).
+ * Interrupt configuration - interrupts are always generated, the FPGA relies
+ * on the I/O interrupt controllers mask to stop them from being propagated.
*/
-#include <linux/kernel.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
#define GEF_GPIO_DIRECT 0x00
#define GEF_GPIO_IN 0x04
@@ -52,46 +51,39 @@ MODULE_DEVICE_TABLE(of, gef_gpio_ids);
static int __init gef_gpio_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct gpio_chip *gc;
void __iomem *regs;
int ret;
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
- regs = of_iomap(pdev->dev.of_node, 0);
- if (!regs)
- return -ENOMEM;
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
- ret = bgpio_init(gc, &pdev->dev, 4, regs + GEF_GPIO_IN,
- regs + GEF_GPIO_OUT, NULL, NULL,
- regs + GEF_GPIO_DIRECT, BGPIOF_BIG_ENDIAN_BYTE_ORDER);
- if (ret) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
- goto err0;
- }
+ ret = bgpio_init(gc, dev, 4, regs + GEF_GPIO_IN, regs + GEF_GPIO_OUT,
+ NULL, NULL, regs + GEF_GPIO_DIRECT,
+ BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ if (ret)
+ return dev_err_probe(dev, ret, "bgpio_init failed\n");
/* Setup pointers to chip functions */
- gc->label = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
- if (!gc->label) {
- ret = -ENOMEM;
- goto err0;
- }
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
+ if (!gc->label)
+ return -ENOMEM;
gc->base = -1;
- gc->ngpio = (u16)(uintptr_t)of_device_get_match_data(&pdev->dev);
+ gc->ngpio = (uintptr_t)device_get_match_data(dev);
/* This function adds a memory mapped GPIO chip */
- ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ ret = devm_gpiochip_add_data(dev, gc, NULL);
if (ret)
- goto err0;
+ return dev_err_probe(dev, ret, "GPIO chip registration failed\n");
return 0;
-err0:
- iounmap(regs);
- pr_err("%pOF: GPIO chip registration failed\n", pdev->dev.of_node);
- return ret;
};
static struct platform_driver gef_gpio_driver = {
@@ -103,5 +95,5 @@ static struct platform_driver gef_gpio_driver = {
module_platform_driver_probe(gef_gpio_driver, gef_gpio_probe);
MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
-MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index bea0e32c195d..0163c95f6dd7 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -19,10 +19,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/err.h>
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index c208ac1c54a6..1bcfc1835dae 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 13315242d220..53b1eb876a12 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -3,143 +3,169 @@
* GPIO library for the ACCES IDIO-16 family
* Copyright (C) 2022 William Breathitt Gray
*/
-#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
-#include <linux/io.h>
+#include <linux/gpio/regmap.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include "gpio-idio-16.h"
#define DEFAULT_SYMBOL_NAMESPACE GPIO_IDIO_16
-/**
- * idio_16_get - get signal value at signal offset
- * @reg: ACCES IDIO-16 device registers
- * @state: ACCES IDIO-16 device state
- * @offset: offset of signal to get
- *
- * Returns the signal value (0=low, 1=high) for the signal at @offset.
- */
-int idio_16_get(struct idio_16 __iomem *const reg,
- struct idio_16_state *const state, const unsigned long offset)
-{
- const unsigned long mask = BIT(offset);
-
- if (offset < IDIO_16_NOUT)
- return test_bit(offset, state->out_state);
-
- if (offset < 24)
- return !!(ioread8(&reg->in0_7) & (mask >> IDIO_16_NOUT));
-
- if (offset < 32)
- return !!(ioread8(&reg->in8_15) & (mask >> 24));
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(idio_16_get);
-
-/**
- * idio_16_get_multiple - get multiple signal values at multiple signal offsets
- * @reg: ACCES IDIO-16 device registers
- * @state: ACCES IDIO-16 device state
- * @mask: mask of signals to get
- * @bits: bitmap to store signal values
- *
- * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask.
- */
-void idio_16_get_multiple(struct idio_16 __iomem *const reg,
- struct idio_16_state *const state,
- const unsigned long *const mask,
- unsigned long *const bits)
+#define IDIO_16_DAT_BASE 0x0
+#define IDIO_16_OUT_BASE IDIO_16_DAT_BASE
+#define IDIO_16_IN_BASE (IDIO_16_DAT_BASE + 1)
+#define IDIO_16_CLEAR_INTERRUPT 0x1
+#define IDIO_16_ENABLE_IRQ 0x2
+#define IDIO_16_DEACTIVATE_INPUT_FILTERS 0x3
+#define IDIO_16_DISABLE_IRQ IDIO_16_ENABLE_IRQ
+#define IDIO_16_INTERRUPT_STATUS 0x6
+
+#define IDIO_16_NGPIO 32
+#define IDIO_16_NGPIO_PER_REG 8
+#define IDIO_16_REG_STRIDE 4
+
+struct idio_16_data {
+ struct regmap *map;
+ unsigned int irq_mask;
+};
+
+static int idio_16_handle_mask_sync(const int index, const unsigned int mask_buf_def,
+ const unsigned int mask_buf, void *const irq_drv_data)
{
- unsigned long flags;
- const unsigned long out_mask = GENMASK(IDIO_16_NOUT - 1, 0);
-
- spin_lock_irqsave(&state->lock, flags);
-
- bitmap_replace(bits, bits, state->out_state, &out_mask, IDIO_16_NOUT);
- if (*mask & GENMASK(23, 16))
- bitmap_set_value8(bits, ioread8(&reg->in0_7), 16);
- if (*mask & GENMASK(31, 24))
- bitmap_set_value8(bits, ioread8(&reg->in8_15), 24);
-
- spin_unlock_irqrestore(&state->lock, flags);
+ struct idio_16_data *const data = irq_drv_data;
+ const unsigned int prev_mask = data->irq_mask;
+ int err;
+ unsigned int val;
+
+ /* exit early if no change since the previous mask */
+ if (mask_buf == prev_mask)
+ return 0;
+
+ /* remember the current mask for the next mask sync */
+ data->irq_mask = mask_buf;
+
+ /* if all previously masked, enable interrupts when unmasking */
+ if (prev_mask == mask_buf_def) {
+ err = regmap_write(data->map, IDIO_16_CLEAR_INTERRUPT, 0x00);
+ if (err)
+ return err;
+ return regmap_read(data->map, IDIO_16_ENABLE_IRQ, &val);
+ }
+
+ /* if all are currently masked, disable interrupts */
+ if (mask_buf == mask_buf_def)
+ return regmap_write(data->map, IDIO_16_DISABLE_IRQ, 0x00);
+
+ return 0;
}
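A note on the hardware behaviour this relies on, taken from the IDIO-16 register description removed from gpio-idio-16.h later in this diff: reading the Enable IRQ register enables interrupts and writing it disables them, which is why unmasking ends in a regmap_read() while masking uses a regmap_write():

	/* IDIO-16 IRQ control register semantics (see the removed kernel-doc below): */
	/*   read  of IDIO_16_ENABLE_IRQ   -> interrupts enabled  (regmap_read())    */
	/*   write to IDIO_16_DISABLE_IRQ  -> interrupts disabled (regmap_write())   */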
-EXPORT_SYMBOL_GPL(idio_16_get_multiple);
-/**
- * idio_16_set - set signal value at signal offset
- * @reg: ACCES IDIO-16 device registers
- * @state: ACCES IDIO-16 device state
- * @offset: offset of signal to set
- * @value: value of signal to set
- *
- * Assigns output @value for the signal at @offset.
- */
-void idio_16_set(struct idio_16 __iomem *const reg,
- struct idio_16_state *const state, const unsigned long offset,
- const unsigned long value)
+static int idio_16_reg_mask_xlate(struct gpio_regmap *const gpio, const unsigned int base,
+ const unsigned int offset, unsigned int *const reg,
+ unsigned int *const mask)
{
- unsigned long flags;
-
- if (offset >= IDIO_16_NOUT)
- return;
+ unsigned int stride;
- spin_lock_irqsave(&state->lock, flags);
+ /* Input lines start at GPIO 16 */
+ if (offset < 16) {
+ stride = offset / IDIO_16_NGPIO_PER_REG;
+ *reg = IDIO_16_OUT_BASE + stride * IDIO_16_REG_STRIDE;
+ } else {
+ stride = (offset - 16) / IDIO_16_NGPIO_PER_REG;
+ *reg = IDIO_16_IN_BASE + stride * IDIO_16_REG_STRIDE;
+ }
- __assign_bit(offset, state->out_state, value);
- if (offset < 8)
- iowrite8(bitmap_get_value8(state->out_state, 0), &reg->out0_7);
- else
- iowrite8(bitmap_get_value8(state->out_state, 8), &reg->out8_15);
+ *mask = BIT(offset % IDIO_16_NGPIO_PER_REG);
- spin_unlock_irqrestore(&state->lock, flags);
+ return 0;
}
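Two worked examples for the translation above, using the constants defined near the top of this file (IDIO_16_IN_BASE = 0x1, IDIO_16_NGPIO_PER_REG = 8, IDIO_16_REG_STRIDE = 4); they land on offsets 0x1 and 0x5, the in0_7/in8_15 registers of the legacy register map being replaced:

	/* offset 18: stride = (18 - 16) / 8 = 0  ->  *reg = 0x1 + 0 * 4 = 0x1, *mask = BIT(18 % 8) = BIT(2) */
	/* offset 25: stride = (25 - 16) / 8 = 1  ->  *reg = 0x1 + 1 * 4 = 0x5, *mask = BIT(25 % 8) = BIT(1) */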
-EXPORT_SYMBOL_GPL(idio_16_set);
-
-/**
- * idio_16_set_multiple - set signal values at multiple signal offsets
- * @reg: ACCES IDIO-16 device registers
- * @state: ACCES IDIO-16 device state
- * @mask: mask of signals to set
- * @bits: bitmap of signal output values
- *
- * Assigns output values defined by @bits for the signals defined by @mask.
- */
-void idio_16_set_multiple(struct idio_16 __iomem *const reg,
- struct idio_16_state *const state,
- const unsigned long *const mask,
- const unsigned long *const bits)
-{
- unsigned long flags;
- spin_lock_irqsave(&state->lock, flags);
-
- bitmap_replace(state->out_state, state->out_state, bits, mask,
- IDIO_16_NOUT);
- if (*mask & GENMASK(7, 0))
- iowrite8(bitmap_get_value8(state->out_state, 0), &reg->out0_7);
- if (*mask & GENMASK(15, 8))
- iowrite8(bitmap_get_value8(state->out_state, 8), &reg->out8_15);
-
- spin_unlock_irqrestore(&state->lock, flags);
-}
-EXPORT_SYMBOL_GPL(idio_16_set_multiple);
+static const char *idio_16_names[IDIO_16_NGPIO] = {
+ "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
+ "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
+ "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
+ "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15",
+};
/**
- * idio_16_state_init - initialize idio_16_state structure
- * @state: ACCES IDIO-16 device state
+ * devm_idio_16_regmap_register - Register an IDIO-16 GPIO device
+ * @dev: device that is registering this IDIO-16 GPIO device
+ * @config: configuration for idio_16_regmap_config
*
- * Initializes the ACCES IDIO-16 device @state for use in idio-16 library
- * functions.
+ * Registers an IDIO-16 GPIO device. Returns 0 on success and negative error number on failure.
*/
-void idio_16_state_init(struct idio_16_state *const state)
+int devm_idio_16_regmap_register(struct device *const dev,
+ const struct idio_16_regmap_config *const config)
{
- spin_lock_init(&state->lock);
+ struct gpio_regmap_config gpio_config = {};
+ int err;
+ struct idio_16_data *data;
+ struct regmap_irq_chip *chip;
+ struct regmap_irq_chip_data *chip_data;
+
+ if (!config->parent)
+ return -EINVAL;
+
+ if (!config->map)
+ return -EINVAL;
+
+ if (!config->regmap_irqs)
+ return -EINVAL;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->map = config->map;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->name = dev_name(dev);
+ chip->status_base = IDIO_16_INTERRUPT_STATUS;
+ chip->mask_base = IDIO_16_ENABLE_IRQ;
+ chip->ack_base = IDIO_16_CLEAR_INTERRUPT;
+ chip->no_status = config->no_status;
+ chip->num_regs = 1;
+ chip->irqs = config->regmap_irqs;
+ chip->num_irqs = config->num_regmap_irqs;
+ chip->handle_mask_sync = idio_16_handle_mask_sync;
+ chip->irq_drv_data = data;
+
+ /* Disable IRQ to prevent spurious interrupts before we're ready */
+ err = regmap_write(data->map, IDIO_16_DISABLE_IRQ, 0x00);
+ if (err)
+ return err;
+
+ err = devm_regmap_add_irq_chip(dev, data->map, config->irq, 0, 0, chip, &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ if (config->filters) {
+ /* Deactivate input filters */
+ err = regmap_write(data->map, IDIO_16_DEACTIVATE_INPUT_FILTERS, 0x00);
+ if (err)
+ return err;
+ }
+
+ gpio_config.parent = config->parent;
+ gpio_config.regmap = data->map;
+ gpio_config.ngpio = IDIO_16_NGPIO;
+ gpio_config.names = idio_16_names;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(IDIO_16_DAT_BASE);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(IDIO_16_DAT_BASE);
+ gpio_config.ngpio_per_reg = IDIO_16_NGPIO_PER_REG;
+ gpio_config.reg_stride = IDIO_16_REG_STRIDE;
+ gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
+ gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
-EXPORT_SYMBOL_GPL(idio_16_state_init);
+EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("ACCES IDIO-16 GPIO Library");
diff --git a/drivers/gpio/gpio-idio-16.h b/drivers/gpio/gpio-idio-16.h
index 928f8251a2bd..93b08ad73065 100644
--- a/drivers/gpio/gpio-idio-16.h
+++ b/drivers/gpio/gpio-idio-16.h
@@ -3,69 +3,30 @@
#ifndef _IDIO_16_H_
#define _IDIO_16_H_
-#include <linux/spinlock.h>
-#include <linux/types.h>
+struct device;
+struct regmap;
+struct regmap_irq;
/**
- * struct idio_16 - IDIO-16 registers structure
- * @out0_7: Read: FET Drive Outputs 0-7
- * Write: FET Drive Outputs 0-7
- * @in0_7: Read: Isolated Inputs 0-7
- * Write: Clear Interrupt
- * @irq_ctl: Read: Enable IRQ
- * Write: Disable IRQ
- * @filter_ctl: Read: Activate Input Filters 0-15
- * Write: Deactivate Input Filters 0-15
- * @out8_15: Read: FET Drive Outputs 8-15
- * Write: FET Drive Outputs 8-15
- * @in8_15: Read: Isolated Inputs 8-15
- * Write: Unused
- * @irq_status: Read: Interrupt status
- * Write: Unused
+ * struct idio_16_regmap_config - Configuration for the IDIO-16 register map
+ * @parent: parent device
+ * @map: regmap for the IDIO-16 device
+ * @regmap_irqs: descriptors for individual IRQs
+ * @num_regmap_irqs: number of IRQ descriptors
+ * @irq: IRQ number for the IDIO-16 device
+ * @no_status: device has no status register
+ * @filters: device has input filters
*/
-struct idio_16 {
- u8 out0_7;
- u8 in0_7;
- u8 irq_ctl;
- u8 filter_ctl;
- u8 out8_15;
- u8 in8_15;
- u8 irq_status;
+struct idio_16_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+ const struct regmap_irq *regmap_irqs;
+ int num_regmap_irqs;
+ unsigned int irq;
+ bool no_status;
+ bool filters;
};
-#define IDIO_16_NOUT 16
-
-/**
- * struct idio_16_state - IDIO-16 state structure
- * @lock: synchronization lock for accessing device state
- * @out_state: output signals state
- */
-struct idio_16_state {
- spinlock_t lock;
- DECLARE_BITMAP(out_state, IDIO_16_NOUT);
-};
-
-/**
- * idio_16_get_direction - get the I/O direction for a signal offset
- * @offset: offset of signal to get direction
- *
- * Returns the signal direction (0=output, 1=input) for the signal at @offset.
- */
-static inline int idio_16_get_direction(const unsigned long offset)
-{
- return (offset >= IDIO_16_NOUT) ? 1 : 0;
-}
-
-int idio_16_get(struct idio_16 __iomem *reg, struct idio_16_state *state,
- unsigned long offset);
-void idio_16_get_multiple(struct idio_16 __iomem *reg,
- struct idio_16_state *state,
- const unsigned long *mask, unsigned long *bits);
-void idio_16_set(struct idio_16 __iomem *reg, struct idio_16_state *state,
- unsigned long offset, unsigned long value);
-void idio_16_set_multiple(struct idio_16 __iomem *reg,
- struct idio_16_state *state,
- const unsigned long *mask, const unsigned long *bits);
-void idio_16_state_init(struct idio_16_state *state);
+int devm_idio_16_regmap_register(struct device *dev, const struct idio_16_regmap_config *config);
#endif /* _IDIO_16_H_ */
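
[Illustrative note, not part of the patch] The slimmed-down header above leaves a single entry point for IDIO-16 drivers. A minimal sketch of a consumer, assuming a hypothetical device whose regmap and IRQ descriptors are named example_* (the real PCI consumer appears later in these hunks):

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

#include "gpio-idio-16.h"

/* Hypothetical IRQ descriptors; a real driver defines one per input line. */
static const struct regmap_irq example_regmap_irqs[] = {
	{ .mask = BIT(2) },
};

static int example_idio_16_register(struct device *dev, struct regmap *map,
				    unsigned int irq)
{
	struct idio_16_regmap_config config = {};

	config.parent = dev;
	config.map = map;
	config.regmap_irqs = example_regmap_irqs;
	config.num_regmap_irqs = ARRAY_SIZE(example_regmap_irqs);
	config.irq = irq;
	config.filters = true;	/* the device exposes input filters */

	return devm_idio_16_regmap_register(dev, &config);
}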
diff --git a/drivers/gpio/gpio-imx-scu.c b/drivers/gpio/gpio-imx-scu.c
index e190bde5397d..13baf465aedf 100644
--- a/drivers/gpio/gpio-imx-scu.c
+++ b/drivers/gpio/gpio-imx-scu.c
@@ -6,6 +6,7 @@
* to control the PIN resources on SCU domain.
*/
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
@@ -103,7 +104,7 @@ static int imx_scu_gpio_probe(struct platform_device *pdev)
gc = &priv->chip;
gc->base = -1;
gc->parent = dev;
- gc->ngpio = sizeof(scu_rsrc_arr)/sizeof(unsigned int);
+ gc->ngpio = ARRAY_SIZE(scu_rsrc_arr);
gc->label = dev_name(dev);
gc->get = imx_scu_gpio_get;
gc->set = imx_scu_gpio_set;
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 1e29de1671d4..dde6cf3a5779 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -302,7 +302,7 @@ static const struct of_device_id ixp4xx_gpio_of_match[] = {
static struct platform_driver ixp4xx_gpio_driver = {
.driver = {
.name = "ixp4xx-gpio",
- .of_match_table = of_match_ptr(ixp4xx_gpio_of_match),
+ .of_match_table = ixp4xx_gpio_of_match,
},
.probe = ixp4xx_gpio_probe,
};
diff --git a/drivers/gpio/gpio-logicvc.c b/drivers/gpio/gpio-logicvc.c
index 992cc958a43f..05d62011f335 100644
--- a/drivers/gpio/gpio-logicvc.c
+++ b/drivers/gpio/gpio-logicvc.c
@@ -8,7 +8,6 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -139,8 +138,6 @@ static int logicvc_gpio_probe(struct platform_device *pdev)
logicvc->chip.set = logicvc_gpio_set;
logicvc->chip.direction_output = logicvc_gpio_direction_output;
- platform_set_drvdata(pdev, logicvc);
-
return devm_gpiochip_add_data(dev, &logicvc->chip, logicvc);
}
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index 79edd5db49d2..8e58242f5123 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -199,8 +199,6 @@ static int lp3943_gpio_probe(struct platform_device *pdev)
lp3943_gpio->chip = lp3943_gpio_chip;
lp3943_gpio->chip.parent = &pdev->dev;
- platform_set_drvdata(pdev, lp3943_gpio);
-
return devm_gpiochip_add_data(&pdev->dev, &lp3943_gpio->chip,
lp3943_gpio);
}
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d2b65cfb336e..5ef8af824980 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -525,17 +525,15 @@ static int lpc32xx_gpio_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
-#endif
static struct platform_driver lpc32xx_gpio_driver = {
.driver = {
.name = "lpc32xx-gpio",
- .of_match_table = of_match_ptr(lpc32xx_gpio_of_match),
+ .of_match_table = lpc32xx_gpio_of_match,
},
.probe = lpc32xx_gpio_probe,
};
diff --git a/drivers/gpio/gpio-max3191x.c b/drivers/gpio/gpio-max3191x.c
index 161c4751c5f7..bbacc714632b 100644
--- a/drivers/gpio/gpio-max3191x.c
+++ b/drivers/gpio/gpio-max3191x.c
@@ -457,7 +457,6 @@ static int __init max3191x_register_driver(struct spi_driver *sdrv)
return spi_register_driver(sdrv);
}
-#ifdef CONFIG_OF
static const struct of_device_id max3191x_of_id[] = {
{ .compatible = "maxim,max31910" },
{ .compatible = "maxim,max31911" },
@@ -468,7 +467,6 @@ static const struct of_device_id max3191x_of_id[] = {
{ }
};
MODULE_DEVICE_TABLE(of, max3191x_of_id);
-#endif
static const struct spi_device_id max3191x_spi_id[] = {
{ "max31910" },
@@ -484,7 +482,7 @@ MODULE_DEVICE_TABLE(spi, max3191x_spi_id);
static struct spi_driver max3191x_driver = {
.driver = {
.name = "max3191x",
- .of_match_table = of_match_ptr(max3191x_of_id),
+ .of_match_table = max3191x_of_id,
},
.probe = max3191x_probe,
.remove = max3191x_remove,
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index fca9ca68e387..49d362907bc7 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -18,8 +18,6 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/platform_data/max732x.h>
-#include <linux/of.h>
-
/*
* Each port of MAX732x (including MAX7319) falls into one of the
@@ -114,7 +112,6 @@ static const struct i2c_device_id max732x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max732x_id);
-#ifdef CONFIG_OF
static const struct of_device_id max732x_of_table[] = {
{ .compatible = "maxim,max7319" },
{ .compatible = "maxim,max7320" },
@@ -128,7 +125,6 @@ static const struct of_device_id max732x_of_table[] = {
{ }
};
MODULE_DEVICE_TABLE(of, max732x_of_table);
-#endif
struct max732x_chip {
struct gpio_chip gpio_chip;
@@ -709,7 +705,7 @@ static int max732x_probe(struct i2c_client *client)
static struct i2c_driver max732x_driver = {
.driver = {
.name = "max732x",
- .of_match_table = of_match_ptr(max732x_of_table),
+ .of_match_table = max732x_of_table,
},
.probe = max732x_probe,
.id_table = max732x_id,
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index c18b60e39a94..8c2a5609161f 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -331,8 +331,6 @@ static int max77620_gpio_probe(struct platform_device *pdev)
girq->init_hw = max77620_gpio_irq_init_hw;
girq->threaded = true;
- platform_set_drvdata(pdev, mgpio);
-
ret = devm_gpiochip_add_data(&pdev->dev, &mgpio->gpio_chip, mgpio);
if (ret < 0) {
dev_err(&pdev->dev, "gpio_init: Failed to add max77620_gpio\n");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 37c5363e391e..ca7eb5e8bfaa 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -10,11 +10,11 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/clk.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioport.h>
-#include <linux/of_device.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index e30cee108986..7a3e1760fc5b 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Copyright (C) 2022 NVIDIA CORPORATION & AFFILIATES */
#include <linux/bitfield.h>
@@ -19,6 +19,8 @@
* gpio[1]: HOST_GPIO32->HOST_GPIO55
*/
#define MLXBF3_GPIO_MAX_PINS_PER_BLOCK 32
+#define MLXBF3_GPIO_MAX_PINS_BLOCK0 32
+#define MLXBF3_GPIO_MAX_PINS_BLOCK1 24
/*
* fw_gpio[x] block registers and their offset
@@ -158,6 +160,26 @@ static const struct irq_chip gpio_mlxbf3_irqchip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
+static int mlxbf3_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ unsigned int id;
+
+	switch (chip->ngpio) {
+ case MLXBF3_GPIO_MAX_PINS_BLOCK0:
+ id = 0;
+ break;
+ case MLXBF3_GPIO_MAX_PINS_BLOCK1:
+ id = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return gpiochip_add_pin_range(chip, "MLNXBF34:00",
+ chip->base, id * MLXBF3_GPIO_MAX_PINS_PER_BLOCK,
+ chip->ngpio);
+}
+
static int mlxbf3_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -197,6 +219,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->owner = THIS_MODULE;
+ gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
@@ -243,6 +266,7 @@ static struct platform_driver mlxbf3_gpio_driver = {
};
module_platform_driver(mlxbf3_gpio_driver);
+MODULE_SOFTDEP("pre: pinctrl-mlxbf3");
MODULE_DESCRIPTION("NVIDIA BlueField-3 GPIO Driver");
MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index d9dff3dc92ae..74fdf0d87b2c 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -60,6 +60,8 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/of.h>
#include <linux/of_device.h>
+#include "gpiolib.h"
+
static void bgpio_write8(void __iomem *reg, unsigned long data)
{
writeb(data, reg);
@@ -614,10 +616,15 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
gc->parent = dev;
gc->label = dev_name(dev);
gc->base = -1;
- gc->ngpio = gc->bgpio_bits;
gc->request = bgpio_request;
gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
+ ret = gpiochip_get_ngpios(gc, dev);
+ if (ret)
+ gc->ngpio = gc->bgpio_bits;
+ else
+ gc->bgpio_bits = roundup_pow_of_two(round_up(gc->ngpio, 8));
+
ret = bgpio_setup_io(gc, dat, set, clr, flags);
if (ret)
return ret;
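
[Illustrative note, not part of the patch] bgpio_init() now asks gpiolib for the line count (typically the "ngpios" property) via gpiochip_get_ngpios() and only falls back to the register width; when the property is found, the shadow-register width is re-derived from it. A worked sketch of that rounding, with hypothetical line counts:

#include <linux/log2.h>
#include <linux/math.h>

/* Mirrors the expression used above: pad to whole bytes, then take the
 * smallest power-of-two register width that holds the requested lines.
 */
static unsigned int example_bgpio_bits(unsigned int ngpio)
{
	return roundup_pow_of_two(round_up(ngpio, 8));
}

/* example_bgpio_bits(18) == 32  (18 -> 24 bits -> next power of two is 32) */
/* example_bgpio_bits(8)  ==  8  (already byte-aligned and a power of two)  */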
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 3b0bfff8c778..b49e3ca64015 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -10,7 +10,7 @@
#include <linux/slab.h>
#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/io.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 5979a36bf754..ebf2f511df59 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,12 +9,10 @@
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 036ad2324892..2f448eb23abb 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gpio/driver.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 9d0cec4b82a3..4cb455b2bdee 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -17,12 +17,12 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/bug.h>
#define IMX_SCU_WAKEUP_OFF 0
@@ -62,6 +62,7 @@ struct mxc_gpio_port {
struct clk *clk;
int irq;
int irq_high;
+ void (*mx_irq_handler)(struct irq_desc *desc);
struct irq_domain *domain;
struct gpio_chip gc;
struct device *dev;
@@ -382,6 +383,41 @@ static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return irq_find_mapping(port->domain, offset);
}
+static int mxc_gpio_request(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+
+ ret = gpiochip_generic_request(chip, offset);
+ if (ret)
+ return ret;
+
+ return pm_runtime_resume_and_get(chip->parent);
+}
+
+static void mxc_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ gpiochip_generic_free(chip, offset);
+ pm_runtime_put(chip->parent);
+}
+
+static void mxc_update_irq_chained_handler(struct mxc_gpio_port *port, bool enable)
+{
+ if (enable)
+ irq_set_chained_handler_and_data(port->irq, port->mx_irq_handler, port);
+ else
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ /* setup handler for GPIO 16 to 31 */
+ if (port->irq_high > 0) {
+ if (enable)
+ irq_set_chained_handler_and_data(port->irq_high,
+ port->mx_irq_handler,
+ port);
+ else
+ irq_set_chained_handler_and_data(port->irq_high, NULL, NULL);
+ }
+}
+
static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -416,19 +452,17 @@ static int mxc_gpio_probe(struct platform_device *pdev)
return port->irq;
/* the controller clock is optional */
- port->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ port->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(port->clk))
return PTR_ERR(port->clk);
- err = clk_prepare_enable(port->clk);
- if (err) {
- dev_err(&pdev->dev, "Unable to enable clock.\n");
- return err;
- }
-
if (of_device_is_compatible(np, "fsl,imx7d-gpio"))
port->power_off = true;
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
/* disable the interrupt and clear the status */
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
@@ -439,18 +473,12 @@ static int mxc_gpio_probe(struct platform_device *pdev)
* the handler is needed only once, but doing it for every port
* is more robust and easier.
*/
- irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
- } else {
- /* setup one handler for each entry */
- irq_set_chained_handler_and_data(port->irq,
- mx3_gpio_irq_handler, port);
- if (port->irq_high > 0)
- /* setup handler for GPIO 16 to 31 */
- irq_set_chained_handler_and_data(port->irq_high,
- mx3_gpio_irq_handler,
- port);
- }
+ port->irq_high = -1;
+ port->mx_irq_handler = mx2_gpio_irq_handler;
+ } else
+ port->mx_irq_handler = mx3_gpio_irq_handler;
+ mxc_update_irq_chained_handler(port, true);
err = bgpio_init(&port->gc, &pdev->dev, 4,
port->base + GPIO_PSR,
port->base + GPIO_DR, NULL,
@@ -459,8 +487,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgio;
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
+ port->gc.request = mxc_gpio_request;
+ port->gc.free = mxc_gpio_free;
port->gc.to_irq = mxc_gpio_to_irq;
port->gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
pdev->id * 32;
@@ -482,6 +510,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
+ irq_domain_set_pm_device(port->domain, &pdev->dev);
+
/* gpio-mxc can be a generic irq chip */
err = mxc_gpio_init_gc(port, irq_base);
if (err < 0)
@@ -490,13 +520,15 @@ static int mxc_gpio_probe(struct platform_device *pdev)
list_add_tail(&port->node, &mxc_gpio_ports);
platform_set_drvdata(pdev, port);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
out_irqdomain_remove:
irq_domain_remove(port->domain);
out_bgio:
- clk_disable_unprepare(port->clk);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
return err;
}
@@ -572,7 +604,35 @@ static bool mxc_gpio_set_pad_wakeup(struct mxc_gpio_port *port, bool enable)
return ret;
}
-static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
+static int mxc_gpio_runtime_suspend(struct device *dev)
+{
+ struct mxc_gpio_port *port = dev_get_drvdata(dev);
+
+ mxc_gpio_save_regs(port);
+ clk_disable_unprepare(port->clk);
+ mxc_update_irq_chained_handler(port, false);
+
+ return 0;
+}
+
+static int mxc_gpio_runtime_resume(struct device *dev)
+{
+ struct mxc_gpio_port *port = dev_get_drvdata(dev);
+ int ret;
+
+ mxc_update_irq_chained_handler(port, true);
+ ret = clk_prepare_enable(port->clk);
+ if (ret) {
+ mxc_update_irq_chained_handler(port, false);
+ return ret;
+ }
+
+ mxc_gpio_restore_regs(port);
+
+ return 0;
+}
+
+static int mxc_gpio_noirq_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mxc_gpio_port *port = platform_get_drvdata(pdev);
@@ -583,7 +643,7 @@ static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
+static int mxc_gpio_noirq_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct mxc_gpio_port *port = platform_get_drvdata(pdev);
@@ -596,15 +656,20 @@ static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+ RUNTIME_PM_OPS(mxc_gpio_runtime_suspend, mxc_gpio_runtime_resume, NULL)
};
static int mxc_gpio_syscore_suspend(void)
{
struct mxc_gpio_port *port;
+ int ret;
/* walk through all ports */
list_for_each_entry(port, &mxc_gpio_ports, node) {
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ return ret;
mxc_gpio_save_regs(port);
clk_disable_unprepare(port->clk);
}
@@ -625,6 +690,7 @@ static void mxc_gpio_syscore_resume(void)
return;
}
mxc_gpio_restore_regs(port);
+ clk_disable_unprepare(port->clk);
}
}
@@ -638,7 +704,7 @@ static struct platform_driver mxc_gpio_driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
.suppress_bind_attrs = true,
- .pm = &mxc_gpio_dev_pm_ops,
+ .pm = pm_ptr(&mxc_gpio_dev_pm_ops),
},
.probe = mxc_gpio_probe,
};
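
[Illustrative note, not part of the patch] The PM table above pairs no-IRQ system-sleep callbacks with runtime-PM callbacks and is referenced through pm_ptr(), so the whole table is discarded when CONFIG_PM is disabled. A bare-bones sketch of that wiring with hypothetical example_* callbacks:

#include <linux/device.h>
#include <linux/pm.h>

static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }
static int example_noirq_suspend(struct device *dev)   { return 0; }
static int example_noirq_resume(struct device *dev)    { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(example_noirq_suspend, example_noirq_resume)
	RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

/* Hooked up as .driver.pm = pm_ptr(&example_pm_ops); unlike the old SET_*
 * macros, the callbacks are always referenced, so no __maybe_unused is needed.
 */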
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 390e619a2831..024ad077e98d 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -14,7 +14,6 @@
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/driver.h>
@@ -272,7 +271,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
port->id = of_alias_get_id(np, "gpio");
if (port->id < 0)
return port->id;
- port->devid = (enum mxs_gpio_id)of_device_get_match_data(&pdev->dev);
+ port->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
port->dev = &pdev->dev;
port->irq = platform_get_irq(pdev, 0);
if (port->irq < 0)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index a08be5bf6808..a927680c66f8 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -22,7 +22,6 @@
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>
@@ -1413,11 +1412,8 @@ static int omap_gpio_probe(struct platform_device *pdev)
bank->dev = dev;
bank->irq = platform_get_irq(pdev, 0);
- if (bank->irq <= 0) {
- if (!bank->irq)
- bank->irq = -ENXIO;
- return dev_err_probe(dev, bank->irq, "can't get irq resource\n");
- }
+ if (bank->irq < 0)
+ return bank->irq;
bank->chip.parent = dev;
bank->chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index bac10c2faf56..28dba7048509 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/mfd/palmas.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
struct palmas_gpio {
@@ -184,7 +183,6 @@ static int palmas_gpio_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, palmas_gpio);
return ret;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index a806a3c1b801..bdd50a78e414 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -108,6 +108,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
+ { "tca9538", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
{ "tca9554", 8 | PCA953X_TYPE | PCA_INT, },
{ "xra1202", 8 | PCA953X_TYPE },
@@ -1051,7 +1052,6 @@ out:
static int pca953x_probe(struct i2c_client *client)
{
- const struct i2c_device_id *i2c_id = i2c_client_get_device_id(client);
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
int irq_base = 0;
@@ -1090,6 +1090,9 @@ static int pca953x_probe(struct i2c_client *client)
}
chip->client = client;
+ chip->driver_data = (uintptr_t)i2c_get_match_data(client);
+ if (!chip->driver_data)
+ return -ENODEV;
reg = devm_regulator_get(&client->dev, "vcc");
if (IS_ERR(reg))
@@ -1102,20 +1105,6 @@ static int pca953x_probe(struct i2c_client *client)
}
chip->regulator = reg;
- if (i2c_id) {
- chip->driver_data = i2c_id->driver_data;
- } else {
- const void *match;
-
- match = device_get_match_data(&client->dev);
- if (!match) {
- ret = -ENODEV;
- goto err_exit;
- }
-
- chip->driver_data = (uintptr_t)match;
- }
-
i2c_set_clientdata(client, chip);
pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
@@ -1354,6 +1343,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca9539", .data = OF_953X(16, PCA_INT), },
{ .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index d8db80ef1293..d37ba4049368 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -30,7 +30,7 @@ struct pca9570_chip_data {
/**
* struct pca9570 - GPIO driver data
* @chip: GPIO controller chip
- * @p_data: GPIO controller platform data
+ * @chip_data: GPIO controller platform data
* @lock: Protects write sequences
* @out: Buffer for device register
*/
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index c4c785548408..53b69abe6787 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -36,19 +36,19 @@ static const struct i2c_device_id pcf857x_id[] = {
MODULE_DEVICE_TABLE(i2c, pcf857x_id);
static const struct of_device_id pcf857x_of_table[] = {
- { .compatible = "nxp,pcf8574" },
- { .compatible = "nxp,pcf8574a" },
- { .compatible = "nxp,pca8574" },
- { .compatible = "nxp,pca9670" },
- { .compatible = "nxp,pca9672" },
- { .compatible = "nxp,pca9674" },
- { .compatible = "nxp,pcf8575" },
- { .compatible = "nxp,pca8575" },
- { .compatible = "nxp,pca9671" },
- { .compatible = "nxp,pca9673" },
- { .compatible = "nxp,pca9675" },
- { .compatible = "maxim,max7328" },
- { .compatible = "maxim,max7329" },
+ { .compatible = "nxp,pcf8574", (void *)8 },
+ { .compatible = "nxp,pcf8574a", (void *)8 },
+ { .compatible = "nxp,pca8574", (void *)8 },
+ { .compatible = "nxp,pca9670", (void *)8 },
+ { .compatible = "nxp,pca9672", (void *)8 },
+ { .compatible = "nxp,pca9674", (void *)8 },
+ { .compatible = "nxp,pcf8575", (void *)16 },
+ { .compatible = "nxp,pca8575", (void *)16 },
+ { .compatible = "nxp,pca9671", (void *)16 },
+ { .compatible = "nxp,pca9673", (void *)16 },
+ { .compatible = "nxp,pca9675", (void *)16 },
+ { .compatible = "maxim,max7328", (void *)8 },
+ { .compatible = "maxim,max7329", (void *)8 },
{ }
};
MODULE_DEVICE_TABLE(of, pcf857x_of_table);
@@ -272,7 +272,6 @@ static const struct irq_chip pcf857x_irq_chip = {
static int pcf857x_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct pcf857x *gpio;
unsigned int n_latch = 0;
int status;
@@ -296,7 +295,7 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.set_multiple = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
- gpio->chip.ngpio = id->driver_data;
+ gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
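
[Illustrative note, not part of the patch] With the port width carried in the OF match table's .data (and still present in the legacy i2c_device_id table), i2c_get_match_data() returns whichever entry matched, covering OF, ACPI and plain I2C ID matching in one call. A reduced sketch with a hypothetical two-entry table:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>

static const struct of_device_id example_of_table[] = {
	{ .compatible = "nxp,pcf8574", (void *)8 },
	{ .compatible = "nxp,pcf8575", (void *)16 },
	{ }
};

static unsigned int example_ngpio(struct i2c_client *client)
{
	/* i2c_get_match_data() returns NULL if nothing matched; check it. */
	return (uintptr_t)i2c_get_match_data(client);
}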
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 6726c32e31e6..44c0a21b1d1d 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -5,214 +5,75 @@
*/
#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/err.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include "gpio-idio-16.h"
-/**
- * struct idio_16_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @lock: synchronization lock to prevent I/O race conditions
- * @reg: I/O address offset for the GPIO device registers
- * @state: ACCES IDIO-16 device state
- * @irq_mask: I/O bits affected by interrupts
- */
-struct idio_16_gpio {
- struct gpio_chip chip;
- raw_spinlock_t lock;
- struct idio_16 __iomem *reg;
- struct idio_16_state state;
- unsigned long irq_mask;
+static const struct regmap_range idio_16_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x2), regmap_reg_range(0x3, 0x4),
};
-
-static int idio_16_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
-{
- if (idio_16_get_direction(offset))
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int idio_16_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- return 0;
-}
-
-static int idio_16_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- chip->set(chip, offset, value);
- return 0;
-}
-
-static int idio_16_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- return idio_16_get(idio16gpio->reg, &idio16gpio->state, offset);
-}
-
-static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- idio_16_get_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits);
- return 0;
-}
-
-static void idio_16_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- idio_16_set(idio16gpio->reg, &idio16gpio->state, offset, value);
-}
-
-static void idio_16_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
-
- idio_16_set_multiple(idio16gpio->reg, &idio16gpio->state, mask, bits);
-}
-
-static void idio_16_irq_ack(struct irq_data *data)
-{
-}
-
-static void idio_16_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
-
- idio16gpio->irq_mask &= ~mask;
-
- if (!idio16gpio->irq_mask) {
- raw_spin_lock_irqsave(&idio16gpio->lock, flags);
-
- iowrite8(0, &idio16gpio->reg->irq_ctl);
-
- raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
- }
-
- gpiochip_disable_irq(chip, irqd_to_hwirq(data));
-}
-
-static void idio_16_irq_unmask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
- const unsigned long prev_irq_mask = idio16gpio->irq_mask;
- unsigned long flags;
-
- gpiochip_enable_irq(chip, irqd_to_hwirq(data));
-
- idio16gpio->irq_mask |= mask;
-
- if (!prev_irq_mask) {
- raw_spin_lock_irqsave(&idio16gpio->lock, flags);
-
- ioread8(&idio16gpio->reg->irq_ctl);
-
- raw_spin_unlock_irqrestore(&idio16gpio->lock, flags);
- }
-}
-
-static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- /* The only valid irq types are none and both-edges */
- if (flow_type != IRQ_TYPE_NONE &&
- (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip idio_16_irqchip = {
- .name = "pci-idio-16",
- .irq_ack = idio_16_irq_ack,
- .irq_mask = idio_16_irq_mask,
- .irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
+static const struct regmap_range idio_16_rd_ranges[] = {
+ regmap_reg_range(0x1, 0x2), regmap_reg_range(0x5, 0x6),
};
-
-static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
-{
- struct idio_16_gpio *const idio16gpio = dev_id;
- unsigned int irq_status;
- struct gpio_chip *const chip = &idio16gpio->chip;
- int gpio;
-
- raw_spin_lock(&idio16gpio->lock);
-
- irq_status = ioread8(&idio16gpio->reg->irq_status);
-
- raw_spin_unlock(&idio16gpio->lock);
-
- /* Make sure our device generated IRQ */
- if (!(irq_status & 0x3) || !(irq_status & 0x4))
- return IRQ_NONE;
-
- for_each_set_bit(gpio, &idio16gpio->irq_mask, chip->ngpio)
- generic_handle_domain_irq(chip->irq.domain, gpio);
-
- raw_spin_lock(&idio16gpio->lock);
-
- /* Clear interrupt */
- iowrite8(0, &idio16gpio->reg->in0_7);
-
- raw_spin_unlock(&idio16gpio->lock);
-
- return IRQ_HANDLED;
-}
-
-#define IDIO_16_NGPIO 32
-static const char *idio_16_names[IDIO_16_NGPIO] = {
- "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
- "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
- "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
- "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15"
+static const struct regmap_range idio_16_precious_ranges[] = {
+ regmap_reg_range(0x2, 0x2),
+};
+static const struct regmap_access_table idio_16_wr_table = {
+ .yes_ranges = idio_16_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_16_wr_ranges),
+};
+static const struct regmap_access_table idio_16_rd_table = {
+ .yes_ranges = idio_16_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_16_rd_ranges),
+};
+static const struct regmap_access_table idio_16_precious_table = {
+ .yes_ranges = idio_16_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_16_precious_ranges),
+};
+static const struct regmap_config idio_16_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .wr_table = &idio_16_wr_table,
+ .rd_table = &idio_16_rd_table,
+ .volatile_table = &idio_16_rd_table,
+ .precious_table = &idio_16_precious_table,
+ .cache_type = REGCACHE_FLAT,
+ .use_raw_spinlock = true,
};
-static int idio_16_irq_init_hw(struct gpio_chip *gc)
-{
- struct idio_16_gpio *const idio16gpio = gpiochip_get_data(gc);
-
- /* Disable IRQ by default and clear any pending interrupt */
- iowrite8(0, &idio16gpio->reg->irq_ctl);
- iowrite8(0, &idio16gpio->reg->in0_7);
+/* Only input lines (GPIO 16-31) support interrupts */
+#define IDIO_16_REGMAP_IRQ(_id) \
+ [16 + _id] = { \
+ .mask = BIT(2), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \
+ }
- return 0;
-}
+static const struct regmap_irq idio_16_regmap_irqs[] = {
+ IDIO_16_REGMAP_IRQ(0), IDIO_16_REGMAP_IRQ(1), IDIO_16_REGMAP_IRQ(2), /* 0-2 */
+ IDIO_16_REGMAP_IRQ(3), IDIO_16_REGMAP_IRQ(4), IDIO_16_REGMAP_IRQ(5), /* 3-5 */
+ IDIO_16_REGMAP_IRQ(6), IDIO_16_REGMAP_IRQ(7), IDIO_16_REGMAP_IRQ(8), /* 6-8 */
+ IDIO_16_REGMAP_IRQ(9), IDIO_16_REGMAP_IRQ(10), IDIO_16_REGMAP_IRQ(11), /* 9-11 */
+ IDIO_16_REGMAP_IRQ(12), IDIO_16_REGMAP_IRQ(13), IDIO_16_REGMAP_IRQ(14), /* 12-14 */
+ IDIO_16_REGMAP_IRQ(15), /* 15 */
+};
static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *const dev = &pdev->dev;
- struct idio_16_gpio *idio16gpio;
int err;
const size_t pci_bar_index = 2;
const char *const name = pci_name(pdev);
- struct gpio_irq_chip *girq;
-
- idio16gpio = devm_kzalloc(dev, sizeof(*idio16gpio), GFP_KERNEL);
- if (!idio16gpio)
- return -ENOMEM;
+ struct idio_16_regmap_config config = {};
+ void __iomem *regs;
+ struct regmap *map;
err = pcim_enable_device(pdev);
if (err) {
@@ -226,53 +87,20 @@ static int idio_16_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- idio16gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+ regs = pcim_iomap_table(pdev)[pci_bar_index];
- /* Deactivate input filters */
- iowrite8(0, &idio16gpio->reg->filter_ctl);
+ map = devm_regmap_init_mmio(dev, regs, &idio_16_regmap_config);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n");
- idio16gpio->chip.label = name;
- idio16gpio->chip.parent = dev;
- idio16gpio->chip.owner = THIS_MODULE;
- idio16gpio->chip.base = -1;
- idio16gpio->chip.ngpio = IDIO_16_NGPIO;
- idio16gpio->chip.names = idio_16_names;
- idio16gpio->chip.get_direction = idio_16_gpio_get_direction;
- idio16gpio->chip.direction_input = idio_16_gpio_direction_input;
- idio16gpio->chip.direction_output = idio_16_gpio_direction_output;
- idio16gpio->chip.get = idio_16_gpio_get;
- idio16gpio->chip.get_multiple = idio_16_gpio_get_multiple;
- idio16gpio->chip.set = idio_16_gpio_set;
- idio16gpio->chip.set_multiple = idio_16_gpio_set_multiple;
-
- idio_16_state_init(&idio16gpio->state);
-
- girq = &idio16gpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = idio_16_irq_init_hw;
-
- raw_spin_lock_init(&idio16gpio->lock);
-
- err = devm_gpiochip_add_data(dev, &idio16gpio->chip, idio16gpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
-
- err = devm_request_irq(dev, pdev->irq, idio_16_irq_handler, IRQF_SHARED,
- name, idio16gpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- return err;
- }
+ config.parent = dev;
+ config.map = map;
+ config.regmap_irqs = idio_16_regmap_irqs;
+ config.num_regmap_irqs = ARRAY_SIZE(idio_16_regmap_irqs);
+ config.irq = pdev->irq;
+ config.filters = true;
- return 0;
+ return devm_idio_16_regmap_register(dev, &config);
}
static const struct pci_device_id idio_16_pci_dev_id[] = {
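
[Illustrative note, not part of the patch] The regmap configuration above replaces the hand-rolled MMIO struct: the write and read tables describe which port-I/O registers may be touched, the readable set doubles as the volatile set so the flat cache never serves stale input or status values, and the precious range covers the register whose read itself has a side effect (per the old register comments, reading offset 0x2 enables the IRQ). A stripped-down sketch of the same shape for a hypothetical two-out/two-in device:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_range example_wr_ranges[] = {
	regmap_reg_range(0x0, 0x1),	/* output data registers */
};
static const struct regmap_range example_rd_ranges[] = {
	regmap_reg_range(0x2, 0x3),	/* input data / status registers */
};
static const struct regmap_access_table example_wr_table = {
	.yes_ranges = example_wr_ranges,
	.n_yes_ranges = ARRAY_SIZE(example_wr_ranges),
};
static const struct regmap_access_table example_rd_table = {
	.yes_ranges = example_rd_ranges,
	.n_yes_ranges = ARRAY_SIZE(example_rd_ranges),
};

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.io_port = true,			/* accessors suited to port I/O */
	.wr_table = &example_wr_table,
	.rd_table = &example_rd_table,
	.volatile_table = &example_rd_table,	/* inputs are never cached */
	.cache_type = REGCACHE_FLAT,
};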
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
index 463c0613abb9..2efd1b1a0805 100644
--- a/drivers/gpio/gpio-pcie-idio-24.c
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -6,16 +6,15 @@
* This driver supports the following ACCES devices: PCIe-IDIO-24,
* PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12.
*/
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/err.h>
+#include <linux/gpio/regmap.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -59,422 +58,224 @@
#define PLX_PEX8311_PCI_LCS_INTCSR 0x68
#define INTCSR_INTERNAL_PCI_WIRE BIT(8)
#define INTCSR_LOCAL_INPUT BIT(11)
+#define IDIO_24_ENABLE_IRQ (INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT)
+
+#define IDIO_24_OUT_BASE 0x0
+#define IDIO_24_TTLCMOS_OUT_REG 0x3
+#define IDIO_24_IN_BASE 0x4
+#define IDIO_24_TTLCMOS_IN_REG 0x7
+#define IDIO_24_COS_STATUS_BASE 0x8
+#define IDIO_24_CONTROL_REG 0xC
+#define IDIO_24_COS_ENABLE 0xE
+#define IDIO_24_SOFT_RESET 0xF
+
+#define CONTROL_REG_OUT_MODE BIT(1)
+
+#define COS_ENABLE_RISING BIT(1)
+#define COS_ENABLE_FALLING BIT(4)
+#define COS_ENABLE_BOTH (COS_ENABLE_RISING | COS_ENABLE_FALLING)
+
+static const struct regmap_config pex8311_intcsr_regmap_config = {
+ .name = "pex8311_intcsr",
+ .reg_bits = 32,
+ .reg_stride = 1,
+ .reg_base = PLX_PEX8311_PCI_LCS_INTCSR,
+ .val_bits = 32,
+ .io_port = true,
+};
-/**
- * struct idio_24_gpio_reg - GPIO device registers structure
- * @out0_7: Read: FET Outputs 0-7
- * Write: FET Outputs 0-7
- * @out8_15: Read: FET Outputs 8-15
- * Write: FET Outputs 8-15
- * @out16_23: Read: FET Outputs 16-23
- * Write: FET Outputs 16-23
- * @ttl_out0_7: Read: TTL/CMOS Outputs 0-7
- * Write: TTL/CMOS Outputs 0-7
- * @in0_7: Read: Isolated Inputs 0-7
- * Write: Reserved
- * @in8_15: Read: Isolated Inputs 8-15
- * Write: Reserved
- * @in16_23: Read: Isolated Inputs 16-23
- * Write: Reserved
- * @ttl_in0_7: Read: TTL/CMOS Inputs 0-7
- * Write: Reserved
- * @cos0_7: Read: COS Status Inputs 0-7
- * Write: COS Clear Inputs 0-7
- * @cos8_15: Read: COS Status Inputs 8-15
- * Write: COS Clear Inputs 8-15
- * @cos16_23: Read: COS Status Inputs 16-23
- * Write: COS Clear Inputs 16-23
- * @cos_ttl0_7: Read: COS Status TTL/CMOS 0-7
- * Write: COS Clear TTL/CMOS 0-7
- * @ctl: Read: Control Register
- * Write: Control Register
- * @reserved: Read: Reserved
- * Write: Reserved
- * @cos_enable: Read: COS Enable
- * Write: COS Enable
- * @soft_reset: Read: IRQ Output Pin Status
- * Write: Software Board Reset
- */
-struct idio_24_gpio_reg {
- u8 out0_7;
- u8 out8_15;
- u8 out16_23;
- u8 ttl_out0_7;
- u8 in0_7;
- u8 in8_15;
- u8 in16_23;
- u8 ttl_in0_7;
- u8 cos0_7;
- u8 cos8_15;
- u8 cos16_23;
- u8 cos_ttl0_7;
- u8 ctl;
- u8 reserved;
- u8 cos_enable;
- u8 soft_reset;
+static const struct regmap_range idio_24_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x3), regmap_reg_range(0x8, 0xC),
+ regmap_reg_range(0xE, 0xF),
+};
+static const struct regmap_range idio_24_rd_ranges[] = {
+ regmap_reg_range(0x0, 0xC), regmap_reg_range(0xE, 0xF),
+};
+static const struct regmap_range idio_24_volatile_ranges[] = {
+ regmap_reg_range(0x4, 0xB), regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_access_table idio_24_wr_table = {
+ .yes_ranges = idio_24_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_24_wr_ranges),
+};
+static const struct regmap_access_table idio_24_rd_table = {
+ .yes_ranges = idio_24_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_24_rd_ranges),
+};
+static const struct regmap_access_table idio_24_volatile_table = {
+ .yes_ranges = idio_24_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idio_24_volatile_ranges),
+};
+
+static const struct regmap_config idio_24_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .wr_table = &idio_24_wr_table,
+ .rd_table = &idio_24_rd_table,
+ .volatile_table = &idio_24_volatile_table,
+ .cache_type = REGCACHE_FLAT,
+ .use_raw_spinlock = true,
+};
+
+#define IDIO_24_NGPIO_PER_REG 8
+#define IDIO_24_REGMAP_IRQ(_id) \
+ [24 + _id] = { \
+ .reg_offset = (_id) / IDIO_24_NGPIO_PER_REG, \
+ .mask = BIT((_id) % IDIO_24_NGPIO_PER_REG), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \
+ }
+#define IDIO_24_IIN_IRQ(_id) IDIO_24_REGMAP_IRQ(_id)
+#define IDIO_24_TTL_IRQ(_id) IDIO_24_REGMAP_IRQ(24 + _id)
+
+static const struct regmap_irq idio_24_regmap_irqs[] = {
+ IDIO_24_IIN_IRQ(0), IDIO_24_IIN_IRQ(1), IDIO_24_IIN_IRQ(2), /* IIN 0-2 */
+ IDIO_24_IIN_IRQ(3), IDIO_24_IIN_IRQ(4), IDIO_24_IIN_IRQ(5), /* IIN 3-5 */
+ IDIO_24_IIN_IRQ(6), IDIO_24_IIN_IRQ(7), IDIO_24_IIN_IRQ(8), /* IIN 6-8 */
+ IDIO_24_IIN_IRQ(9), IDIO_24_IIN_IRQ(10), IDIO_24_IIN_IRQ(11), /* IIN 9-11 */
+ IDIO_24_IIN_IRQ(12), IDIO_24_IIN_IRQ(13), IDIO_24_IIN_IRQ(14), /* IIN 12-14 */
+ IDIO_24_IIN_IRQ(15), IDIO_24_IIN_IRQ(16), IDIO_24_IIN_IRQ(17), /* IIN 15-17 */
+ IDIO_24_IIN_IRQ(18), IDIO_24_IIN_IRQ(19), IDIO_24_IIN_IRQ(20), /* IIN 18-20 */
+ IDIO_24_IIN_IRQ(21), IDIO_24_IIN_IRQ(22), IDIO_24_IIN_IRQ(23), /* IIN 21-23 */
+ IDIO_24_TTL_IRQ(0), IDIO_24_TTL_IRQ(1), IDIO_24_TTL_IRQ(2), /* TTL 0-2 */
+ IDIO_24_TTL_IRQ(3), IDIO_24_TTL_IRQ(4), IDIO_24_TTL_IRQ(5), /* TTL 3-5 */
+ IDIO_24_TTL_IRQ(6), IDIO_24_TTL_IRQ(7), /* TTL 6-7 */
};
/**
* struct idio_24_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
+ * @map: regmap for the device
* @lock: synchronization lock to prevent I/O race conditions
- * @reg: I/O address offset for the GPIO device registers
- * @irq_mask: I/O bits affected by interrupts
+ * @irq_type: type configuration for IRQs
*/
struct idio_24_gpio {
- struct gpio_chip chip;
+ struct regmap *map;
raw_spinlock_t lock;
- __u8 __iomem *plx;
- struct idio_24_gpio_reg __iomem *reg;
- unsigned long irq_mask;
+ u8 irq_type;
};
-static int idio_24_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- const unsigned long out_mode_mask = BIT(1);
-
- /* FET Outputs */
- if (offset < 24)
- return GPIO_LINE_DIRECTION_OUT;
-
- /* Isolated Inputs */
- if (offset < 48)
- return GPIO_LINE_DIRECTION_IN;
-
- /* TTL/CMOS I/O */
- /* OUT MODE = 1 when TTL/CMOS Output Mode is set */
- if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
- return GPIO_LINE_DIRECTION_OUT;
-
- return GPIO_LINE_DIRECTION_IN;
-}
-
-static int idio_24_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- unsigned long flags;
- unsigned int ctl_state;
- const unsigned long out_mode_mask = BIT(1);
-
- /* TTL/CMOS I/O */
- if (offset > 47) {
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
-
- /* Clear TTL/CMOS Output Mode */
- ctl_state = ioread8(&idio24gpio->reg->ctl) & ~out_mode_mask;
- iowrite8(ctl_state, &idio24gpio->reg->ctl);
-
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
- }
-
- return 0;
-}
-
-static int idio_24_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- unsigned long flags;
- unsigned int ctl_state;
- const unsigned long out_mode_mask = BIT(1);
-
- /* TTL/CMOS I/O */
- if (offset > 47) {
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
-
- /* Set TTL/CMOS Output Mode */
- ctl_state = ioread8(&idio24gpio->reg->ctl) | out_mode_mask;
- iowrite8(ctl_state, &idio24gpio->reg->ctl);
-
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
- }
-
- chip->set(chip, offset, value);
- return 0;
-}
-
-static int idio_24_gpio_get(struct gpio_chip *chip, unsigned int offset)
+static int idio_24_handle_mask_sync(const int index, const unsigned int mask_buf_def,
+ const unsigned int mask_buf, void *const irq_drv_data)
{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- const unsigned long offset_mask = BIT(offset % 8);
- const unsigned long out_mode_mask = BIT(1);
-
- /* FET Outputs */
- if (offset < 8)
- return !!(ioread8(&idio24gpio->reg->out0_7) & offset_mask);
+ const unsigned int type_mask = COS_ENABLE_BOTH << index;
+ struct idio_24_gpio *const idio24gpio = irq_drv_data;
+ u8 type;
+ int ret;
- if (offset < 16)
- return !!(ioread8(&idio24gpio->reg->out8_15) & offset_mask);
-
- if (offset < 24)
- return !!(ioread8(&idio24gpio->reg->out16_23) & offset_mask);
-
- /* Isolated Inputs */
- if (offset < 32)
- return !!(ioread8(&idio24gpio->reg->in0_7) & offset_mask);
-
- if (offset < 40)
- return !!(ioread8(&idio24gpio->reg->in8_15) & offset_mask);
-
- if (offset < 48)
- return !!(ioread8(&idio24gpio->reg->in16_23) & offset_mask);
+ raw_spin_lock(&idio24gpio->lock);
- /* TTL/CMOS Outputs */
- if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
- return !!(ioread8(&idio24gpio->reg->ttl_out0_7) & offset_mask);
+ /* if all are masked, then disable interrupts, else set to type */
+ type = (mask_buf == mask_buf_def) ? ~type_mask : idio24gpio->irq_type;
- /* TTL/CMOS Inputs */
- return !!(ioread8(&idio24gpio->reg->ttl_in0_7) & offset_mask);
-}
+ ret = regmap_update_bits(idio24gpio->map, IDIO_24_COS_ENABLE, type_mask, type);
-static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *ports[] = {
- &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
- &idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7,
- &idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23,
- };
- size_t index;
- unsigned long port_state;
- const unsigned long out_mode_mask = BIT(1);
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
-
- /* read bits from current gpio port (port 6 is TTL GPIO) */
- if (index < 6)
- port_state = ioread8(ports[index]);
- else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
- port_state = ioread8(&idio24gpio->reg->ttl_out0_7);
- else
- port_state = ioread8(&idio24gpio->reg->ttl_in0_7);
-
- port_state &= gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
+ raw_spin_unlock(&idio24gpio->lock);
- return 0;
+ return ret;
}
-static void idio_24_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int idio_24_set_type_config(unsigned int **const buf, const unsigned int type,
+ const struct regmap_irq *const irq_data, const int idx,
+ void *const irq_drv_data)
{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- const unsigned long out_mode_mask = BIT(1);
- void __iomem *base;
- const unsigned int mask = BIT(offset % 8);
- unsigned long flags;
- unsigned int out_state;
-
- /* Isolated Inputs */
- if (offset > 23 && offset < 48)
- return;
-
- /* TTL/CMOS Inputs */
- if (offset > 47 && !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
- return;
-
- /* TTL/CMOS Outputs */
- if (offset > 47)
- base = &idio24gpio->reg->ttl_out0_7;
- /* FET Outputs */
- else if (offset > 15)
- base = &idio24gpio->reg->out16_23;
- else if (offset > 7)
- base = &idio24gpio->reg->out8_15;
- else
- base = &idio24gpio->reg->out0_7;
-
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
-
- if (value)
- out_state = ioread8(base) | mask;
- else
- out_state = ioread8(base) & ~mask;
-
- iowrite8(out_state, base);
-
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
-}
-
-static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- void __iomem *ports[] = {
- &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
- &idio24gpio->reg->out16_23
- };
- size_t index;
- unsigned long bitmask;
- unsigned long flags;
- unsigned long out_state;
- const unsigned long out_mode_mask = BIT(1);
-
- for_each_set_clump8(offset, gpio_mask, mask, ARRAY_SIZE(ports) * 8) {
- index = offset / 8;
-
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
-
- /* read bits from current gpio port (port 6 is TTL GPIO) */
- if (index < 6) {
- out_state = ioread8(ports[index]);
- } else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask) {
- out_state = ioread8(&idio24gpio->reg->ttl_out0_7);
- } else {
- /* skip TTL GPIO if set for input */
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
- continue;
- }
-
- /* set requested bit states */
- out_state &= ~gpio_mask;
- out_state |= bitmask;
-
- /* write bits for current gpio port (port 6 is TTL GPIO) */
- if (index < 6)
- iowrite8(out_state, ports[index]);
- else
- iowrite8(out_state, &idio24gpio->reg->ttl_out0_7);
-
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ const unsigned int offset = irq_data->reg_offset;
+ const unsigned int rising = COS_ENABLE_RISING << offset;
+ const unsigned int falling = COS_ENABLE_FALLING << offset;
+ const unsigned int mask = COS_ENABLE_BOTH << offset;
+ struct idio_24_gpio *const idio24gpio = irq_drv_data;
+ unsigned int new;
+ unsigned int cos_enable;
+ int ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ new = rising;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ new = falling;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ new = mask;
+ break;
+ default:
+ return -EINVAL;
}
-}
-
-static void idio_24_irq_ack(struct irq_data *data)
-{
-}
-static void idio_24_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- unsigned long flags;
- const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
- unsigned char new_irq_mask;
- const unsigned long bank_offset = bit_offset / 8;
- unsigned char cos_enable_state;
-
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
-
- idio24gpio->irq_mask &= ~BIT(bit_offset);
- new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+ raw_spin_lock(&idio24gpio->lock);
- if (!new_irq_mask) {
- cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+ /* replace old bitmap with new bitmap */
+ idio24gpio->irq_type = (idio24gpio->irq_type & ~mask) | (new & mask);
- /* Disable Rising Edge detection */
- cos_enable_state &= ~BIT(bank_offset);
- /* Disable Falling Edge detection */
- cos_enable_state &= ~BIT(bank_offset + 4);
+ ret = regmap_read(idio24gpio->map, IDIO_24_COS_ENABLE, &cos_enable);
+ if (ret)
+ goto exit_unlock;
- iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ /* if COS is currently enabled then update the edge type */
+ if (cos_enable & mask) {
+ ret = regmap_update_bits(idio24gpio->map, IDIO_24_COS_ENABLE, mask,
+ idio24gpio->irq_type);
+ if (ret)
+ goto exit_unlock;
}
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+exit_unlock:
+ raw_spin_unlock(&idio24gpio->lock);
- gpiochip_disable_irq(chip, irqd_to_hwirq(data));
+ return ret;
}
-static void idio_24_irq_unmask(struct irq_data *data)
+static int idio_24_reg_mask_xlate(struct gpio_regmap *const gpio, const unsigned int base,
+ const unsigned int offset, unsigned int *const reg,
+ unsigned int *const mask)
{
- struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
- struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
- unsigned long flags;
- unsigned char prev_irq_mask;
- const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
- const unsigned long bank_offset = bit_offset / 8;
- unsigned char cos_enable_state;
-
- gpiochip_enable_irq(chip, irqd_to_hwirq(data));
-
- raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+ const unsigned int out_stride = offset / IDIO_24_NGPIO_PER_REG;
+ const unsigned int in_stride = (offset - 24) / IDIO_24_NGPIO_PER_REG;
+ struct regmap *const map = gpio_regmap_get_drvdata(gpio);
+ int err;
+ unsigned int ctrl_reg;
- prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
- idio24gpio->irq_mask |= BIT(bit_offset);
+ switch (base) {
+ case IDIO_24_OUT_BASE:
+ *mask = BIT(offset % IDIO_24_NGPIO_PER_REG);
- if (!prev_irq_mask) {
- cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+ /* FET Outputs */
+ if (offset < 24) {
+ *reg = IDIO_24_OUT_BASE + out_stride;
+ return 0;
+ }
- /* Enable Rising Edge detection */
- cos_enable_state |= BIT(bank_offset);
- /* Enable Falling Edge detection */
- cos_enable_state |= BIT(bank_offset + 4);
+ /* Isolated Inputs */
+ if (offset < 48) {
+ *reg = IDIO_24_IN_BASE + in_stride;
+ return 0;
+ }
- iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
- }
+ err = regmap_read(map, IDIO_24_CONTROL_REG, &ctrl_reg);
+ if (err)
+ return err;
- raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
-}
+ /* TTL/CMOS Outputs */
+ if (ctrl_reg & CONTROL_REG_OUT_MODE) {
+ *reg = IDIO_24_TTLCMOS_OUT_REG;
+ return 0;
+ }
-static int idio_24_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- /* The only valid irq types are none and both-edges */
- if (flow_type != IRQ_TYPE_NONE &&
- (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
+ /* TTL/CMOS Inputs */
+ *reg = IDIO_24_TTLCMOS_IN_REG;
+ return 0;
+ case IDIO_24_CONTROL_REG:
+ /* We can only set direction for TTL/CMOS lines */
+ if (offset < 48)
+ return -EOPNOTSUPP;
+
+ *reg = IDIO_24_CONTROL_REG;
+ *mask = CONTROL_REG_OUT_MODE;
+ return 0;
+ default:
+ /* Should never reach this path */
return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip idio_24_irqchip = {
- .name = "pcie-idio-24",
- .irq_ack = idio_24_irq_ack,
- .irq_mask = idio_24_irq_mask,
- .irq_unmask = idio_24_irq_unmask,
- .irq_set_type = idio_24_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
-{
- struct idio_24_gpio *const idio24gpio = dev_id;
- unsigned long irq_status;
- struct gpio_chip *const chip = &idio24gpio->chip;
- unsigned long irq_mask;
- int gpio;
-
- raw_spin_lock(&idio24gpio->lock);
-
- /* Read Change-Of-State status */
- irq_status = ioread32(&idio24gpio->reg->cos0_7);
-
- raw_spin_unlock(&idio24gpio->lock);
-
- /* Make sure our device generated IRQ */
- if (!irq_status)
- return IRQ_NONE;
-
- /* Handle only unmasked IRQ */
- irq_mask = idio24gpio->irq_mask & irq_status;
-
- for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
- generic_handle_domain_irq(chip->irq.domain, gpio + 24);
-
- raw_spin_lock(&idio24gpio->lock);
-
- /* Clear Change-Of-State status */
- iowrite32(irq_status, &idio24gpio->reg->cos0_7);
-
- raw_spin_unlock(&idio24gpio->lock);
-
- return IRQ_HANDLED;
+ }
}
#define IDIO_24_NGPIO 56
@@ -496,11 +297,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
const size_t pci_plx_bar_index = 1;
const size_t pci_bar_index = 2;
const char *const name = pci_name(pdev);
- struct gpio_irq_chip *girq;
-
- idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL);
- if (!idio24gpio)
- return -ENOMEM;
+ struct gpio_regmap_config gpio_config = {};
+ void __iomem *pex8311_regs;
+ void __iomem *idio_24_regs;
+ struct regmap *intcsr_map;
+ struct regmap_irq_chip *chip;
+ struct regmap_irq_chip_data *chip_data;
err = pcim_enable_device(pdev);
if (err) {
@@ -514,57 +316,72 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
- idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
-
- idio24gpio->chip.label = name;
- idio24gpio->chip.parent = dev;
- idio24gpio->chip.owner = THIS_MODULE;
- idio24gpio->chip.base = -1;
- idio24gpio->chip.ngpio = IDIO_24_NGPIO;
- idio24gpio->chip.names = idio_24_names;
- idio24gpio->chip.get_direction = idio_24_gpio_get_direction;
- idio24gpio->chip.direction_input = idio_24_gpio_direction_input;
- idio24gpio->chip.direction_output = idio_24_gpio_direction_output;
- idio24gpio->chip.get = idio_24_gpio_get;
- idio24gpio->chip.get_multiple = idio_24_gpio_get_multiple;
- idio24gpio->chip.set = idio_24_gpio_set;
- idio24gpio->chip.set_multiple = idio_24_gpio_set_multiple;
-
- girq = &idio24gpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &idio_24_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
+ pex8311_regs = pcim_iomap_table(pdev)[pci_plx_bar_index];
+ idio_24_regs = pcim_iomap_table(pdev)[pci_bar_index];
+
+ intcsr_map = devm_regmap_init_mmio(dev, pex8311_regs, &pex8311_intcsr_regmap_config);
+ if (IS_ERR(intcsr_map))
+ return dev_err_probe(dev, PTR_ERR(intcsr_map),
+ "Unable to initialize PEX8311 register map\n");
+
+ idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL);
+ if (!idio24gpio)
+ return -ENOMEM;
+
+ idio24gpio->map = devm_regmap_init_mmio(dev, idio_24_regs, &idio_24_regmap_config);
+ if (IS_ERR(idio24gpio->map))
+ return dev_err_probe(dev, PTR_ERR(idio24gpio->map),
+ "Unable to initialize register map\n");
raw_spin_lock_init(&idio24gpio->lock);
+ /* Initialize all IRQ type configuration to IRQ_TYPE_EDGE_BOTH */
+ idio24gpio->irq_type = GENMASK(7, 0);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->name = name;
+ chip->status_base = IDIO_24_COS_STATUS_BASE;
+ chip->mask_base = IDIO_24_COS_ENABLE;
+ chip->ack_base = IDIO_24_COS_STATUS_BASE;
+ chip->num_regs = 4;
+ chip->irqs = idio_24_regmap_irqs;
+ chip->num_irqs = ARRAY_SIZE(idio_24_regmap_irqs);
+ chip->handle_mask_sync = idio_24_handle_mask_sync;
+ chip->set_type_config = idio_24_set_type_config;
+ chip->irq_drv_data = idio24gpio;
+
/* Software board reset */
- iowrite8(0, &idio24gpio->reg->soft_reset);
+ err = regmap_write(idio24gpio->map, IDIO_24_SOFT_RESET, 0);
+ if (err)
+ return err;
/*
* enable PLX PEX8311 internal PCI wire interrupt and local interrupt
* input
*/
- iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
- idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
-
- err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
+ err = regmap_update_bits(intcsr_map, 0x0, IDIO_24_ENABLE_IRQ, IDIO_24_ENABLE_IRQ);
+ if (err)
return err;
- }
-
- err = devm_request_irq(dev, pdev->irq, idio_24_irq_handler, IRQF_SHARED,
- name, idio24gpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- return err;
- }
- return 0;
+ err = devm_regmap_add_irq_chip(dev, idio24gpio->map, pdev->irq, 0, 0, chip, &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ gpio_config.parent = dev;
+ gpio_config.regmap = idio24gpio->map;
+ gpio_config.ngpio = IDIO_24_NGPIO;
+ gpio_config.names = idio_24_names;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(IDIO_24_OUT_BASE);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(IDIO_24_OUT_BASE);
+ gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(IDIO_24_CONTROL_REG);
+ gpio_config.ngpio_per_reg = IDIO_24_NGPIO_PER_REG;
+ gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
+ gpio_config.reg_mask_xlate = idio_24_reg_mask_xlate;
+ gpio_config.drvdata = idio24gpio->map;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
static const struct pci_device_id idio_24_pci_dev_id[] = {
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index 67071bea08c2..e3013e778e15 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
- * Andrew F. Davis <afd@ti.com>
+ * Copyright (C) 2015-2023 Texas Instruments Incorporated - https://www.ti.com/
+ * Andrew Davis <afd@ti.com>
*/
#include <linux/bitmap.h>
@@ -116,6 +116,11 @@ static const struct gpio_chip template_chip = {
.can_sleep = true,
};
+static void pisosr_mutex_destroy(void *lock)
+{
+ mutex_destroy(lock);
+}
+
static int pisosr_gpio_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -126,8 +131,6 @@ static int pisosr_gpio_probe(struct spi_device *spi)
if (!gpio)
return -ENOMEM;
- spi_set_drvdata(spi, gpio);
-
gpio->chip = template_chip;
gpio->chip.parent = dev;
of_property_read_u16(dev->of_node, "ngpios", &gpio->chip.ngpio);
@@ -145,8 +148,11 @@ static int pisosr_gpio_probe(struct spi_device *spi)
"Unable to allocate load GPIO\n");
mutex_init(&gpio->lock);
+ ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);
+ if (ret)
+ return ret;
- ret = gpiochip_add_data(&gpio->chip, gpio);
+ ret = devm_gpiochip_add_data(dev, &gpio->chip, gpio);
if (ret < 0) {
dev_err(dev, "Unable to register gpiochip\n");
return ret;
@@ -155,15 +161,6 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return 0;
}
-static void pisosr_gpio_remove(struct spi_device *spi)
-{
- struct pisosr_gpio *gpio = spi_get_drvdata(spi);
-
- gpiochip_remove(&gpio->chip);
-
- mutex_destroy(&gpio->lock);
-}
-
static const struct spi_device_id pisosr_gpio_id_table[] = {
{ "pisosr-gpio", },
{ /* sentinel */ }
@@ -182,11 +179,10 @@ static struct spi_driver pisosr_gpio_driver = {
.of_match_table = pisosr_gpio_of_match_table,
},
.probe = pisosr_gpio_probe,
- .remove = pisosr_gpio_remove,
.id_table = pisosr_gpio_id_table,
};
module_spi_driver(pisosr_gpio_driver);
-MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Andrew Davis <afd@ti.com>");
MODULE_DESCRIPTION("SPI Compatible PISO Shift Register GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index c3e4d90f6b18..2b9b7be9b8fd 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -8,7 +8,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -363,7 +363,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, pmic_eic);
return 0;
}
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index a1630ed4b741..7e9f7a32d3ee 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -20,7 +20,6 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index ecb0d3800dfe..9d1b95e429f1 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -234,7 +234,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
}
-static const struct of_device_id rpi_exp_gpio_ids[] __maybe_unused = {
+static const struct of_device_id rpi_exp_gpio_ids[] = {
{ .compatible = "raspberrypi,firmware-gpio" },
{ }
};
@@ -243,7 +243,7 @@ MODULE_DEVICE_TABLE(of, rpi_exp_gpio_ids);
static struct platform_driver rpi_exp_gpio_driver = {
.driver = {
.name = MODULE_NAME,
- .of_match_table = of_match_ptr(rpi_exp_gpio_ids),
+ .of_match_table = rpi_exp_gpio_ids,
},
.probe = rpi_exp_gpio_probe,
};
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 4fae3ebea790..c34dcadaee36 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -121,8 +121,6 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
if (pdata && pdata->gpio_base)
rc5t583_gpio->gpio_chip.base = pdata->gpio_base;
- platform_set_drvdata(pdev, rc5t583_gpio);
-
return devm_gpiochip_add_data(&pdev->dev, &rc5t583_gpio->gpio_chip,
rc5t583_gpio);
}
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2525adb52f4f..86e69cde04da 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index e5de15a2ab9a..b35b9604413f 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -17,10 +17,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "../pinctrl/core.h"
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index 767c33ae3213..d89da7300ddd 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -189,7 +189,6 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
if (!piobu)
return -ENOMEM;
- platform_set_drvdata(pdev, piobu);
piobu->chip.label = pdev->name;
piobu->chip.parent = &pdev->dev;
piobu->chip.owner = THIS_MODULE,
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8a83f7bf4382..e48392074e4b 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -380,8 +380,6 @@ static int sch_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- platform_set_drvdata(pdev, sch);
-
girq = &sch->chip.irq;
gpio_irq_chip_set_chip(girq, &sch_irqchip);
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 745e5f67254e..8decd9b5d229 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -6,10 +6,10 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/of_irq.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
@@ -150,6 +150,7 @@ static const struct irq_chip sifive_gpio_irqchip = {
.irq_disable = sifive_gpio_irq_disable,
.irq_eoi = sifive_gpio_irq_eoi,
.irq_set_affinity = sifive_gpio_irq_set_affinity,
+ .irq_set_wake = irq_chip_set_wake_parent,
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
@@ -180,12 +181,10 @@ static const struct regmap_config sifive_gpio_regmap_config = {
static int sifive_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *irq_parent;
struct irq_domain *parent;
struct gpio_irq_chip *girq;
struct sifive_gpio *chip;
- int ret, ngpio, i;
+ int ret, ngpio;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -202,31 +201,22 @@ static int sifive_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- ngpio = of_irq_count(node);
- if (ngpio > SIFIVE_GPIO_MAX) {
- dev_err(dev, "Too many GPIO interrupts (max=%d)\n",
- SIFIVE_GPIO_MAX);
- return -ENXIO;
- }
-
- irq_parent = of_irq_find_parent(node);
- if (!irq_parent) {
- dev_err(dev, "no IRQ parent node\n");
- return -ENODEV;
+ for (ngpio = 0; ngpio < SIFIVE_GPIO_MAX; ngpio++) {
+ ret = platform_get_irq_optional(pdev, ngpio);
+ if (ret < 0)
+ break;
+ chip->irq_number[ngpio] = ret;
}
- parent = irq_find_host(irq_parent);
- of_node_put(irq_parent);
- if (!parent) {
- dev_err(dev, "no IRQ parent domain\n");
+ if (!ngpio) {
+ dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
- for (i = 0; i < ngpio; i++) {
- ret = platform_get_irq(pdev, i);
- if (ret < 0)
- return ret;
- chip->irq_number[i] = ret;
- }
+ /*
+ * The check above ensures at least one parent IRQ is valid.
+ * Assume all parent IRQs belong to the same domain.
+ */
+ parent = irq_get_irq_data(chip->irq_number[0])->domain;
ret = bgpio_init(&chip->gc, dev, 4,
chip->base + SIFIVE_GPIO_INPUT_VAL,
@@ -254,7 +244,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
chip->gc.owner = THIS_MODULE;
girq = &chip->gc.irq;
gpio_irq_chip_set_chip(girq, &sifive_gpio_irqchip);
- girq->fwnode = of_node_to_fwnode(node);
+ girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = sifive_gpio_child_to_parent_hwirq;
girq->handler = handle_bad_irq;
@@ -277,4 +267,8 @@ static struct platform_driver sifive_gpio_driver = {
.of_match_table = sifive_gpio_match,
},
};
-builtin_platform_driver(sifive_gpio_driver)
+module_platform_driver(sifive_gpio_driver)
+
+MODULE_AUTHOR("Yash Shah <yash.shah@sifive.com>");
+MODULE_DESCRIPTION("SiFive GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index f1f6f1c32987..271db3639a78 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/configfs.h>
#include <linux/device.h>
@@ -68,7 +69,7 @@ static int gpio_sim_apply_pull(struct gpio_sim_chip *chip,
gc = &chip->gc;
desc = &gc->gpiodev->descs[offset];
- mutex_lock(&chip->lock);
+ guard(mutex)(&chip->lock);
if (test_bit(FLAG_REQUESTED, &desc->flags) &&
!test_bit(FLAG_IS_OUT, &desc->flags)) {
@@ -104,29 +105,24 @@ set_value:
set_pull:
__assign_bit(offset, chip->pull_map, value);
- mutex_unlock(&chip->lock);
return 0;
}
static int gpio_sim_get(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- int ret;
- mutex_lock(&chip->lock);
- ret = !!test_bit(offset, chip->value_map);
- mutex_unlock(&chip->lock);
+ guard(mutex)(&chip->lock);
- return ret;
+ return !!test_bit(offset, chip->value_map);
}
static void gpio_sim_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- mutex_lock(&chip->lock);
- __assign_bit(offset, chip->value_map, value);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ __assign_bit(offset, chip->value_map, value);
}
static int gpio_sim_get_multiple(struct gpio_chip *gc,
@@ -134,9 +130,8 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- mutex_lock(&chip->lock);
- bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
return 0;
}
@@ -146,9 +141,9 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- mutex_lock(&chip->lock);
- bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ bitmap_replace(chip->value_map, chip->value_map, bits, mask,
+ gc->ngpio);
}
static int gpio_sim_direction_output(struct gpio_chip *gc,
@@ -156,10 +151,10 @@ static int gpio_sim_direction_output(struct gpio_chip *gc,
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- mutex_lock(&chip->lock);
- __clear_bit(offset, chip->direction_map);
- __assign_bit(offset, chip->value_map, value);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock) {
+ __clear_bit(offset, chip->direction_map);
+ __assign_bit(offset, chip->value_map, value);
+ }
return 0;
}
@@ -168,9 +163,8 @@ static int gpio_sim_direction_input(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- mutex_lock(&chip->lock);
- __set_bit(offset, chip->direction_map);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ __set_bit(offset, chip->direction_map);
return 0;
}
@@ -180,9 +174,8 @@ static int gpio_sim_get_direction(struct gpio_chip *gc, unsigned int offset)
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
int direction;
- mutex_lock(&chip->lock);
- direction = !!test_bit(offset, chip->direction_map);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ direction = !!test_bit(offset, chip->direction_map);
return direction ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
@@ -215,9 +208,9 @@ static void gpio_sim_free(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
- mutex_lock(&chip->lock);
- __assign_bit(offset, chip->value_map, !!test_bit(offset, chip->pull_map));
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ __assign_bit(offset, chip->value_map,
+ !!test_bit(offset, chip->pull_map));
}
static ssize_t gpio_sim_sysfs_val_show(struct device *dev,
@@ -227,9 +220,8 @@ static ssize_t gpio_sim_sysfs_val_show(struct device *dev,
struct gpio_sim_chip *chip = dev_get_drvdata(dev);
int val;
- mutex_lock(&chip->lock);
- val = !!test_bit(line_attr->offset, chip->value_map);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ val = !!test_bit(line_attr->offset, chip->value_map);
return sysfs_emit(buf, "%d\n", val);
}
@@ -258,9 +250,8 @@ static ssize_t gpio_sim_sysfs_pull_show(struct device *dev,
struct gpio_sim_chip *chip = dev_get_drvdata(dev);
int pull;
- mutex_lock(&chip->lock);
- pull = !!test_bit(line_attr->offset, chip->pull_map);
- mutex_unlock(&chip->lock);
+ scoped_guard(mutex, &chip->lock)
+ pull = !!test_bit(line_attr->offset, chip->pull_map);
return sysfs_emit(buf, "%s\n", gpio_sim_sysfs_pull_strings[pull]);
}
@@ -291,6 +282,15 @@ static void gpio_sim_mutex_destroy(void *data)
mutex_destroy(lock);
}
+static void gpio_sim_dispose_mappings(void *data)
+{
+ struct gpio_sim_chip *chip = data;
+ unsigned int i;
+
+ for (i = 0; i < chip->gc.ngpio; i++)
+ irq_dispose_mapping(irq_find_mapping(chip->irq_sim, i));
+}
+
static void gpio_sim_sysfs_remove(void *data)
{
struct gpio_sim_chip *chip = data;
@@ -402,10 +402,14 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
if (!chip->pull_map)
return -ENOMEM;
- chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines);
+ chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines);
if (IS_ERR(chip->irq_sim))
return PTR_ERR(chip->irq_sim);
+ ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip);
+ if (ret)
+ return ret;
+
mutex_init(&chip->lock);
ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy,
&chip->lock);
@@ -489,7 +493,7 @@ struct gpio_sim_device {
* This structure however can be modified by callbacks of different
* attributes so we need another lock.
*
- * We use this lock fo protecting all data structures owned by this
+ * We use this lock for protecting all data structures owned by this
* object too.
*/
struct mutex lock;
@@ -643,16 +647,13 @@ static bool gpio_sim_device_is_live_unlocked(struct gpio_sim_device *dev)
static char *gpio_sim_strdup_trimmed(const char *str, size_t count)
{
- char *dup, *trimmed;
+ char *trimmed;
- dup = kstrndup(str, count, GFP_KERNEL);
- if (!dup)
+ trimmed = kstrndup(skip_spaces(str), count, GFP_KERNEL);
+ if (!trimmed)
return NULL;
- trimmed = strstrip(dup);
- memmove(dup, trimmed, strlen(trimmed) + 1);
-
- return dup;
+ return strim(trimmed);
}
static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item,
@@ -660,17 +661,14 @@ static ssize_t gpio_sim_device_config_dev_name_show(struct config_item *item,
{
struct gpio_sim_device *dev = to_gpio_sim_device(item);
struct platform_device *pdev;
- int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
+
pdev = dev->pdev;
if (pdev)
- ret = sprintf(page, "%s\n", dev_name(&pdev->dev));
- else
- ret = sprintf(page, "gpio-sim.%d\n", dev->id);
- mutex_unlock(&dev->lock);
+ return sprintf(page, "%s\n", dev_name(&pdev->dev));
- return ret;
+ return sprintf(page, "gpio-sim.%d\n", dev->id);
}
CONFIGFS_ATTR_RO(gpio_sim_device_config_, dev_name);
@@ -681,9 +679,8 @@ gpio_sim_device_config_live_show(struct config_item *item, char *page)
struct gpio_sim_device *dev = to_gpio_sim_device(item);
bool live;
- mutex_lock(&dev->lock);
- live = gpio_sim_device_is_live_unlocked(dev);
- mutex_unlock(&dev->lock);
+ scoped_guard(mutex, &dev->lock)
+ live = gpio_sim_device_is_live_unlocked(dev);
return sprintf(page, "%c\n", live ? '1' : '0');
}
@@ -838,8 +835,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
{
struct property_entry properties[GPIO_SIM_PROP_MAX];
unsigned int prop_idx = 0, line_names_size = 0;
- struct fwnode_handle *swnode;
- char **line_names;
+ char **line_names __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
@@ -858,9 +854,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
"gpio-line-names",
line_names, line_names_size);
- swnode = fwnode_create_software_node(properties, parent);
- kfree(line_names);
- return swnode;
+ return fwnode_create_software_node(properties, parent);
}
static void gpio_sim_remove_swnode_recursive(struct fwnode_handle *swnode)
@@ -985,18 +979,15 @@ gpio_sim_device_config_live_store(struct config_item *item,
if (ret)
return ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if ((!live && !gpio_sim_device_is_live_unlocked(dev)) ||
- (live && gpio_sim_device_is_live_unlocked(dev)))
+ if (live == gpio_sim_device_is_live_unlocked(dev))
ret = -EPERM;
else if (live)
ret = gpio_sim_device_activate_unlocked(dev);
else
gpio_sim_device_deactivate_unlocked(dev);
- mutex_unlock(&dev->lock);
-
return ret ?: count;
}
@@ -1033,17 +1024,14 @@ static ssize_t gpio_sim_bank_config_chip_name_show(struct config_item *item,
struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
struct gpio_sim_chip_name_ctx ctx = { bank->swnode, page };
- int ret;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
+
if (gpio_sim_device_is_live_unlocked(dev))
- ret = device_for_each_child(&dev->pdev->dev, &ctx,
- gpio_sim_emit_chip_name);
- else
- ret = sprintf(page, "none\n");
- mutex_unlock(&dev->lock);
+ return device_for_each_child(&dev->pdev->dev, &ctx,
+ gpio_sim_emit_chip_name);
- return ret;
+ return sprintf(page, "none\n");
}
CONFIGFS_ATTR_RO(gpio_sim_bank_config_, chip_name);
@@ -1053,13 +1041,10 @@ gpio_sim_bank_config_label_show(struct config_item *item, char *page)
{
struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
- int ret;
- mutex_lock(&dev->lock);
- ret = sprintf(page, "%s\n", bank->label ?: "");
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
- return ret;
+ return sprintf(page, "%s\n", bank->label ?: "");
}
static ssize_t gpio_sim_bank_config_label_store(struct config_item *item,
@@ -1069,23 +1054,18 @@ static ssize_t gpio_sim_bank_config_label_store(struct config_item *item,
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
char *trimmed;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return -EBUSY;
- }
trimmed = gpio_sim_strdup_trimmed(page, count);
- if (!trimmed) {
- mutex_unlock(&dev->lock);
+ if (!trimmed)
return -ENOMEM;
- }
kfree(bank->label);
bank->label = trimmed;
- mutex_unlock(&dev->lock);
return count;
}
@@ -1096,13 +1076,10 @@ gpio_sim_bank_config_num_lines_show(struct config_item *item, char *page)
{
struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
- int ret;
- mutex_lock(&dev->lock);
- ret = sprintf(page, "%u\n", bank->num_lines);
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
- return ret;
+ return sprintf(page, "%u\n", bank->num_lines);
}
static ssize_t
@@ -1121,16 +1098,13 @@ gpio_sim_bank_config_num_lines_store(struct config_item *item,
if (num_lines == 0)
return -EINVAL;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return -EBUSY;
- }
bank->num_lines = num_lines;
- mutex_unlock(&dev->lock);
return count;
}
@@ -1148,13 +1122,10 @@ gpio_sim_line_config_name_show(struct config_item *item, char *page)
{
struct gpio_sim_line *line = to_gpio_sim_line(item);
struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
- int ret;
- mutex_lock(&dev->lock);
- ret = sprintf(page, "%s\n", line->name ?: "");
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
- return ret;
+ return sprintf(page, "%s\n", line->name ?: "");
}
static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
@@ -1164,24 +1135,18 @@ static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
char *trimmed;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return -EBUSY;
- }
trimmed = gpio_sim_strdup_trimmed(page, count);
- if (!trimmed) {
- mutex_unlock(&dev->lock);
+ if (!trimmed)
return -ENOMEM;
- }
kfree(line->name);
line->name = trimmed;
- mutex_unlock(&dev->lock);
-
return count;
}
@@ -1197,13 +1162,10 @@ static ssize_t gpio_sim_hog_config_name_show(struct config_item *item,
{
struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
- int ret;
- mutex_lock(&dev->lock);
- ret = sprintf(page, "%s\n", hog->name ?: "");
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
- return ret;
+ return sprintf(page, "%s\n", hog->name ?: "");
}
static ssize_t gpio_sim_hog_config_name_store(struct config_item *item,
@@ -1213,24 +1175,18 @@ static ssize_t gpio_sim_hog_config_name_store(struct config_item *item,
struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
char *trimmed;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return -EBUSY;
- }
trimmed = gpio_sim_strdup_trimmed(page, count);
- if (!trimmed) {
- mutex_unlock(&dev->lock);
+ if (!trimmed)
return -ENOMEM;
- }
kfree(hog->name);
hog->name = trimmed;
- mutex_unlock(&dev->lock);
-
return count;
}
@@ -1244,9 +1200,8 @@ static ssize_t gpio_sim_hog_config_direction_show(struct config_item *item,
char *repr;
int dir;
- mutex_lock(&dev->lock);
- dir = hog->dir;
- mutex_unlock(&dev->lock);
+ scoped_guard(mutex, &dev->lock)
+ dir = hog->dir;
switch (dir) {
case GPIOD_IN:
@@ -1273,42 +1228,24 @@ gpio_sim_hog_config_direction_store(struct config_item *item,
{
struct gpio_sim_hog *hog = to_gpio_sim_hog(item);
struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
- char *trimmed;
int dir;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return -EBUSY;
- }
-
- trimmed = gpio_sim_strdup_trimmed(page, count);
- if (!trimmed) {
- mutex_unlock(&dev->lock);
- return -ENOMEM;
- }
- if (strcmp(trimmed, "input") == 0)
+ if (sysfs_streq(page, "input"))
dir = GPIOD_IN;
- else if (strcmp(trimmed, "output-high") == 0)
+ else if (sysfs_streq(page, "output-high"))
dir = GPIOD_OUT_HIGH;
- else if (strcmp(trimmed, "output-low") == 0)
+ else if (sysfs_streq(page, "output-low"))
dir = GPIOD_OUT_LOW;
else
- dir = -EINVAL;
-
- kfree(trimmed);
-
- if (dir < 0) {
- mutex_unlock(&dev->lock);
- return dir;
- }
+ return -EINVAL;
hog->dir = dir;
- mutex_unlock(&dev->lock);
-
return count;
}
@@ -1326,9 +1263,8 @@ static void gpio_sim_hog_config_item_release(struct config_item *item)
struct gpio_sim_line *line = hog->parent;
struct gpio_sim_device *dev = gpio_sim_hog_get_device(hog);
- mutex_lock(&dev->lock);
- line->hog = NULL;
- mutex_unlock(&dev->lock);
+ scoped_guard(mutex, &dev->lock)
+ line->hog = NULL;
kfree(hog->name);
kfree(hog);
@@ -1354,13 +1290,11 @@ gpio_sim_line_config_make_hog_item(struct config_group *group, const char *name)
if (strcmp(name, "hog") != 0)
return ERR_PTR(-EINVAL);
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
hog = kzalloc(sizeof(*hog), GFP_KERNEL);
- if (!hog) {
- mutex_unlock(&dev->lock);
+ if (!hog)
return ERR_PTR(-ENOMEM);
- }
config_item_init_type_name(&hog->item, name,
&gpio_sim_hog_config_type);
@@ -1370,8 +1304,6 @@ gpio_sim_line_config_make_hog_item(struct config_group *group, const char *name)
hog->parent = line;
line->hog = hog;
- mutex_unlock(&dev->lock);
-
return &hog->item;
}
@@ -1380,9 +1312,8 @@ static void gpio_sim_line_config_group_release(struct config_item *item)
struct gpio_sim_line *line = to_gpio_sim_line(item);
struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
- mutex_lock(&dev->lock);
- list_del(&line->siblings);
- mutex_unlock(&dev->lock);
+ scoped_guard(mutex, &dev->lock)
+ list_del(&line->siblings);
kfree(line->name);
kfree(line);
@@ -1417,18 +1348,14 @@ gpio_sim_bank_config_make_line_group(struct config_group *group,
if (ret != 1 || nchar != strlen(name))
return ERR_PTR(-EINVAL);
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return ERR_PTR(-EBUSY);
- }
line = kzalloc(sizeof(*line), GFP_KERNEL);
- if (!line) {
- mutex_unlock(&dev->lock);
+ if (!line)
return ERR_PTR(-ENOMEM);
- }
config_group_init_type_name(&line->group, name,
&gpio_sim_line_config_type);
@@ -1437,8 +1364,6 @@ gpio_sim_bank_config_make_line_group(struct config_group *group,
line->offset = offset;
list_add_tail(&line->siblings, &bank->line_list);
- mutex_unlock(&dev->lock);
-
return &line->group;
}
@@ -1447,9 +1372,8 @@ static void gpio_sim_bank_config_group_release(struct config_item *item)
struct gpio_sim_bank *bank = to_gpio_sim_bank(item);
struct gpio_sim_device *dev = gpio_sim_bank_get_device(bank);
- mutex_lock(&dev->lock);
- list_del(&bank->siblings);
- mutex_unlock(&dev->lock);
+ scoped_guard(mutex, &dev->lock)
+ list_del(&bank->siblings);
kfree(bank->label);
kfree(bank);
@@ -1477,18 +1401,14 @@ gpio_sim_device_config_make_bank_group(struct config_group *group,
struct gpio_sim_device *dev = to_gpio_sim_device(&group->cg_item);
struct gpio_sim_bank *bank;
- mutex_lock(&dev->lock);
+ guard(mutex)(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev)) {
- mutex_unlock(&dev->lock);
+ if (gpio_sim_device_is_live_unlocked(dev))
return ERR_PTR(-EBUSY);
- }
bank = kzalloc(sizeof(*bank), GFP_KERNEL);
- if (!bank) {
- mutex_unlock(&dev->lock);
+ if (!bank)
return ERR_PTR(-ENOMEM);
- }
config_group_init_type_name(&bank->group, name,
&gpio_sim_bank_config_group_type);
@@ -1497,8 +1417,6 @@ gpio_sim_device_config_make_bank_group(struct config_group *group,
INIT_LIST_HEAD(&bank->line_list);
list_add_tail(&bank->siblings, &dev->bank_list);
- mutex_unlock(&dev->lock);
-
return &bank->group;
}
@@ -1506,10 +1424,10 @@ static void gpio_sim_device_config_group_release(struct config_item *item)
{
struct gpio_sim_device *dev = to_gpio_sim_device(item);
- mutex_lock(&dev->lock);
- if (gpio_sim_device_is_live_unlocked(dev))
- gpio_sim_device_deactivate_unlocked(dev);
- mutex_unlock(&dev->lock);
+ scoped_guard(mutex, &dev->lock) {
+ if (gpio_sim_device_is_live_unlocked(dev))
+ gpio_sim_device_deactivate_unlocked(dev);
+ }
mutex_destroy(&dev->lock);
ida_free(&gpio_sim_ida, dev->id);
@@ -1534,7 +1452,7 @@ static const struct config_item_type gpio_sim_device_config_group_type = {
static struct config_group *
gpio_sim_config_make_device_group(struct config_group *group, const char *name)
{
- struct gpio_sim_device *dev;
+ struct gpio_sim_device *dev __free(kfree) = NULL;
int id;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1542,10 +1460,8 @@ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
return ERR_PTR(-ENOMEM);
id = ida_alloc(&gpio_sim_ida, GFP_KERNEL);
- if (id < 0) {
- kfree(dev);
+ if (id < 0)
return ERR_PTR(id);
- }
config_group_init_type_name(&dev->group, name,
&gpio_sim_device_config_group_type);
@@ -1556,7 +1472,7 @@ gpio_sim_config_make_device_group(struct config_group *group, const char *name)
dev->bus_notifier.notifier_call = gpio_sim_bus_notifier_call;
init_completion(&dev->probe_completion);
- return &dev->group;
+ return &no_free_ptr(dev)->group;
}
static struct configfs_group_operations gpio_sim_config_group_ops = {
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 072b4e653216..c117c11bfb29 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -7,8 +7,8 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 4750ea34204c..053d616f2e02 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -4,11 +4,12 @@
* Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/mutex.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 6076937b18e7..6e1a2581e6ae 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -9,7 +9,6 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -249,8 +248,6 @@ static int syscon_gpio_probe(struct platform_device *pdev)
priv->chip.direction_output = syscon_gpio_dir_out;
}
- platform_set_drvdata(pdev, priv);
-
return devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
}
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 5b265a6fd3c1..ea715582bcf3 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio/driver.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 80d08ddde40e..d87dd06db40d 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index de14949a3fe5..bbd9e9191199 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -256,8 +256,6 @@ static int timbgpio_probe(struct platform_device *pdev)
if (err)
return err;
- platform_set_drvdata(pdev, tgpio);
-
/* make sure to disable interrupts */
iowrite32(0x0, tgpio->membase + TGPIO_IER);
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index e1d425a18854..d7d9d50dcddf 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -216,7 +216,7 @@ MODULE_DEVICE_TABLE(platform, tps65218_gpio_id_table);
static struct platform_driver tps65218_gpio_driver = {
.driver = {
.name = "tps65218-gpio",
- .of_match_table = of_match_ptr(tps65218_dt_match)
+ .of_match_table = tps65218_dt_match,
},
.probe = tps65218_gpio_probe,
.id_table = tps65218_gpio_id_table,
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index c5713524b581..d277aa951143 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mfd/tps6586x.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
/* GPIO control registers */
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 321e6945f0be..187d21580573 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps65910.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
struct tps65910_gpio {
struct gpio_chip gpio_chip;
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 6f8bd1155db7..3a28c1f273c3 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -277,8 +277,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
- platform_set_drvdata(pdev, gpio);
-
chip = &gpio->chip;
chip->label = "gpio-tqmx86";
chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 95d80ba14bee..4748e3d47106 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -7,8 +7,7 @@
#include <linux/gpio/driver.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#define DEFAULT_PIN_NUMBER 16
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index eba96319dac2..0f6397b77c9d 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -8,7 +8,7 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/regmap.h>
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 19ce6675cbc0..9725b7aa18a7 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -9,7 +9,6 @@
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index d3f3a69d4907..dbc7ba0ee72c 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -17,7 +17,6 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
@@ -259,7 +258,6 @@ static void vf610_gpio_disable_clk(void *data)
static int vf610_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
@@ -319,7 +317,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->parent = dev;
gc->label = dev_name(dev);
gc->ngpio = VF610_GPIO_PER_PORT;
- gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
+ gc->base = -1;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 69713fd5485b..8fd6c3913d69 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -240,8 +240,6 @@ static int vx855gpio_probe(struct platform_device *pdev)
if (!vg)
return -ENOMEM;
- platform_set_drvdata(pdev, vg);
-
dev_info(&pdev->dev, "found VX855 GPIO controller\n");
vg->io_gpi = res_gpi->start;
vg->io_gpo = res_gpo->start;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 817750e4e033..2bba27b13947 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019, Linaro Limited
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#define WCD_PIN_MASK(p) BIT(p)
#define WCD_REG_DIR_CTL_OFFSET 0x42
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index afb42a8e916f..6289b0510cf2 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -3,19 +3,18 @@
* GPIO driver for the WinSystems WS16C48
* Copyright (C) 2016 William Breathitt Gray
*/
-#include <linux/bitmap.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/err.h>
+#include <linux/gpio/regmap.h>
+#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#define WS16C48_EXTENT 11
@@ -31,371 +30,178 @@ static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
-/**
- * struct ws16c48_reg - device register structure
- * @port: Port 0 through 5 I/O
- * @int_pending: Interrupt Pending
- * @page_lock: Register page (Bits 7-6) and I/O port lock (Bits 5-0)
- * @pol_enab_int_id: Interrupt polarity, enable, and ID
- */
-struct ws16c48_reg {
- u8 port[6];
- u8 int_pending;
- u8 page_lock;
- u8 pol_enab_int_id[3];
+#define WS16C48_DAT_BASE 0x0
+#define WS16C48_PAGE_LOCK 0x7
+#define WS16C48_PAGE_BASE 0x8
+#define WS16C48_POL WS16C48_PAGE_BASE
+#define WS16C48_ENAB WS16C48_PAGE_BASE
+#define WS16C48_INT_ID WS16C48_PAGE_BASE
+
+#define PAGE_LOCK_PAGE_FIELD GENMASK(7, 6)
+#define POL_PAGE u8_encode_bits(1, PAGE_LOCK_PAGE_FIELD)
+#define ENAB_PAGE u8_encode_bits(2, PAGE_LOCK_PAGE_FIELD)
+#define INT_ID_PAGE u8_encode_bits(3, PAGE_LOCK_PAGE_FIELD)
+
+static const struct regmap_range ws16c48_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x5), regmap_reg_range(0x7, 0xA),
+};
+static const struct regmap_range ws16c48_rd_ranges[] = {
+ regmap_reg_range(0x0, 0xA),
+};
+static const struct regmap_range ws16c48_volatile_ranges[] = {
+ regmap_reg_range(0x0, 0x6), regmap_reg_range(0x8, 0xA),
+};
+static const struct regmap_access_table ws16c48_wr_table = {
+ .yes_ranges = ws16c48_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ws16c48_wr_ranges),
+};
+static const struct regmap_access_table ws16c48_rd_table = {
+ .yes_ranges = ws16c48_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ws16c48_rd_ranges),
+};
+static const struct regmap_access_table ws16c48_volatile_table = {
+ .yes_ranges = ws16c48_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ws16c48_volatile_ranges),
+};
+static const struct regmap_config ws16c48_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .wr_table = &ws16c48_wr_table,
+ .rd_table = &ws16c48_rd_table,
+ .volatile_table = &ws16c48_volatile_table,
+ .cache_type = REGCACHE_FLAT,
+ .use_raw_spinlock = true,
+};
+
+#define WS16C48_NGPIO_PER_REG 8
+#define WS16C48_REGMAP_IRQ(_id) \
+ [_id] = { \
+ .reg_offset = (_id) / WS16C48_NGPIO_PER_REG, \
+ .mask = BIT((_id) % WS16C48_NGPIO_PER_REG), \
+ .type = { \
+ .type_reg_offset = (_id) / WS16C48_NGPIO_PER_REG, \
+ .types_supported = IRQ_TYPE_EDGE_BOTH, \
+ }, \
+ }
+
+/* Only the first 24 lines (Port 0-2) support interrupts */
+#define WS16C48_NUM_IRQS 24
+static const struct regmap_irq ws16c48_regmap_irqs[WS16C48_NUM_IRQS] = {
+ WS16C48_REGMAP_IRQ(0), WS16C48_REGMAP_IRQ(1), WS16C48_REGMAP_IRQ(2), /* 0-2 */
+ WS16C48_REGMAP_IRQ(3), WS16C48_REGMAP_IRQ(4), WS16C48_REGMAP_IRQ(5), /* 3-5 */
+ WS16C48_REGMAP_IRQ(6), WS16C48_REGMAP_IRQ(7), WS16C48_REGMAP_IRQ(8), /* 6-8 */
+ WS16C48_REGMAP_IRQ(9), WS16C48_REGMAP_IRQ(10), WS16C48_REGMAP_IRQ(11), /* 9-11 */
+ WS16C48_REGMAP_IRQ(12), WS16C48_REGMAP_IRQ(13), WS16C48_REGMAP_IRQ(14), /* 12-14 */
+ WS16C48_REGMAP_IRQ(15), WS16C48_REGMAP_IRQ(16), WS16C48_REGMAP_IRQ(17), /* 15-17 */
+ WS16C48_REGMAP_IRQ(18), WS16C48_REGMAP_IRQ(19), WS16C48_REGMAP_IRQ(20), /* 18-20 */
+ WS16C48_REGMAP_IRQ(21), WS16C48_REGMAP_IRQ(22), WS16C48_REGMAP_IRQ(23), /* 21-23 */
};
/**
* struct ws16c48_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @io_state: bit I/O state (whether bit is set to input or output)
- * @out_state: output bits state
+ * @map: regmap for the device
* @lock: synchronization lock to prevent I/O race conditions
* @irq_mask: I/O bits affected by interrupts
- * @flow_mask: IRQ flow type mask for the respective I/O bits
- * @reg: I/O address offset for the device registers
*/
struct ws16c48_gpio {
- struct gpio_chip chip;
- unsigned char io_state[6];
- unsigned char out_state[6];
+ struct regmap *map;
raw_spinlock_t lock;
- unsigned long irq_mask;
- unsigned long flow_mask;
- struct ws16c48_reg __iomem *reg;
+ u8 irq_mask[WS16C48_NUM_IRQS / WS16C48_NGPIO_PER_REG];
};
-static int ws16c48_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int ws16c48_handle_pre_irq(void *const irq_drv_data) __acquires(&ws16c48gpio->lock)
{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
+ struct ws16c48_gpio *const ws16c48gpio = irq_drv_data;
- if (ws16c48gpio->io_state[port] & mask)
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int ws16c48_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
-
- ws16c48gpio->io_state[port] |= mask;
- ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ /* Lock to prevent Page/Lock register change while we handle IRQ */
+ raw_spin_lock(&ws16c48gpio->lock);
return 0;
}
-static int ws16c48_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+static int ws16c48_handle_post_irq(void *const irq_drv_data) __releases(&ws16c48gpio->lock)
{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ struct ws16c48_gpio *const ws16c48gpio = irq_drv_data;
- ws16c48gpio->io_state[port] &= ~mask;
- if (value)
- ws16c48gpio->out_state[port] |= mask;
- else
- ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ raw_spin_unlock(&ws16c48gpio->lock);
return 0;
}
-static int ws16c48_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- unsigned long flags;
- unsigned port_state;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
-
- /* ensure that GPIO is set for input */
- if (!(ws16c48gpio->io_state[port] & mask)) {
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
- return -EINVAL;
- }
-
- port_state = ioread8(ws16c48gpio->reg->port + port);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
-
- return !!(port_state & mask);
-}
-
-static int ws16c48_gpio_get_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- u8 __iomem *port_addr;
- unsigned long port_state;
-
- /* clear bits array to a clean slate */
- bitmap_zero(bits, chip->ngpio);
-
- for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
- index = offset / 8;
- port_addr = ws16c48gpio->reg->port + index;
- port_state = ioread8(port_addr) & gpio_mask;
-
- bitmap_set_value8(bits, port_state, offset);
- }
-
- return 0;
-}
-
-static void ws16c48_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
-
- /* ensure that GPIO is set for output */
- if (ws16c48gpio->io_state[port] & mask) {
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
- return;
- }
-
- if (value)
- ws16c48gpio->out_state[port] |= mask;
- else
- ws16c48gpio->out_state[port] &= ~mask;
- iowrite8(ws16c48gpio->out_state[port], ws16c48gpio->reg->port + port);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
-}
-
-static void ws16c48_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- unsigned long offset;
- unsigned long gpio_mask;
- size_t index;
- u8 __iomem *port_addr;
- unsigned long bitmask;
- unsigned long flags;
-
- for_each_set_clump8(offset, gpio_mask, mask, chip->ngpio) {
- index = offset / 8;
- port_addr = ws16c48gpio->reg->port + index;
-
- /* mask out GPIO configured for input */
- gpio_mask &= ~ws16c48gpio->io_state[index];
- bitmask = bitmap_get_value8(bits, offset) & gpio_mask;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
-
- /* update output state data and set device gpio register */
- ws16c48gpio->out_state[index] &= ~gpio_mask;
- ws16c48gpio->out_state[index] |= bitmask;
- iowrite8(ws16c48gpio->out_state[index], port_addr);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
- }
-}
-
-static void ws16c48_irq_ack(struct irq_data *data)
+static int ws16c48_handle_mask_sync(const int index, const unsigned int mask_buf_def,
+ const unsigned int mask_buf, void *const irq_drv_data)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- const unsigned port = offset / 8;
- const unsigned mask = BIT(offset % 8);
+ struct ws16c48_gpio *const ws16c48gpio = irq_drv_data;
unsigned long flags;
- unsigned port_state;
-
- /* only the first 3 ports support interrupts */
- if (port > 2)
- return;
+ int ret = 0;
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
- port_state = ws16c48gpio->irq_mask >> (8*port);
+ /* exit early if no change since the last mask sync */
+ if (mask_buf == ws16c48gpio->irq_mask[index])
+ goto exit_unlock;
+ ws16c48gpio->irq_mask[index] = mask_buf;
- /* Select Register Page 2; Unlock all I/O ports */
- iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+ ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, ENAB_PAGE);
+ if (ret)
+ goto exit_unlock;
- /* Clear pending interrupt */
- iowrite8(port_state & ~mask, ws16c48gpio->reg->pol_enab_int_id + port);
- iowrite8(port_state | mask, ws16c48gpio->reg->pol_enab_int_id + port);
+ /* Update ENAB register (inverted mask) */
+ ret = regmap_write(ws16c48gpio->map, WS16C48_ENAB + index, ~mask_buf);
+ if (ret)
+ goto exit_unlock;
- /* Select Register Page 3; Unlock all I/O ports */
- iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
+ ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, INT_ID_PAGE);
+ if (ret)
+ goto exit_unlock;
+exit_unlock:
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
-}
-
-static void ws16c48_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- const unsigned long mask = BIT(offset);
- const unsigned port = offset / 8;
- unsigned long flags;
- unsigned long port_state;
-
- /* only the first 3 ports support interrupts */
- if (port > 2)
- return;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
- ws16c48gpio->irq_mask &= ~mask;
- gpiochip_disable_irq(chip, offset);
- port_state = ws16c48gpio->irq_mask >> (8 * port);
-
- /* Select Register Page 2; Unlock all I/O ports */
- iowrite8(0x80, &ws16c48gpio->reg->page_lock);
-
- /* Disable interrupt */
- iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
-
- /* Select Register Page 3; Unlock all I/O ports */
- iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
+ return ret;
}
-static void ws16c48_irq_unmask(struct irq_data *data)
+static int ws16c48_set_type_config(unsigned int **const buf, const unsigned int type,
+ const struct regmap_irq *const irq_data, const int idx,
+ void *const irq_drv_data)
{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- const unsigned long mask = BIT(offset);
- const unsigned port = offset / 8;
+ struct ws16c48_gpio *const ws16c48gpio = irq_drv_data;
+ unsigned int polarity;
unsigned long flags;
- unsigned long port_state;
-
- /* only the first 3 ports support interrupts */
- if (port > 2)
- return;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
-
- gpiochip_enable_irq(chip, offset);
- ws16c48gpio->irq_mask |= mask;
- port_state = ws16c48gpio->irq_mask >> (8 * port);
-
- /* Select Register Page 2; Unlock all I/O ports */
- iowrite8(0x80, &ws16c48gpio->reg->page_lock);
-
- /* Enable interrupt */
- iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
-
- /* Select Register Page 3; Unlock all I/O ports */
- iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
-
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
-}
+ int ret;
-static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- const unsigned long mask = BIT(offset);
- const unsigned port = offset / 8;
- unsigned long flags;
- unsigned long port_state;
-
- /* only the first 3 ports support interrupts */
- if (port > 2)
- return -EINVAL;
-
- raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
-
- switch (flow_type) {
- case IRQ_TYPE_NONE:
- break;
+ switch (type) {
case IRQ_TYPE_EDGE_RISING:
- ws16c48gpio->flow_mask |= mask;
+ polarity = irq_data->mask;
break;
case IRQ_TYPE_EDGE_FALLING:
- ws16c48gpio->flow_mask &= ~mask;
+ polarity = 0;
break;
default:
- raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
return -EINVAL;
}
- port_state = ws16c48gpio->flow_mask >> (8 * port);
+ raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
- /* Select Register Page 1; Unlock all I/O ports */
- iowrite8(0x40, &ws16c48gpio->reg->page_lock);
+ ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, POL_PAGE);
+ if (ret)
+ goto exit_unlock;
/* Set interrupt polarity */
- iowrite8(port_state, ws16c48gpio->reg->pol_enab_int_id + port);
+ ret = regmap_update_bits(ws16c48gpio->map, WS16C48_POL + idx, irq_data->mask, polarity);
+ if (ret)
+ goto exit_unlock;
- /* Select Register Page 3; Unlock all I/O ports */
- iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
+ ret = regmap_write(ws16c48gpio->map, WS16C48_PAGE_LOCK, INT_ID_PAGE);
+ if (ret)
+ goto exit_unlock;
+exit_unlock:
raw_spin_unlock_irqrestore(&ws16c48gpio->lock, flags);
- return 0;
-}
-
-static const struct irq_chip ws16c48_irqchip = {
- .name = "ws16c48",
- .irq_ack = ws16c48_irq_ack,
- .irq_mask = ws16c48_irq_mask,
- .irq_unmask = ws16c48_irq_unmask,
- .irq_set_type = ws16c48_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
-};
-
-static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
-{
- struct ws16c48_gpio *const ws16c48gpio = dev_id;
- struct gpio_chip *const chip = &ws16c48gpio->chip;
- struct ws16c48_reg __iomem *const reg = ws16c48gpio->reg;
- unsigned long int_pending;
- unsigned long port;
- unsigned long int_id;
- unsigned long gpio;
-
- int_pending = ioread8(&reg->int_pending) & 0x7;
- if (!int_pending)
- return IRQ_NONE;
-
- /* loop until all pending interrupts are handled */
- do {
- for_each_set_bit(port, &int_pending, 3) {
- int_id = ioread8(reg->pol_enab_int_id + port);
- for_each_set_bit(gpio, &int_id, 8)
- generic_handle_domain_irq(chip->irq.domain,
- gpio + 8*port);
- }
-
- int_pending = ioread8(&reg->int_pending) & 0x7;
- } while (int_pending);
-
- return IRQ_HANDLED;
+ return ret;
}
#define WS16C48_NGPIO 48
@@ -414,30 +220,37 @@ static const char *ws16c48_names[WS16C48_NGPIO] = {
"Port 5 Bit 4", "Port 5 Bit 5", "Port 5 Bit 6", "Port 5 Bit 7"
};
-static int ws16c48_irq_init_hw(struct gpio_chip *gc)
+static int ws16c48_irq_init_hw(struct regmap *const map)
{
- struct ws16c48_gpio *const ws16c48gpio = gpiochip_get_data(gc);
+ int err;
- /* Select Register Page 2; Unlock all I/O ports */
- iowrite8(0x80, &ws16c48gpio->reg->page_lock);
+ err = regmap_write(map, WS16C48_PAGE_LOCK, ENAB_PAGE);
+ if (err)
+ return err;
/* Disable interrupts for all lines */
- iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[0]);
- iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[1]);
- iowrite8(0, &ws16c48gpio->reg->pol_enab_int_id[2]);
-
- /* Select Register Page 3; Unlock all I/O ports */
- iowrite8(0xC0, &ws16c48gpio->reg->page_lock);
+ err = regmap_write(map, WS16C48_ENAB + 0, 0x00);
+ if (err)
+ return err;
+ err = regmap_write(map, WS16C48_ENAB + 1, 0x00);
+ if (err)
+ return err;
+ err = regmap_write(map, WS16C48_ENAB + 2, 0x00);
+ if (err)
+ return err;
- return 0;
+ return regmap_write(map, WS16C48_PAGE_LOCK, INT_ID_PAGE);
}
static int ws16c48_probe(struct device *dev, unsigned int id)
{
struct ws16c48_gpio *ws16c48gpio;
const char *const name = dev_name(dev);
- struct gpio_irq_chip *girq;
int err;
+ struct gpio_regmap_config gpio_config = {};
+ void __iomem *regs;
+ struct regmap_irq_chip *chip;
+ struct regmap_irq_chip_data *chip_data;
ws16c48gpio = devm_kzalloc(dev, sizeof(*ws16c48gpio), GFP_KERNEL);
if (!ws16c48gpio)
@@ -449,50 +262,55 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- ws16c48gpio->reg = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
- if (!ws16c48gpio->reg)
+ regs = devm_ioport_map(dev, base[id], WS16C48_EXTENT);
+ if (!regs)
return -ENOMEM;
- ws16c48gpio->chip.label = name;
- ws16c48gpio->chip.parent = dev;
- ws16c48gpio->chip.owner = THIS_MODULE;
- ws16c48gpio->chip.base = -1;
- ws16c48gpio->chip.ngpio = WS16C48_NGPIO;
- ws16c48gpio->chip.names = ws16c48_names;
- ws16c48gpio->chip.get_direction = ws16c48_gpio_get_direction;
- ws16c48gpio->chip.direction_input = ws16c48_gpio_direction_input;
- ws16c48gpio->chip.direction_output = ws16c48_gpio_direction_output;
- ws16c48gpio->chip.get = ws16c48_gpio_get;
- ws16c48gpio->chip.get_multiple = ws16c48_gpio_get_multiple;
- ws16c48gpio->chip.set = ws16c48_gpio_set;
- ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
-
- girq = &ws16c48gpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &ws16c48_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = ws16c48_irq_init_hw;
+ ws16c48gpio->map = devm_regmap_init_mmio(dev, regs, &ws16c48_regmap_config);
+ if (IS_ERR(ws16c48gpio->map))
+ return dev_err_probe(dev, PTR_ERR(ws16c48gpio->map),
+ "Unable to initialize register map\n");
- raw_spin_lock_init(&ws16c48gpio->lock);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
- err = devm_gpiochip_add_data(dev, &ws16c48gpio->chip, ws16c48gpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
+ chip->name = name;
+ chip->status_base = WS16C48_INT_ID;
+ chip->mask_base = WS16C48_ENAB;
+ chip->ack_base = WS16C48_INT_ID;
+ chip->num_regs = 3;
+ chip->irqs = ws16c48_regmap_irqs;
+ chip->num_irqs = ARRAY_SIZE(ws16c48_regmap_irqs);
+ chip->handle_pre_irq = ws16c48_handle_pre_irq;
+ chip->handle_post_irq = ws16c48_handle_post_irq;
+ chip->handle_mask_sync = ws16c48_handle_mask_sync;
+ chip->set_type_config = ws16c48_set_type_config;
+ chip->irq_drv_data = ws16c48gpio;
- err = devm_request_irq(dev, irq[id], ws16c48_irq_handler, IRQF_SHARED,
- name, ws16c48gpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ raw_spin_lock_init(&ws16c48gpio->lock);
+
+ /* Initialize to prevent spurious interrupts before we're ready */
+ err = ws16c48_irq_init_hw(ws16c48gpio->map);
+ if (err)
return err;
- }
- return 0;
+ err = devm_regmap_add_irq_chip(dev, ws16c48gpio->map, irq[id], 0, 0, chip, &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ gpio_config.parent = dev;
+ gpio_config.regmap = ws16c48gpio->map;
+ gpio_config.ngpio = WS16C48_NGPIO;
+ gpio_config.names = ws16c48_names;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(WS16C48_DAT_BASE);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(WS16C48_DAT_BASE);
+ /* Setting a GPIO to 0 allows it to be used as an input */
+ gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(WS16C48_DAT_BASE);
+ gpio_config.ngpio_per_reg = WS16C48_NGPIO_PER_REG;
+ gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
static struct isa_driver ws16c48_driver = {
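The probe conversion above relies on a ws16c48_regmap_config and the WS16C48_* register macros defined earlier in this patch but not shown in this excerpt. As a rough, illustrative sketch of the shape such a config takes for an 8-bit device living in I/O port space (the field values below are assumptions for illustration, not the driver's actual definition):

static const struct regmap_config example_ws16c48_like_config = {
	.reg_bits = 8,		/* 8-bit register addresses */
	.reg_stride = 1,
	.val_bits = 8,		/* 8-bit register values */
	.io_port = true,	/* registers sit in I/O port space, not MMIO */
	.max_register = 0x0a,	/* assumed extent of the register block */
};

With a config like this, devm_regmap_init_mmio() over the devm_ioport_map()ed base yields the regmap that both devm_regmap_add_irq_chip() and gpio-regmap consume in the hunk above.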
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 1fa66f2a667f..a16945e8319e 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -15,8 +15,8 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index bbc06cdd9634..dc2710c21c50 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -8,9 +8,9 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/seq_file.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
@@ -194,7 +194,7 @@ static const struct spi_device_id xra1403_ids[] = {
};
MODULE_DEVICE_TABLE(spi, xra1403_ids);
-static const struct of_device_id xra1403_spi_of_match[] __maybe_unused = {
+static const struct of_device_id xra1403_spi_of_match[] = {
{ .compatible = "exar,xra1403" },
{},
};
@@ -205,7 +205,7 @@ static struct spi_driver xra1403_driver = {
.id_table = xra1403_ids,
.driver = {
.name = "xra1403",
- .of_match_table = of_match_ptr(xra1403_spi_of_match),
+ .of_match_table = xra1403_spi_of_match,
},
};
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index f0f571b323f2..2de61337ad3b 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -176,8 +176,6 @@ static int zevio_gpio_probe(struct platform_device *pdev)
if (!controller)
return -ENOMEM;
- platform_set_drvdata(pdev, controller);
-
/* Copy our reference */
controller->chip = zevio_gpio_chip;
controller->chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 97496c0f9133..fbda452fb4d6 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -128,7 +128,7 @@ static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
- return ACPI_HANDLE_FWNODE(gc->fwnode) == data;
+ return device_match_acpi_handle(&gc->gpiodev->dev, data);
}
/**
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index 0a33971c964c..e39d344feb28 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -230,9 +230,7 @@ static long linehandle_set_config(struct linehandle_state *lh,
return ret;
}
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIO_V2_LINE_CHANGED_CONFIG,
- desc);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
return 0;
}
@@ -414,8 +412,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
goto out_free_lh;
}
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIO_V2_LINE_CHANGED_REQUESTED, desc);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
@@ -555,6 +552,7 @@ struct line {
* @label: consumer label used to tag GPIO descriptors
* @num_lines: the number of lines in the lines array
* @wait: wait queue that handles blocking reads of events
+ * @device_unregistered_nb: notifier block for receiving gdev unregister events
* @event_buffer_size: the number of elements allocated in @events
* @events: KFIFO for the GPIO events
* @seqno: the sequence number for edge events generated on all lines in
@@ -569,6 +567,7 @@ struct linereq {
const char *label;
u32 num_lines;
wait_queue_head_t wait;
+ struct notifier_block device_unregistered_nb;
u32 event_buffer_size;
DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
atomic_t seqno;
@@ -610,6 +609,17 @@ struct linereq {
GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
GPIO_V2_LINE_EDGE_FLAGS)
+static int linereq_unregistered_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct linereq *lr = container_of(nb, struct linereq,
+ device_unregistered_nb);
+
+ wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);
+
+ return NOTIFY_OK;
+}
+
static void linereq_put_event(struct linereq *lr,
struct gpio_v2_line_event *le)
{
@@ -1407,9 +1417,7 @@ static long linereq_set_config_unlocked(struct linereq *lr,
WRITE_ONCE(line->edflags, edflags);
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIO_V2_LINE_CHANGED_CONFIG,
- desc);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
return 0;
}
@@ -1567,6 +1575,10 @@ static void linereq_free(struct linereq *lr)
{
unsigned int i;
+ if (lr->device_unregistered_nb.notifier_call)
+ blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
+ &lr->device_unregistered_nb);
+
for (i = 0; i < lr->num_lines; i++) {
if (lr->lines[i].desc) {
edge_detector_stop(&lr->lines[i]);
@@ -1720,13 +1732,18 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
lr->lines[i].edflags = edflags;
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIO_V2_LINE_CHANGED_REQUESTED, desc);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
}
+ lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
+ ret = blocking_notifier_chain_register(&gdev->device_notifier,
+ &lr->device_unregistered_nb);
+ if (ret)
+ goto out_free_linereq;
+
fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
if (fd < 0) {
ret = fd;
@@ -1779,6 +1796,7 @@ out_free_linereq:
* @eflags: the event flags this line was requested with
* @irq: the interrupt that trigger in response to events on this GPIO
* @wait: wait queue that handles blocking reads of events
+ * @device_unregistered_nb: notifier block for receiving gdev unregister events
* @events: KFIFO for the GPIO events
* @timestamp: cache for the timestamp storing it between hardirq
* and IRQ thread, used to bring the timestamp close to the actual
@@ -1791,6 +1809,7 @@ struct lineevent_state {
u32 eflags;
int irq;
wait_queue_head_t wait;
+ struct notifier_block device_unregistered_nb;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
u64 timestamp;
};
@@ -1824,6 +1843,17 @@ static __poll_t lineevent_poll(struct file *file,
return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
}
+static int lineevent_unregistered_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct lineevent_state *le = container_of(nb, struct lineevent_state,
+ device_unregistered_nb);
+
+ wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
+
+ return NOTIFY_OK;
+}
+
struct compat_gpioeevent_data {
compat_u64 timestamp;
u32 id;
@@ -1909,6 +1939,9 @@ static ssize_t lineevent_read(struct file *file, char __user *buf,
static void lineevent_free(struct lineevent_state *le)
{
+ if (le->device_unregistered_nb.notifier_call)
+ blocking_notifier_chain_unregister(&le->gdev->device_notifier,
+ &le->device_unregistered_nb);
if (le->irq)
free_irq(le->irq, le);
if (le->desc)
@@ -2117,8 +2150,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_le;
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIO_V2_LINE_CHANGED_REQUESTED, desc);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
irq = gpiod_to_irq(desc);
if (irq <= 0) {
@@ -2137,6 +2169,12 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
INIT_KFIFO(le->events);
init_waitqueue_head(&le->wait);
+ le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
+ ret = blocking_notifier_chain_register(&gdev->device_notifier,
+ &le->device_unregistered_nb);
+ if (ret)
+ goto out_free_le;
+
/* Request a thread to read the events */
ret = request_threaded_irq(irq,
lineevent_irq_handler,
@@ -2320,6 +2358,7 @@ struct gpio_chardev_data {
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
struct notifier_block lineinfo_changed_nb;
+ struct notifier_block device_unregistered_nb;
unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
atomic_t watch_abi_version;
@@ -2491,16 +2530,11 @@ static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
}
#endif
-static struct gpio_chardev_data *
-to_gpio_chardev_data(struct notifier_block *nb)
-{
- return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
-}
-
static int lineinfo_changed_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct gpio_chardev_data *cdev = to_gpio_chardev_data(nb);
+ struct gpio_chardev_data *cdev =
+ container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
struct gpio_v2_line_info_changed chg;
struct gpio_desc *desc = data;
int ret;
@@ -2522,6 +2556,18 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int gpio_device_unregistered_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct gpio_chardev_data *cdev = container_of(nb,
+ struct gpio_chardev_data,
+ device_unregistered_nb);
+
+ wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
+
+ return NOTIFY_OK;
+}
+
static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
struct poll_table_struct *pollt)
{
@@ -2671,23 +2717,33 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
- ret = blocking_notifier_chain_register(&gdev->notifier,
+ ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
if (ret)
goto out_free_bitmap;
+ cdev->device_unregistered_nb.notifier_call =
+ gpio_device_unregistered_notify;
+ ret = blocking_notifier_chain_register(&gdev->device_notifier,
+ &cdev->device_unregistered_nb);
+ if (ret)
+ goto out_unregister_line_notifier;
+
file->private_data = cdev;
ret = nonseekable_open(inode, file);
if (ret)
- goto out_unregister_notifier;
+ goto out_unregister_device_notifier;
up_read(&gdev->sem);
return ret;
-out_unregister_notifier:
- blocking_notifier_chain_unregister(&gdev->notifier,
+out_unregister_device_notifier:
+ blocking_notifier_chain_unregister(&gdev->device_notifier,
+ &cdev->device_unregistered_nb);
+out_unregister_line_notifier:
+ blocking_notifier_chain_unregister(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
out_free_bitmap:
gpio_device_put(gdev);
@@ -2711,7 +2767,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
struct gpio_device *gdev = cdev->gdev;
bitmap_free(cdev->watched_lines);
- blocking_notifier_chain_unregister(&gdev->notifier,
+ blocking_notifier_chain_unregister(&gdev->device_notifier,
+ &cdev->device_unregistered_nb);
+ blocking_notifier_chain_unregister(&gdev->line_state_notifier,
&cdev->lineinfo_changed_nb);
gpio_device_put(gdev);
kfree(cdev);
@@ -2753,4 +2811,5 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
cdev_device_del(&gdev->chrdev, &gdev->dev);
+ blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
}
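The cdev changes in this file all follow one blocking-notifier pattern: each open handle registers a notifier_block on the gpio_device's device_notifier and merely wakes its own poll wait queue from the callback, while gpiolib_cdev_unregister() fires the chain once when the chip goes away. A minimal, self-contained sketch of that pattern (all example_* names are illustrative, not gpiolib symbols):

#include <linux/notifier.h>
#include <linux/poll.h>
#include <linux/wait.h>

static BLOCKING_NOTIFIER_HEAD(example_device_notifier);

struct example_client {
	wait_queue_head_t wait;
	struct notifier_block unregistered_nb;
};

static int example_unregistered_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct example_client *client =
		container_of(nb, struct example_client, unregistered_nb);

	/* Wake poll()ers so they can notice the device disappeared. */
	wake_up_poll(&client->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

/* Open path: hook the client into the notifier chain. */
static int example_client_init(struct example_client *client)
{
	init_waitqueue_head(&client->wait);
	client->unregistered_nb.notifier_call = example_unregistered_notify;

	return blocking_notifier_chain_register(&example_device_notifier,
						&client->unregistered_nb);
}

/* Unregister path: one call wakes every registered client. */
static void example_device_unregister(void)
{
	blocking_notifier_call_chain(&example_device_notifier, 0, NULL);
}

The release path mirrors example_client_init() with blocking_notifier_chain_unregister(), exactly as linereq_free(), lineevent_free() and gpio_chrdev_release() do above.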
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 1436cdb5fa26..531faabead0f 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -209,6 +209,8 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
const char *propname,
enum of_gpio_flags *flags)
{
+ const struct device_node *np_compat = np;
+ const struct device_node *np_propname = np;
static const struct {
const char *compatible;
const char *gpio_propname;
@@ -253,14 +255,28 @@ static void of_gpio_set_polarity_by_property(const struct device_node *np,
{ "regulator-gpio", "enable-gpio", "enable-active-high" },
{ "regulator-gpio", "enable-gpios", "enable-active-high" },
#endif
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ { "atmel,hsmci", "cd-gpios", "cd-inverted" },
+#endif
};
unsigned int i;
bool active_high;
+#if IS_ENABLED(CONFIG_MMC_ATMELMCI)
+ /*
+	 * The Atmel HSMCI has its compatible property in the parent node and
+	 * its gpio property in a child node
+ */
+ if (of_device_is_compatible(np->parent, "atmel,hsmci")) {
+ np_compat = np->parent;
+ np_propname = np;
+ }
+#endif
+
for (i = 0; i < ARRAY_SIZE(gpios); i++) {
- if (of_device_is_compatible(np, gpios[i].compatible) &&
+ if (of_device_is_compatible(np_compat, gpios[i].compatible) &&
!strcmp(propname, gpios[i].gpio_propname)) {
- active_high = of_property_read_bool(np,
+ active_high = of_property_read_bool(np_propname,
gpios[i].polarity_propname);
of_gpio_quirk_polarity(np, active_high, flags);
break;
@@ -1078,16 +1094,16 @@ int of_gpiochip_add(struct gpio_chip *chip)
if (ret)
return ret;
- fwnode_handle_get(chip->fwnode);
+ of_node_get(np);
ret = of_gpiochip_scan_gpios(chip);
if (ret)
- fwnode_handle_put(chip->fwnode);
+ of_node_put(np);
return ret;
}
void of_gpiochip_remove(struct gpio_chip *chip)
{
- fwnode_handle_put(chip->fwnode);
+ of_node_put(dev_of_node(&chip->gpiodev->dev));
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 76e0c38026c3..40a0022ea719 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -700,6 +700,40 @@ void *gpiochip_get_data(struct gpio_chip *gc)
}
EXPORT_SYMBOL_GPL(gpiochip_get_data);
+int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
+{
+ u32 ngpios = gc->ngpio;
+ int ret;
+
+ if (ngpios == 0) {
+ ret = device_property_read_u32(dev, "ngpios", &ngpios);
+ if (ret == -ENODATA)
+ /*
+ * -ENODATA means that there is no property found and
+ * we want to issue the error message to the user.
+ * Besides that, we want to return different error code
+ * to state that supplied value is not valid.
+ */
+ ngpios = 0;
+ else if (ret)
+ return ret;
+
+ gc->ngpio = ngpios;
+ }
+
+ if (gc->ngpio == 0) {
+ chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
+ return -EINVAL;
+ }
+
+ if (gc->ngpio > FASTPATH_NGPIO)
+ chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gpiochip_get_ngpios);
+
int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
@@ -707,18 +741,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
struct gpio_device *gdev;
unsigned long flags;
unsigned int i;
- u32 ngpios = 0;
int base = 0;
int ret = 0;
/*
- * If the calling driver did not initialize firmware node, do it here
- * using the parent device, if any.
- */
- if (!gc->fwnode && gc->parent)
- gc->fwnode = dev_fwnode(gc->parent);
-
- /*
* First: allocate and populate the internal stat container, and
* set up the struct device.
*/
@@ -732,7 +758,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gc->gpiodev = gdev;
gpiochip_set_data(gc, data);
- device_set_node(&gdev->dev, gc->fwnode);
+ /*
+ * If the calling driver did not initialize firmware node,
+ * do it here using the parent device, if any.
+ */
+ if (gc->fwnode)
+ device_set_node(&gdev->dev, gc->fwnode);
+ else if (gc->parent)
+ device_set_node(&gdev->dev, dev_fwnode(gc->parent));
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
@@ -753,36 +786,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
else
gdev->owner = THIS_MODULE;
- /*
- * Try the device properties if the driver didn't supply the number
- * of GPIO lines.
- */
- ngpios = gc->ngpio;
- if (ngpios == 0) {
- ret = device_property_read_u32(&gdev->dev, "ngpios", &ngpios);
- if (ret == -ENODATA)
- /*
- * -ENODATA means that there is no property found and
- * we want to issue the error message to the user.
- * Besides that, we want to return different error code
- * to state that supplied value is not valid.
- */
- ngpios = 0;
- else if (ret)
- goto err_free_dev_name;
-
- gc->ngpio = ngpios;
- }
-
- if (gc->ngpio == 0) {
- chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
- ret = -EINVAL;
+ ret = gpiochip_get_ngpios(gc, &gdev->dev);
+ if (ret)
goto err_free_dev_name;
- }
-
- if (gc->ngpio > FASTPATH_NGPIO)
- chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
- gc->ngpio, FASTPATH_NGPIO);
gdev->descs = kcalloc(gc->ngpio, sizeof(*gdev->descs), GFP_KERNEL);
if (!gdev->descs) {
@@ -841,7 +847,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
spin_unlock_irqrestore(&gpio_lock, flags);
- BLOCKING_INIT_NOTIFIER_HEAD(&gdev->notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier);
init_rwsem(&gdev->sem);
#ifdef CONFIG_PINCTRL
@@ -947,7 +954,7 @@ err_print_message:
/* failures here can mean systems won't boot... */
if (ret != -EPROBE_DEFER) {
pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
- base, base + (int)ngpios - 1,
+ base, base + (int)gc->ngpio - 1,
gc->label ? : "generic", ret);
}
return ret;
@@ -1292,12 +1299,14 @@ static void gpiochip_hierarchy_setup_domain_ops(struct irq_domain_ops *ops)
ops->free = irq_domain_free_irqs_common;
}
-static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
+static struct irq_domain *gpiochip_hierarchy_create_domain(struct gpio_chip *gc)
{
+ struct irq_domain *domain;
+
if (!gc->irq.child_to_parent_hwirq ||
!gc->irq.fwnode) {
chip_err(gc, "missing irqdomain vital data\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if (!gc->irq.child_offset_to_irq)
@@ -1309,7 +1318,7 @@ static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
gpiochip_hierarchy_setup_domain_ops(&gc->irq.child_irq_domain_ops);
- gc->irq.domain = irq_domain_create_hierarchy(
+ domain = irq_domain_create_hierarchy(
gc->irq.parent_domain,
0,
gc->ngpio,
@@ -1317,12 +1326,12 @@ static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
&gc->irq.child_irq_domain_ops,
gc);
- if (!gc->irq.domain)
- return -ENOMEM;
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
gpiochip_set_hierarchical_irqchip(gc, gc->irq.chip);
- return 0;
+ return domain;
}
static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
@@ -1366,9 +1375,9 @@ EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_fourcell);
#else
-static int gpiochip_hierarchy_add_domain(struct gpio_chip *gc)
+static struct irq_domain *gpiochip_hierarchy_create_domain(struct gpio_chip *gc)
{
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc)
@@ -1445,6 +1454,19 @@ static const struct irq_domain_ops gpiochip_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+static struct irq_domain *gpiochip_simple_create_domain(struct gpio_chip *gc)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev);
+ struct irq_domain *domain;
+
+ domain = irq_domain_create_simple(fwnode, gc->ngpio, gc->irq.first,
+ &gpiochip_domain_ops, gc);
+ if (!domain)
+ return ERR_PTR(-EINVAL);
+
+ return domain;
+}
+
/*
* TODO: move these activate/deactivate in under the hierarchicial
* irqchip implementation as static once SPMI and SSBI (all external
@@ -1623,6 +1645,31 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
}
}
+static int gpiochip_irqchip_add_allocated_domain(struct gpio_chip *gc,
+ struct irq_domain *domain,
+ bool allocated_externally)
+{
+ if (!domain)
+ return -EINVAL;
+
+ if (gc->to_irq)
+ chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
+ gc->to_irq = gpiochip_to_irq;
+ gc->irq.domain = domain;
+ gc->irq.domain_is_allocated_externally = allocated_externally;
+
+ /*
+ * Using barrier() here to prevent compiler from reordering
+ * gc->irq.initialized before adding irqdomain.
+ */
+ barrier();
+
+ gc->irq.initialized = true;
+
+ return 0;
+}
+
/**
* gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
* @gc: the GPIO chip to add the IRQ chip to
@@ -1635,8 +1682,10 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
{
struct fwnode_handle *fwnode = dev_fwnode(&gc->gpiodev->dev);
struct irq_chip *irqchip = gc->irq.chip;
+ struct irq_domain *domain;
unsigned int type;
unsigned int i;
+ int ret;
if (!irqchip)
return 0;
@@ -1657,28 +1706,18 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
"%pfw: Ignoring %u default trigger\n", fwnode, type))
type = IRQ_TYPE_NONE;
- if (gc->to_irq)
- chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
-
- gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;
gc->irq.request_key = request_key;
/* If a parent irqdomain is provided, let's build a hierarchy */
if (gpiochip_hierarchy_is_hierarchical(gc)) {
- int ret = gpiochip_hierarchy_add_domain(gc);
- if (ret)
- return ret;
+ domain = gpiochip_hierarchy_create_domain(gc);
} else {
- gc->irq.domain = irq_domain_create_simple(fwnode,
- gc->ngpio,
- gc->irq.first,
- &gpiochip_domain_ops,
- gc);
- if (!gc->irq.domain)
- return -EINVAL;
+ domain = gpiochip_simple_create_domain(gc);
}
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
if (gc->irq.parent_handler) {
for (i = 0; i < gc->irq.num_parents; i++) {
@@ -1702,14 +1741,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gpiochip_set_irq_hooks(gc);
- /*
- * Using barrier() here to prevent compiler from reordering
- * gc->irq.initialized before initialization of above
- * GPIO chip irq members.
- */
- barrier();
-
- gc->irq.initialized = true;
+ ret = gpiochip_irqchip_add_allocated_domain(gc, domain, false);
+ if (ret)
+ return ret;
acpi_gpiochip_request_interrupts(gc);
@@ -1780,22 +1814,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain)
{
- if (!domain)
- return -EINVAL;
-
- gc->to_irq = gpiochip_to_irq;
- gc->irq.domain = domain;
- gc->irq.domain_is_allocated_externally = true;
-
- /*
- * Using barrier() here to prevent compiler from reordering
- * gc->irq.initialized before adding irqdomain.
- */
- barrier();
-
- gc->irq.initialized = true;
-
- return 0;
+ return gpiochip_irqchip_add_allocated_domain(gc, domain, true);
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_domain);
@@ -2159,8 +2178,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
}
spin_unlock_irqrestore(&gpio_lock, flags);
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_RELEASED, desc);
+ gpiod_line_state_notify(desc, GPIOLINE_CHANGED_RELEASED);
return ret;
}
@@ -3728,6 +3746,12 @@ int gpiod_set_array_value_cansleep(unsigned int array_size,
}
EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
+void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action)
+{
+ blocking_notifier_call_chain(&desc->gdev->line_state_notifier,
+ action, desc);
+}
+
/**
* gpiod_add_lookup_table() - register GPIO device consumers
* @table: table of consumers to register
@@ -3995,8 +4019,7 @@ static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
return ERR_PTR(ret);
}
- blocking_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
+ gpiod_line_state_notify(desc, GPIOLINE_CHANGED_REQUESTED);
return desc;
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index cca81375f127..a0a67569300b 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -9,12 +9,13 @@
#ifndef GPIOLIB_H
#define GPIOLIB_H
-#include <linux/gpio/driver.h>
-#include <linux/gpio/consumer.h> /* for enum gpiod_flags */
-#include <linux/err.h>
+#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h> /* for enum gpiod_flags */
+#include <linux/gpio/driver.h>
#include <linux/module.h>
-#include <linux/cdev.h>
+#include <linux/notifier.h>
#include <linux/rwsem.h>
#define GPIOCHIP_NAME "gpiochip"
@@ -38,8 +39,10 @@
* or name of the IP component in a System on Chip.
* @data: per-instance data assigned by the driver
* @list: links gpio_device:s together for traversal
- * @notifier: used to notify subscribers about lines being requested, released
- * or reconfigured
+ * @line_state_notifier: used to notify subscribers about lines being
+ * requested, released or reconfigured
+ * @device_notifier: used to notify character device wait queues about the GPIO
+ * device being unregistered
* @sem: protects the structure from a NULL-pointer dereference of @chip by
* user-space operations when the device gets unregistered during
* a hot-unplug event
@@ -63,7 +66,8 @@ struct gpio_device {
const char *label;
void *data;
struct list_head list;
- struct blocking_notifier_head notifier;
+ struct blocking_notifier_head line_state_notifier;
+ struct blocking_notifier_head device_notifier;
struct rw_semaphore sem;
#ifdef CONFIG_PINCTRL
@@ -143,6 +147,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
extern spinlock_t gpio_lock;
extern struct list_head gpio_devices;
+void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action);
/**
* struct gpio_desc - Opaque descriptor for a GPIO
@@ -217,6 +222,7 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags);
+int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev);
/*
* Return the GPIO number of the passed descriptor relative to its chip
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 043b8109e64a..73ec60757dbc 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -1386,6 +1386,18 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
disable_irq(dsi->irq);
}
+static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
+{
+ u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
+
+ if (enable)
+ reg |= DSIM_FORCE_STOP_STATE;
+ else
+ reg &= ~DSIM_FORCE_STOP_STATE;
+
+ samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+}
+
static int samsung_dsim_init(struct samsung_dsim *dsi)
{
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1445,15 +1457,12 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- u32 reg;
if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
samsung_dsim_set_display_mode(dsi);
samsung_dsim_set_display_enable(dsi, true);
} else {
- reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
- reg &= ~DSIM_FORCE_STOP_STATE;
- samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
+ samsung_dsim_set_stop_state(dsi, false);
}
dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1463,16 +1472,12 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct samsung_dsim *dsi = bridge_to_dsi(bridge);
- u32 reg;
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
- reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
- reg |= DSIM_FORCE_STOP_STATE;
- samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
- }
+ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
+ samsung_dsim_set_stop_state(dsi, true);
dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1775,6 +1780,8 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
if (ret)
return ret;
+ samsung_dsim_set_stop_state(dsi, false);
+
ret = mipi_dsi_create_packet(&xfer.packet, msg);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 2fb9bf901a2c..3f479483d7d8 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -262,6 +262,26 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
+static void reschedule_output_poll_work(struct drm_device *dev)
+{
+ unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
+
+ if (dev->mode_config.delayed_event)
+ /*
+ * FIXME:
+ *
+ * Use short (1s) delay to handle the initial delayed event.
+ * This delay should not be needed, but Optimus/nouveau will
+ * fail in a mysterious way if the delayed event is handled as
+ * soon as possible like it is done in
+ * drm_helper_probe_single_connector_modes() in case the poll
+ * was enabled before.
+ */
+ delay = HZ;
+
+ schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
+}
+
/**
* drm_kms_helper_poll_enable - re-enable output polling.
* @dev: drm_device
@@ -279,37 +299,41 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
*/
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
- bool poll = false;
- unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
-
if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
dev->mode_config.poll_running)
return;
- poll = drm_kms_helper_enable_hpd(dev);
-
- if (dev->mode_config.delayed_event) {
- /*
- * FIXME:
- *
- * Use short (1s) delay to handle the initial delayed event.
- * This delay should not be needed, but Optimus/nouveau will
- * fail in a mysterious way if the delayed event is handled as
- * soon as possible like it is done in
- * drm_helper_probe_single_connector_modes() in case the poll
- * was enabled before.
- */
- poll = true;
- delay = HZ;
- }
-
- if (poll)
- schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
+ if (drm_kms_helper_enable_hpd(dev) ||
+ dev->mode_config.delayed_event)
+ reschedule_output_poll_work(dev);
dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
+/**
+ * drm_kms_helper_poll_reschedule - reschedule the output polling work
+ * @dev: drm_device
+ *
+ * This function reschedules the output polling work, after polling for a
+ * connector has been enabled.
+ *
+ * Drivers must call this helper after enabling polling for a connector by
+ * setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags
+ * in drm_connector::polled. Note that clearing these flags for a connector
+ * will stop the output polling work automatically if polling is disabled for
+ * all other connectors as well.
+ *
+ * The function can be called only after polling has been enabled by calling
+ * drm_kms_helper_poll_init() / drm_kms_helper_poll_enable().
+ */
+void drm_kms_helper_poll_reschedule(struct drm_device *dev)
+{
+ if (dev->mode_config.poll_running)
+ reschedule_output_poll_work(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);
+
static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
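The kernel-doc for drm_kms_helper_poll_reschedule() above is what the i915 hunks below act on: a driver that flips a connector over to polled detection reschedules the already-running poll work instead of calling drm_kms_helper_poll_enable() again. A hedged sketch of that call sequence in a hypothetical driver (only the DRM symbols are real; the function itself is illustrative):

#include <drm/drm_connector.h>
#include <drm/drm_probe_helper.h>

static void example_switch_connector_to_polling(struct drm_device *dev,
						struct drm_connector *connector)
{
	/* Typically done under dev->mode_config.mutex, as i915 does. */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			    DRM_CONNECTOR_POLL_DISCONNECT;

	/* Poll work may already be running; just reschedule it. */
	drm_kms_helper_poll_reschedule(dev);
}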
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 1160fa20433b..5eac7032bb5a 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -211,7 +211,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(&dev_priv->drm);
mod_delayed_work(dev_priv->unordered_wq,
&dev_priv->display.hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
@@ -649,7 +649,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
drm_connector_list_iter_end(&conn_iter);
if (enabled)
- drm_kms_helper_poll_enable(&dev_priv->drm);
+ drm_kms_helper_poll_reschedule(&dev_priv->drm);
mutex_unlock(&dev_priv->drm.mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index ddd146265beb..fa70defcb5b2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -26,6 +26,7 @@
* The kernel driver is only responsible for loading the HuC firmware and
* triggering its security authentication. This is done differently depending
* on the platform:
+ *
* - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
* and the authentication via GuC
* - DG2: load and authentication are both performed via GSC.
@@ -33,6 +34,7 @@
* not-DG2 older platforms), while the authentication is done in 2-steps,
* a first auth for clear-media workloads via GuC and a second one for all
* workloads via GSC.
+ *
* On platforms where the GuC does the authentication, to correctly do so the
* HuC binary must be loaded before the GuC one.
* Loading the HuC is optional; however, not using the HuC might negatively
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 0ad0c5885ec2..7d8671fdf447 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -443,7 +443,6 @@ static int i915_pcode_init(struct drm_i915_private *i915)
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct pci_dev *root_pdev;
int ret;
if (i915_inject_probe_failure(dev_priv))
@@ -557,15 +556,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_bw_init_hw(dev_priv);
- /*
- * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
- * This should be totally removed when we handle the pci states properly
- * on runtime PM and on s2idle cases.
- */
- root_pdev = pcie_find_root_port(pdev);
- if (root_pdev)
- pci_d3cold_disable(root_pdev);
-
return 0;
err_opregion:
@@ -591,7 +581,6 @@ err_perf:
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct pci_dev *root_pdev;
i915_perf_fini(dev_priv);
@@ -599,10 +588,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- root_pdev = pcie_find_root_port(pdev);
- if (root_pdev)
- pci_d3cold_enable(root_pdev);
}
/**
@@ -1517,6 +1502,8 @@ static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
struct intel_gt *gt;
int ret, i;
@@ -1568,6 +1555,15 @@ static int intel_runtime_suspend(struct device *kdev)
drm_err(&dev_priv->drm,
"Unclaimed access detected prior to suspending\n");
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM.
+ */
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_disable(root_pdev);
+
rpm->suspended = true;
/*
@@ -1606,6 +1602,8 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct pci_dev *root_pdev;
struct intel_gt *gt;
int ret, i;
@@ -1619,6 +1617,11 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
+
+ root_pdev = pcie_find_root_port(pdev);
+ if (root_pdev)
+ pci_d3cold_enable(root_pdev);
+
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
drm_dbg(&dev_priv->drm,
"Unclaimed access during suspend, bios?\n");
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 58dfb15a8757..e78de99e9933 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -96,7 +96,7 @@ static int panfrost_read_speedbin(struct device *dev)
* keep going without it; any other error means that we are
* supposed to read the bin value, but we failed doing so.
*/
- if (ret != -ENOENT) {
+ if (ret != -ENOENT && ret != -EOPNOTSUPP) {
DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 82094c137855..c43853597776 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -497,10 +497,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
- ttm_bo_put(&vmw_bo->tbo);
+ vmw_user_bo_unref(vmw_bo);
}
- drm_gem_object_put(&vmw_bo->tbo.base);
return ret;
}
@@ -540,8 +539,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
return ret;
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
- vmw_bo_unreference(&vbo);
- drm_gem_object_put(&vbo->tbo.base);
+ vmw_user_bo_unref(vbo);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 50a836e70994..1d433fceed3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -195,6 +195,14 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
return buf;
}
+static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+{
+ if (vbo) {
+ ttm_bo_put(&vbo->tbo);
+ drm_gem_object_put(&vbo->tbo.base);
+ }
+}
+
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
return container_of((gobj), struct vmw_bo, tbo.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3810a9984a7f..58bfdf203eca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1513,4 +1513,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw)
return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0;
}
+static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
+ u32 shader_type)
+{
+ SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
+
+ if (shader_model >= VMW_SM_5)
+ max_allowed = SVGA3D_SHADERTYPE_MAX;
+ else if (shader_model >= VMW_SM_4)
+ max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
+ return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
+}
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 6b9aa2b4ef54..98e0723ca6f5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1164,8 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
- ttm_bo_put(&vmw_bo->tbo);
- drm_gem_object_put(&vmw_bo->tbo.base);
+ vmw_user_bo_unref(vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1221,8 +1220,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
- ttm_bo_put(&vmw_bo->tbo);
- drm_gem_object_put(&vmw_bo->tbo.base);
+ vmw_user_bo_unref(vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1992,7 +1990,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
- if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+ if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
@@ -2115,8 +2113,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
- SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
- SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
@@ -2133,6 +2129,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
+ if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
+ cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+ VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+ (unsigned int) cmd->body.type,
+ (unsigned int) cmd->body.slot);
+ return -EINVAL;
+ }
+
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_cb;
@@ -2141,14 +2145,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
binding.size = cmd->body.sizeInBytes;
binding.slot = cmd->body.slot;
- if (binding.shader_slot >= max_shader_num ||
- binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
- VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
- (unsigned int) cmd->body.type,
- (unsigned int) binding.slot);
- return -EINVAL;
- }
-
vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
binding.slot);
@@ -2207,15 +2203,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
container_of(header, typeof(*cmd), header);
- SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
- SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
- cmd->body.type >= max_allowed) {
+ !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2239,8 +2233,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
- SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
- SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
struct vmw_resource *res = NULL;
struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
@@ -2251,8 +2243,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
- if (cmd->body.type >= max_allowed ||
- cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
+ if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
VMW_DEBUG_USER("Illegal shader type %u.\n",
(unsigned int) cmd->body.type);
return -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b62207be3363..1489ad73c103 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1665,10 +1665,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo) {
- vmw_bo_unreference(&bo);
- drm_gem_object_put(&bo->tbo.base);
- }
+ if (bo)
+ vmw_user_bo_unref(bo);
if (surface)
vmw_surface_unreference(&surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 7e112319a23c..fb85f244c3d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -451,8 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
- vmw_bo_unreference(&buf);
- drm_gem_object_put(&buf->tbo.base);
+ vmw_user_bo_unref(buf);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index e7226db8b242..1e81ff2422cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -809,8 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
out_bad_arg:
- vmw_bo_unreference(&buffer);
- drm_gem_object_put(&buffer->tbo.base);
+ vmw_user_bo_unref(buffer);
return ret;
}
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index 84ba8b875199..4c1a00f9929e 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/hsi/hsi.h>
#include <linux/idr.h>
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 307477b8a371..ec38c8892158 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -734,6 +734,16 @@ config SENSORS_HIH6130
This driver can also be built as a module. If so, the module
will be called hih6130.
+config SENSORS_HS3001
+ tristate "Renesas HS3001 humidity and temperature sensors"
+ depends on I2C
+ help
+	  If you say yes here you get support for the Renesas HS3001
+	  to HS3004 humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called hs3001.
+
config SENSORS_IBMAEM
tristate "IBM Active Energy Manager temperature/power sensors and control"
select IPMI_SI
@@ -1951,20 +1961,6 @@ config SENSORS_SFCTEMP
This driver can also be built as a module. If so, the module
will be called sfctemp.
-config SENSORS_SMM665
- tristate "Summit Microelectronics SMM665"
- depends on I2C
- help
- If you say yes here you get support for the hardware monitoring
- features of the Summit Microelectronics SMM665/SMM665B Six-Channel
- Active DC Output Controller / Monitor.
-
- Other supported chips are SMM465, SMM665C, SMM764, and SMM766.
- Support for those chips is untested.
-
- This driver can also be built as a module. If so, the module will
- be called smm665.
-
config SENSORS_ADC128D818
tristate "Texas Instruments ADC128D818"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 3f4b0fda0998..4ac9452b5430 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_GXP_FAN_CTRL) += gxp-fan-ctrl.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
+obj-$(CONFIG_SENSORS_HS3001) += hs3001.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
@@ -191,7 +192,6 @@ obj-$(CONFIG_SENSORS_SHT3x) += sht3x.o
obj-$(CONFIG_SENSORS_SHT4x) += sht4x.o
obj-$(CONFIG_SENSORS_SHTC1) += shtc1.o
obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
-obj-$(CONFIG_SENSORS_SMM665) += smm665.o
obj-$(CONFIG_SENSORS_SMPRO) += smpro-hwmon.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index ffe81e445010..4829f83ff52e 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -16,7 +16,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -253,7 +253,7 @@ static int ad7418_probe(struct i2c_client *client)
mutex_init(&data->lock);
data->client = client;
if (dev->of_node)
- data->type = (enum chips)of_device_get_match_data(dev);
+ data->type = (uintptr_t)of_device_get_match_data(dev);
else
data->type = i2c_match_id(ad7418_id, client)->driver_data;
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 1932613ec095..809e830f52a6 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -18,7 +18,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_data/ads7828.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -139,8 +139,7 @@ static int ads7828_probe(struct i2c_client *client)
}
if (client->dev.of_node)
- chip = (enum ads7828_chips)
- of_device_get_match_data(&client->dev);
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
else
chip = i2c_match_id(ads7828_device_ids, client)->driver_data;
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index c0ce88324ea6..03acadc3a6cb 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
@@ -1653,7 +1652,7 @@ static int adt7475_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
if (client->dev.of_node)
- chip = (enum chips)of_device_get_match_data(&client->dev);
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
else
chip = id->driver_data;
diff --git a/drivers/hwmon/as370-hwmon.c b/drivers/hwmon/as370-hwmon.c
index fffbf385a57f..316454bd983d 100644
--- a/drivers/hwmon/as370-hwmon.c
+++ b/drivers/hwmon/as370-hwmon.c
@@ -11,7 +11,8 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#define CTRL 0x0
#define PD BIT(0)
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index d11f674e3dc3..997df4b40509 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -12,8 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index f52a539eb33e..51f9c2db403e 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -340,7 +340,7 @@ static const struct ec_board_info board_info_crosshair_x670e_hero = {
.sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
SENSOR_SET_TEMP_WATER,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
.family = family_amd_600_series,
};
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
index 8d402a627306..b77ebac2e0ce 100644
--- a/drivers/hwmon/bt1-pvt.c
+++ b/drivers/hwmon/bt1-pvt.c
@@ -891,15 +891,8 @@ static struct pvt_hwmon *pvt_create_data(struct platform_device *pdev)
static int pvt_request_regs(struct pvt_hwmon *pvt)
{
struct platform_device *pdev = to_platform_device(pvt->dev);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(pvt->dev, "Couldn't find PVT memresource\n");
- return -EINVAL;
- }
-
- pvt->regs = devm_ioremap_resource(pvt->dev, res);
+ pvt->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pvt->regs))
return PTR_ERR(pvt->regs);
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 1b6ff4712138..fad69ef56c75 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -39,7 +39,6 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_data/g762.h>
#define DRVNAME "g762"
diff --git a/drivers/hwmon/gxp-fan-ctrl.c b/drivers/hwmon/gxp-fan-ctrl.c
index 2e05bc2f627a..00e057050437 100644
--- a/drivers/hwmon/gxp-fan-ctrl.c
+++ b/drivers/hwmon/gxp-fan-ctrl.c
@@ -6,7 +6,7 @@
#include <linux/hwmon.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#define OFS_FAN_INST 0 /* Is 0 because plreg base will be set at INST */
diff --git a/drivers/hwmon/hp-wmi-sensors.c b/drivers/hwmon/hp-wmi-sensors.c
index ebe2fb513480..17ae62f88bbf 100644
--- a/drivers/hwmon/hp-wmi-sensors.c
+++ b/drivers/hwmon/hp-wmi-sensors.c
@@ -435,25 +435,11 @@ static union acpi_object *hp_wmi_get_wobj(const char *guid, u8 instance)
/* hp_wmi_wobj_instance_count - find count of WMI object instances */
static u8 hp_wmi_wobj_instance_count(const char *guid)
{
- u8 hi = HP_WMI_MAX_INSTANCES;
- union acpi_object *wobj;
- u8 lo = 0;
- u8 mid;
-
- while (lo < hi) {
- mid = (lo + hi) / 2;
+ int count;
- wobj = hp_wmi_get_wobj(guid, mid);
- if (!wobj) {
- hi = mid;
- continue;
- }
+ count = wmi_instance_count(guid);
- lo = mid + 1;
- kfree(wobj);
- }
-
- return lo;
+ return clamp(count, 0, (int)HP_WMI_MAX_INSTANCES);
}
static int check_wobj(const union acpi_object *wobj,
@@ -1927,7 +1913,7 @@ static bool add_event_handler(struct hp_wmi_sensors *state)
static int hp_wmi_sensors_init(struct hp_wmi_sensors *state)
{
struct hp_wmi_info *connected[HP_WMI_MAX_INSTANCES];
- struct hp_wmi_platform_events *pevents;
+ struct hp_wmi_platform_events *pevents = NULL;
struct device *dev = &state->wdev->dev;
struct hp_wmi_info *info;
struct device *hwdev;
diff --git a/drivers/hwmon/hs3001.c b/drivers/hwmon/hs3001.c
new file mode 100644
index 000000000000..ac574e46d069
--- /dev/null
+++ b/drivers/hwmon/hs3001.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This is an incomplete driver implementation for the
+ * HS3001 humidity and temperature sensor and compatibles. It does not cover
+ * the configuration options, for which the sensor must be put into
+ * 'programming mode' during power-up.
+ *
+ *
+ * Copyright (C) 2023 SYS TEC electronic AG
+ * Author: Andre Werner <andre.werner@systec-electronic.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* Measurement times */
+#define HS3001_WAKEUP_TIME 100 /* us */
+#define HS3001_8BIT_RESOLUTION 550 /* us */
+#define HS3001_10BIT_RESOLUTION 1310 /* us */
+#define HS3001_12BIT_RESOLUTION 4500 /* us */
+#define HS3001_14BIT_RESOLUTION 16900 /* us */
+
+#define HS3001_RESPONSE_LENGTH 4
+
+#define HS3001_FIXPOINT_ARITH 1000U
+
+#define HS3001_MASK_HUMIDITY_0X3FFF GENMASK(13, 0)
+#define HS3001_MASK_STATUS_0XC0 GENMASK(7, 6)
+
+/* Definitions for Status Bits of A/D Data */
+#define HS3001_DATA_VALID 0x00 /* Valid Data */
+#define HS3001_DATA_STALE 0x01 /* Stale Data */
+
+struct hs3001_data {
+ struct i2c_client *client;
+ struct mutex i2c_lock; /* lock for sending i2c commands */
+ u32 wait_time; /* in us */
+ int temperature; /* in milli degree */
+ u32 humidity; /* in milli % */
+};
+
+static int hs3001_extract_temperature(u16 raw)
+{
+ /* fixpoint arithmetic 1 digit */
+ u32 temp = (raw >> 2) * HS3001_FIXPOINT_ARITH * 165;
+
+ temp /= (1 << 14) - 1;
+
+ return (int)temp - 40 * HS3001_FIXPOINT_ARITH;
+}
+
+static u32 hs3001_extract_humidity(u16 raw)
+{
+ u32 hum = (raw & HS3001_MASK_HUMIDITY_0X3FFF) * HS3001_FIXPOINT_ARITH * 100;
+
+	return hum / ((1 << 14) - 1);
+}
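/*
 * Worked example of the two conversions above (numbers are illustrative and
 * added for this write-up; they are not part of the driver). With
 * HS3001_FIXPOINT_ARITH = 1000 and a 14-bit full scale of (1 << 14) - 1 = 16383:
 *
 *   humidity word 0x1fff:   (0x1fff & 0x3fff) = 8191
 *       8191 * 1000 * 100 / 16383         = 49996 milli-%RH   (~50.0 %RH)
 *
 *   temperature word 0x6000:  (0x6000 >> 2) = 6144
 *       6144 * 1000 * 165 / 16383 - 40000 = 21878 milli-deg C (~21.9 degrees C)
 */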
+
+static int hs3001_data_fetch_command(struct i2c_client *client,
+ struct hs3001_data *data)
+{
+ int ret;
+ u8 buf[HS3001_RESPONSE_LENGTH];
+ u8 hs3001_status;
+
+ ret = i2c_master_recv(client, buf, HS3001_RESPONSE_LENGTH);
+ if (ret != HS3001_RESPONSE_LENGTH) {
+ ret = ret < 0 ? ret : -EIO;
+ dev_dbg(&client->dev,
+ "Error in i2c communication. Error code: %d.\n", ret);
+ return ret;
+ }
+
+ hs3001_status = FIELD_GET(HS3001_MASK_STATUS_0XC0, buf[0]);
+ if (hs3001_status == HS3001_DATA_STALE) {
+ dev_dbg(&client->dev, "Sensor busy.\n");
+ return -EBUSY;
+ }
+ if (hs3001_status != HS3001_DATA_VALID) {
+ dev_dbg(&client->dev, "Data invalid.\n");
+ return -EIO;
+ }
+
+ data->humidity =
+ hs3001_extract_humidity(be16_to_cpup((__be16 *)&buf[0]));
+ data->temperature =
+ hs3001_extract_temperature(be16_to_cpup((__be16 *)&buf[2]));
+
+ return 0;
+}
+
+static umode_t hs3001_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ /* Both humidity and temperature can only be read. */
+ return 0444;
+}
+
+static int hs3001_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct hs3001_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int ret;
+
+ mutex_lock(&data->i2c_lock);
+ ret = i2c_master_send(client, NULL, 0);
+ if (ret < 0) {
+ mutex_unlock(&data->i2c_lock);
+ return ret;
+ }
+
+ /*
+ * Sensor needs some time to process measurement depending on
+ * resolution (ref. datasheet)
+ */
+ fsleep(data->wait_time);
+
+ ret = hs3001_data_fetch_command(client, data);
+ mutex_unlock(&data->i2c_lock);
+
+ if (ret < 0)
+ return ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = data->temperature;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case hwmon_humidity:
+ switch (attr) {
+ case hwmon_humidity_input:
+ *val = data->humidity;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct hwmon_channel_info *hs3001_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT),
+ NULL
+};
+
+static const struct hwmon_ops hs3001_hwmon_ops = {
+ .is_visible = hs3001_is_visible,
+ .read = hs3001_read,
+};
+
+static const struct hwmon_chip_info hs3001_chip_info = {
+ .ops = &hs3001_hwmon_ops,
+ .info = hs3001_info,
+};
+
+/* device ID table */
+static const struct i2c_device_id hs3001_ids[] = {
+ { "hs3001", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, hs3001_ids);
+
+static const struct of_device_id hs3001_of_match[] = {
+ {.compatible = "renesas,hs3001"},
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, hs3001_of_match);
+
+static int hs3001_probe(struct i2c_client *client)
+{
+ struct hs3001_data *data;
+ struct device *hwmon_dev;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+
+ /*
+ * Measurement time = wake-up time + measurement time temperature
+ * + measurement time humidity. This is currently static because
+ * enabling programming mode is not supported yet.
+ */
+ data->wait_time = (HS3001_WAKEUP_TIME + HS3001_14BIT_RESOLUTION +
+ HS3001_14BIT_RESOLUTION);
+
+ mutex_init(&data->i2c_lock);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ data,
+ &hs3001_chip_info,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ return dev_err_probe(dev, PTR_ERR(hwmon_dev),
+ "Unable to register hwmon device.\n");
+
+ return 0;
+}
+
+static struct i2c_driver hs3001_i2c_driver = {
+ .driver = {
+ .name = "hs3001",
+ .of_match_table = hs3001_of_match,
+ },
+ .probe = hs3001_probe,
+ .id_table = hs3001_ids,
+};
+
+module_i2c_driver(hs3001_i2c_driver);
+
+MODULE_AUTHOR("Andre Werner <andre.werner@systec-electronic.com>");
+MODULE_DESCRIPTION("HS3001 humidity and temperature sensor base driver");
+MODULE_LICENSE("GPL");
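A stand-alone sketch of the 14-bit fixed-point conversions performed by hs3001_extract_temperature() and hs3001_extract_humidity() in the new driver above, with illustrative mid-scale inputs (not hardware readings):

#include <stdio.h>
#include <stdint.h>

#define FIXPOINT 1000U	/* report in milli-units, as the driver does */

/* Temperature: upper 14 bits of the second word, scaled to -40..+125 degC. */
static int temp_mc_from_raw(uint16_t raw)
{
	uint32_t t = (raw >> 2) * FIXPOINT * 165;

	t /= (1 << 14) - 1;		/* 14-bit full scale */
	return (int)t - 40 * FIXPOINT;	/* offset by -40 degC */
}

/* Humidity: lower 14 bits of the first word, scaled to 0..100 %RH. */
static uint32_t hum_mpct_from_raw(uint16_t raw)
{
	uint32_t h = (raw & 0x3fff) * FIXPOINT * 100;

	return h / ((1 << 14) - 1);
}

int main(void)
{
	/* Mid-scale examples: ~42.5 degC and ~50 %RH. */
	printf("temp = %d mdegC\n", temp_mc_from_raw(0x8000));
	printf("hum  = %u m%%RH\n", (unsigned)hum_mpct_from_raw(0x2000));
	return 0;
}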
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index cfd7efef5cdf..d8415d1f21fc 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -31,7 +31,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/util_macros.h>
@@ -625,7 +624,7 @@ static int ina2xx_probe(struct i2c_client *client)
enum ina2xx_ids chip;
if (client->dev.of_node)
- chip = (enum ina2xx_ids)of_device_get_match_data(&client->dev);
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
else
chip = i2c_match_id(ina2xx_id, client)->driver_data;
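The (uintptr_t) cast above, repeated in several drivers below, reads back a small integer that was stashed in the match table's .data pointer; going through uintptr_t avoids compiler warnings about converting a pointer directly to a narrower enum. A minimal sketch of the pattern, using made-up names:

#include <stdint.h>
#include <stdio.h>

/* Made-up example mirroring the match-data pattern in these drivers. */
enum chip_kind { CHIP_A = 1, CHIP_B = 2 };

struct match_entry {
	const char *compatible;
	const void *data;	/* the enum is stored here as a pointer */
};

static const struct match_entry table[] = {
	{ "vendor,chip-a", (void *)(uintptr_t)CHIP_A },
	{ "vendor,chip-b", (void *)(uintptr_t)CHIP_B },
};

int main(void)
{
	/* Reading it back: cast through uintptr_t first, so the enum
	 * conversion happens on an integer, not on a pointer. */
	enum chip_kind kind = (uintptr_t)table[1].data;

	printf("kind = %d\n", kind);
	return 0;
}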
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 5deff5e5f693..fbe86cec6055 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -221,6 +221,10 @@ static bool fix_pwm_polarity;
* Super-I/O configuration space.
*/
#define IT87_REG_VID 0x0a
+
+/* Interface Selection register on other chips */
+#define IT87_REG_IFSEL 0x0a
+
/*
* The IT8705F and IT8712F earlier than revision 0x08 use register 0x0b
* for fan divisors. Later IT8712F revisions must use 16-bit tachometer
@@ -1159,28 +1163,66 @@ static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, 0);
static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, 0);
static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, 0);
+static int get_temp_type(struct it87_data *data, int index)
+{
+ /*
+ * 2 is deprecated;
+ * 3 = thermal diode;
+ * 4 = thermistor;
+ * 5 = AMDTSI;
+ * 6 = Intel PECI;
+ * 0 = disabled
+ */
+ u8 reg, extra;
+ int ttype, type = 0;
+
+ /* Detect PECI vs. AMDTSI */
+ ttype = 6;
+ if ((has_temp_peci(data, index)) || data->type == it8721 ||
+ data->type == it8720) {
+ extra = it87_read_value(data, IT87_REG_IFSEL);
+ if ((extra & 0x70) == 0x40)
+ ttype = 5;
+ }
+
+ reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+
+ /* Per chip special detection */
+ switch (data->type) {
+ case it8622:
+ if (!(reg & 0xc0) && index == 3)
+ type = ttype;
+ break;
+ default:
+ break;
+ }
+
+ if (type || index >= 3)
+ return type;
+
+ extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+
+ if ((has_temp_peci(data, index) && (reg >> 6 == index + 1)) ||
+ (has_temp_old_peci(data, index) && (extra & 0x80)))
+ type = ttype; /* Intel PECI or AMDTSI */
+ else if (reg & BIT(index))
+ type = 3; /* thermal diode */
+ else if (reg & BIT(index + 3))
+ type = 4; /* thermistor */
+
+ return type;
+}
+
static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- u8 reg, extra;
if (IS_ERR(data))
return PTR_ERR(data);
- reg = data->sensor; /* In case value is updated while used */
- extra = data->extra;
-
- if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1)) ||
- (has_temp_old_peci(data, nr) && (extra & 0x80)))
- return sprintf(buf, "6\n"); /* Intel PECI */
- if (reg & (1 << nr))
- return sprintf(buf, "3\n"); /* thermal diode */
- if (reg & (8 << nr))
- return sprintf(buf, "4\n"); /* thermistor */
- return sprintf(buf, "0\n"); /* disabled */
+ return sprintf(buf, "%d\n", get_temp_type(data, sensor_attr->index));
}
static ssize_t set_temp_type(struct device *dev, struct device_attribute *attr,
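A compact illustration of the basic bit layout get_temp_type() decodes from IT87_REG_TEMP_ENABLE: bit <index> selects a thermal diode, bit <index + 3> a thermistor, and the PECI/AMDTSI paths override both. The sketch below covers only the simple case; the register value is hypothetical:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Decode the sensor type for temp channel 'index' (0..2) from a
 * TEMP_ENABLE-style register, ignoring the PECI/AMDTSI special cases. */
static int basic_temp_type(unsigned int reg, int index)
{
	if (reg & BIT(index))
		return 3;	/* thermal diode */
	if (reg & BIT(index + 3))
		return 4;	/* thermistor */
	return 0;		/* disabled */
}

int main(void)
{
	unsigned int reg = 0x12;	/* hypothetical: bits 1 and 4 set */

	/* temp1: bits 0/3 clear -> 0; temp2: bit 1 set -> 3 (diode);
	 * temp3: bits 2/5 clear -> 0 */
	printf("%d %d %d\n",
	       basic_temp_type(reg, 0),
	       basic_temp_type(reg, 1),
	       basic_temp_type(reg, 2));
	return 0;
}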
@@ -2313,6 +2355,12 @@ static umode_t it87_temp_is_visible(struct kobject *kobj,
if (!(data->has_temp & BIT(i)))
return 0;
+ if (a == 3) {
+ if (get_temp_type(data, i) == 0)
+ return 0;
+ return attr->mode;
+ }
+
if (a == 5 && !has_temp_offset(data))
return 0;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index a267b11731a8..bae0becfa24b 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -65,7 +65,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
-/* Common for Zen CPU families (Family 17h and 18h and 19h) */
+/* Common for Zen CPU families (Family 17h and 18h and 19h and 1Ah) */
#define ZEN_REPORTED_TEMP_CTRL_BASE 0x00059800
#define ZEN_CCD_TEMP(offset, x) (ZEN_REPORTED_TEMP_CTRL_BASE + \
@@ -475,6 +475,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
k10temp_get_ccd_support(pdev, data, 12);
break;
}
+ } else if (boot_cpu_data.x86 == 0x1a) {
+ data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
+ data->read_tempreg = read_tempreg_nb_zen;
+ data->is_zen = true;
} else {
data->read_htcreg = read_htcreg_pci;
data->read_tempreg = read_tempreg_pci;
@@ -521,6 +525,8 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/lan966x-hwmon.c b/drivers/hwmon/lan966x-hwmon.c
index f8658359a098..7247c03e4f44 100644
--- a/drivers/hwmon/lan966x-hwmon.c
+++ b/drivers/hwmon/lan966x-hwmon.c
@@ -334,24 +334,6 @@ static struct regmap *lan966x_init_regmap(struct platform_device *pdev,
return devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
}
-static void lan966x_clk_disable(void *data)
-{
- struct lan966x_hwmon *hwmon = data;
-
- clk_disable_unprepare(hwmon->clk);
-}
-
-static int lan966x_clk_enable(struct device *dev, struct lan966x_hwmon *hwmon)
-{
- int ret;
-
- ret = clk_prepare_enable(hwmon->clk);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, lan966x_clk_disable, hwmon);
-}
-
static int lan966x_hwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -363,15 +345,11 @@ static int lan966x_hwmon_probe(struct platform_device *pdev)
if (!hwmon)
return -ENOMEM;
- hwmon->clk = devm_clk_get(dev, NULL);
+ hwmon->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(hwmon->clk))
return dev_err_probe(dev, PTR_ERR(hwmon->clk),
"failed to get clock\n");
- ret = lan966x_clk_enable(dev, hwmon);
- if (ret)
- return dev_err_probe(dev, ret, "failed to enable clock\n");
-
hwmon->clk_rate = clk_get_rate(hwmon->clk);
hwmon->regmap_pvt = lan966x_init_regmap(pdev, "pvt");
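The conversion above folds clock lookup, prepare/enable and the matching devm cleanup into a single call. A hedged kernel-style sketch of what a probe using devm_clk_get_enabled() typically looks like (function and message text are placeholders, not the lan966x code):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch of a probe using the managed get+enable helper: the clock is
 * automatically disabled, unprepared and put when the device unbinds. */
static int example_hwmon_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get clock\n");

	dev_info(dev, "clock rate: %lu Hz\n", clk_get_rate(clk));
	return 0;
}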
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 6972454eb4e0..0878a044dd8e 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -33,7 +33,7 @@
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/sysfs.h>
#include <linux/types.h>
@@ -1105,7 +1105,7 @@ static int lm63_probe(struct i2c_client *client)
/* Set the device type */
if (client->dev.of_node)
- data->kind = (enum chips)of_device_get_match_data(&client->dev);
+ data->kind = (uintptr_t)of_device_get_match_data(&client->dev);
else
data->kind = i2c_match_id(lm63_id, client)->driver_data;
if (data->kind == lm64)
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 72e634d1b857..5b2ea05c951e 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -13,7 +13,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
@@ -579,7 +578,7 @@ static int lm75_probe(struct i2c_client *client)
enum lm75_type kind;
if (client->dev.of_node)
- kind = (enum lm75_type)of_device_get_match_data(&client->dev);
+ kind = (uintptr_t)of_device_get_match_data(&client->dev);
else
kind = i2c_match_id(lm75_ids, client)->driver_data;
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 8540178f5b74..68c210002357 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -12,7 +12,7 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
@@ -1559,7 +1559,7 @@ static int lm85_probe(struct i2c_client *client)
data->client = client;
if (client->dev.of_node)
- data->type = (enum chips)of_device_get_match_data(&client->dev);
+ data->type = (uintptr_t)of_device_get_match_data(&client->dev);
else
data->type = i2c_match_id(lm85_id, client)->driver_data;
mutex_init(&data->update_lock);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 90101c236f35..e0d7454a301c 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -106,7 +106,7 @@
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -2765,7 +2765,7 @@ static int lm90_probe(struct i2c_client *client)
/* Set the device type */
if (client->dev.of_node)
- data->kind = (enum chips)of_device_get_match_data(&client->dev);
+ data->kind = (uintptr_t)of_device_get_match_data(&client->dev);
else
data->kind = i2c_match_id(lm90_id, client)->driver_data;
diff --git a/drivers/hwmon/lochnagar-hwmon.c b/drivers/hwmon/lochnagar-hwmon.c
index 6350904a8a8b..5202dddfd61e 100644
--- a/drivers/hwmon/lochnagar-hwmon.c
+++ b/drivers/hwmon/lochnagar-hwmon.c
@@ -16,7 +16,6 @@
#include <linux/mfd/lochnagar2_regs.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/hwmon/ltq-cputemp.c b/drivers/hwmon/ltq-cputemp.c
index 08e09a82acab..f7e4a4ca5239 100644
--- a/drivers/hwmon/ltq-cputemp.c
+++ b/drivers/hwmon/ltq-cputemp.c
@@ -9,8 +9,9 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <lantiq_soc.h>
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
index b1300ca9db1f..7d237db6e57c 100644
--- a/drivers/hwmon/max31730.c
+++ b/drivers/hwmon/max31730.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 7f709fd1af89..af7e62685898 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -12,7 +12,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#define MAX6621_DRV_NAME "max6621"
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index caf527154fca..aa7f21ab2395 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -618,11 +618,17 @@ MODULE_DEVICE_TABLE(i2c, max6639_id);
static DEFINE_SIMPLE_DEV_PM_OPS(max6639_pm_ops, max6639_suspend, max6639_resume);
+static const struct of_device_id max6639_of_match[] = {
+ { .compatible = "maxim,max6639", },
+ { },
+};
+
static struct i2c_driver max6639_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max6639",
.pm = pm_sleep_ptr(&max6639_pm_ops),
+ .of_match_table = max6639_of_match,
},
.probe = max6639_probe,
.id_table = max6639_id,
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 3a67778f111c..7d10dd434f2e 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -15,7 +15,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_data/max6697.h>
@@ -703,7 +702,7 @@ static int max6697_probe(struct i2c_client *client)
return -ENOMEM;
if (client->dev.of_node)
- data->type = (enum chips)of_device_get_match_data(&client->dev);
+ data->type = (uintptr_t)of_device_get_match_data(&client->dev);
else
data->type = i2c_match_id(max6697_id, client)->driver_data;
data->chip = &max6697_chip_data[data->type];
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index 127e15ff3e76..9814eaf24564 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -20,7 +20,6 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/* Vdd / reference voltage in millivolt */
#define MCP3021_VDD_REF_MAX 5500
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index c2a96468c9b4..a5f89aab3fb4 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -12,7 +12,7 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
-#define MLXREG_FAN_MAX_TACHO 14
+#define MLXREG_FAN_MAX_TACHO 24
#define MLXREG_FAN_MAX_PWM 4
#define MLXREG_FAN_PWM_NOT_CONNECTED 0xff
#define MLXREG_FAN_MAX_STATE 10
@@ -300,6 +300,16 @@ static const struct hwmon_channel_info * const mlxreg_fan_hwmon_info[] = {
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT,
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 08ce4984151d..02a71244fc3b 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -33,7 +33,8 @@
* (0xd451)
* nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
* (0xd429)
- * nct6799d 14 7 7 2+6 0xd802 0xc1 0x5ca3
+ * nct6796d-s 18 7 7 6+2 0xd801 0xc1 0x5ca3
+ * nct6799d-r 18 7 7 6+2 0xd802 0xc1 0x5ca3
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
@@ -79,14 +80,17 @@ static const char * const nct6775_device_names[] = {
/* Common and NCT6775 specific data */
-/* Voltage min/max registers for nr=7..14 are in bank 5 */
+/*
+ * Voltage min/max registers for nr=7..14 are in bank 5
+ * min/max: 15-17 for NCT6799 only
+ */
static const u16 NCT6775_REG_IN_MAX[] = {
0x2b, 0x2d, 0x2f, 0x31, 0x33, 0x35, 0x37, 0x554, 0x556, 0x558, 0x55a,
- 0x55c, 0x55e, 0x560, 0x562 };
+ 0x55c, 0x55e, 0x560, 0x562, 0x564, 0x570, 0x572 };
static const u16 NCT6775_REG_IN_MIN[] = {
0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x555, 0x557, 0x559, 0x55b,
- 0x55d, 0x55f, 0x561, 0x563 };
+ 0x55d, 0x55f, 0x561, 0x563, 0x565, 0x571, 0x573 };
static const u16 NCT6775_REG_IN[] = {
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x550, 0x551, 0x552
};
@@ -97,31 +101,23 @@ static const u16 NCT6775_REG_IN[] = {
static const u16 NCT6775_REG_ALARM[NUM_REG_ALARM] = { 0x459, 0x45A, 0x45B };
-/* 0..15 voltages, 16..23 fans, 24..29 temperatures, 30..31 intrusion */
-
-static const s8 NCT6775_ALARM_BITS[] = {
- 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
- 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
- -1, /* unused */
- 6, 7, 11, -1, -1, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
- 12, -1 }; /* intrusion0, intrusion1 */
+static const s8 NCT6775_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 8, 21, 20, 16, 17, -1, -1, -1, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 6, 7, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 4, 5, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 12, -1, /* intr0-intr1 */
+};
static const u16 NCT6775_REG_BEEP[NUM_REG_BEEP] = { 0x56, 0x57, 0x453, 0x4e };
-/*
- * 0..14 voltages, 15 global beep enable, 16..23 fans, 24..29 temperatures,
- * 30..31 intrusion
- */
-static const s8 NCT6775_BEEP_BITS[] = {
- 0, 1, 2, 3, 8, 9, 10, 16, /* in0.. in7 */
- 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
- 21, /* global beep enable */
- 6, 7, 11, 28, -1, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
- 12, -1 }; /* intrusion0, intrusion1 */
+static const s8 NCT6775_BEEP_BITS[NUM_BEEP_BITS] = {
+ 0, 1, 2, 3, 8, 9, 10, 16, 17, -1, -1, -1, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 6, 7, 11, 28, -1, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 4, 5, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 12, -1, 21 /* intr0-intr1, beep_en */
+};
/* DC or PWM output fan configuration */
static const u8 NCT6775_REG_PWM_MODE[] = { 0x04, 0x04, 0x12 };
@@ -255,25 +251,24 @@ static const u16 NCT6775_REG_TSI_TEMP[] = { 0x669 };
#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
-static const s8 NCT6776_ALARM_BITS[] = {
- 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
- 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
- -1, /* unused */
- 6, 7, 11, 10, 23, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
- 12, 9 }; /* intrusion0, intrusion1 */
-
-static const u16 NCT6776_REG_BEEP[NUM_REG_BEEP] = { 0xb2, 0xb3, 0xb4, 0xb5 };
-
-static const s8 NCT6776_BEEP_BITS[] = {
- 0, 1, 2, 3, 4, 5, 6, 7, /* in0.. in7 */
- 8, -1, -1, -1, -1, -1, -1, /* in8..in14 */
- 24, /* global beep enable */
- 25, 26, 27, 28, 29, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 16, 17, 18, 19, 20, 21, /* temp1..temp6 */
- 30, 31 }; /* intrusion0, intrusion1 */
+static const s8 NCT6776_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 8, 21, 20, 16, 17, -1, -1, -1, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 6, 7, 11, 10, 23, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 4, 5, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 12, 9, /* intr0-intr1 */
+};
+
+/* 0xbf: nct6799 only */
+static const u16 NCT6776_REG_BEEP[NUM_REG_BEEP] = { 0xb2, 0xb3, 0xb4, 0xb5, 0xbf };
+
+static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 25, 26, 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, 18, 19, 20, 21, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 30, 31, 24 /* intr0-intr1, beep_en */
+};
static const u16 NCT6776_REG_TOLERANCE_H[] = {
0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c };
@@ -337,30 +332,35 @@ static const u16 NCT6776_REG_TSI_TEMP[] = {
/* NCT6779 specific data */
+/*
+ * 15-17 for NCT6799 only, register labels are:
+ * CPUVC, VIN1, AVSB, 3VCC, VIN0, VIN8, VIN4, 3VSB
+ * VBAT, VTT, VIN5, VIN6, VIN2, VIN3, VIN7, VIN9
+ * VHIF, VIN10
+ */
static const u16 NCT6779_REG_IN[] = {
0x480, 0x481, 0x482, 0x483, 0x484, 0x485, 0x486, 0x487,
- 0x488, 0x489, 0x48a, 0x48b, 0x48c, 0x48d, 0x48e };
+ 0x488, 0x489, 0x48a, 0x48b, 0x48c, 0x48d, 0x48e, 0x48f,
+ 0x470, 0x471};
static const u16 NCT6779_REG_ALARM[NUM_REG_ALARM] = {
0x459, 0x45A, 0x45B, 0x568 };
-static const s8 NCT6779_ALARM_BITS[] = {
- 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
- 17, 24, 25, 26, 27, 28, 29, /* in8..in14 */
- -1, /* unused */
- 6, 7, 11, 10, 23, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
- 12, 9 }; /* intrusion0, intrusion1 */
-
-static const s8 NCT6779_BEEP_BITS[] = {
- 0, 1, 2, 3, 4, 5, 6, 7, /* in0.. in7 */
- 8, 9, 10, 11, 12, 13, 14, /* in8..in14 */
- 24, /* global beep enable */
- 25, 26, 27, 28, 29, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 16, 17, -1, -1, -1, -1, /* temp1..temp6 */
- 30, 31 }; /* intrusion0, intrusion1 */
+static const s8 NCT6779_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 8, 21, 20, 16, 17, 24, 25, 26, /* in0-in11 */
+ 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 6, 7, 11, 10, 23, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 4, 5, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 12, 9, /* intr0-intr1 */
+};
+
+static const s8 NCT6779_BEEP_BITS[NUM_BEEP_BITS] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, /* in0-in11 */
+ 12, 13, 14, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 25, 26, 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 30, 31, 24 /* intr0-intr1, beep_en */
+};
static const u16 NCT6779_REG_FAN[] = {
0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
@@ -448,14 +448,13 @@ static const u16 NCT6791_REG_WEIGHT_DUTY_BASE[NUM_FAN] = { 0, 0x23e };
static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = {
0x459, 0x45A, 0x45B, 0x568, 0x45D };
-static const s8 NCT6791_ALARM_BITS[] = {
- 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
- 17, 24, 25, 26, 27, 28, 29, /* in8..in14 */
- -1, /* unused */
- 6, 7, 11, 10, 23, 33, /* fan1..fan6 */
- -1, -1, /* unused */
- 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
- 12, 9 }; /* intrusion0, intrusion1 */
+static const s8 NCT6791_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 8, 21, 20, 16, 17, 24, 25, 26, /* in0-in11 */
+ 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 6, 7, 11, 10, 23, 33, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 4, 5, 13, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 12, 9, /* intr0-intr1 */
+};
/* NCT6792/NCT6793 specific data */
@@ -618,6 +617,28 @@ static const char *const nct6796_temp_label[] = {
static const u16 NCT6796_REG_TSI_TEMP[] = { 0x409, 0x40b };
+static const u16 NCT6798_REG_TEMP[] = {
+ 0x27, 0x150, 0x670, 0x672, 0x674, 0x676, 0x678, 0x67a};
+
+static const u16 NCT6798_REG_TEMP_SOURCE[] = {
+ 0x621, 0x622, 0xc26, 0xc27, 0xc28, 0xc29, 0xc2a, 0xc2b };
+
+static const u16 NCT6798_REG_TEMP_MON[] = {
+ 0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d, 0x4a0 };
+static const u16 NCT6798_REG_TEMP_OVER[] = {
+ 0x39, 0x155, 0xc1a, 0xc1b, 0xc1c, 0xc1d, 0xc1e, 0xc1f };
+static const u16 NCT6798_REG_TEMP_HYST[] = {
+ 0x3a, 0x153, 0xc20, 0xc21, 0xc22, 0xc23, 0xc24, 0xc25 };
+
+static const u16 NCT6798_REG_TEMP_CRIT[32] = {
+ 0x135, 0x235, 0x335, 0x835, 0x935, 0xa35, 0xb35, 0 };
+
+static const u16 NCT6798_REG_TEMP_ALTERNATE[32] = {
+ 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0x496, 0,
+ 0, 0, 0, 0, 0x4a2, 0, 0, 0,
+ 0, 0x400, 0x401, 0x402, 0x404, 0x405, 0x406, 0x407,
+ 0x408, 0x419, 0x41a, 0x4f4, 0x4f5 };
+
static const char *const nct6798_temp_label[] = {
"",
"SYSTIN",
@@ -656,6 +677,26 @@ static const char *const nct6798_temp_label[] = {
#define NCT6798_TEMP_MASK 0xbfff0ffe
#define NCT6798_VIRT_TEMP_MASK 0x80000c00
+static const u16 NCT6799_REG_ALARM[NUM_REG_ALARM] = {
+ 0x459, 0x45A, 0x45B, 0x568, 0x45D, 0xc01 };
+
+static const s8 NCT6799_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 8, -1, 20, 16, 17, 24, 25, 26, /* in0-in11 */
+ 27, 28, 29, 30, 31, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 6, 7, 11, 10, 23, 33, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 4, 5, 40, 41, 42, 43, 44, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 12, 9, /* intr0-intr1 */
+};
+
+static const s8 NCT6799_BEEP_BITS[NUM_BEEP_BITS] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, /* in0-in11 */
+ 12, 13, 14, 15, 34, 35, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 25, 26, 27, 28, 29, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, 18, 19, 20, 21, 22, 23, -1, -1, -1, -1, /* temp1-temp12 */
+ 30, 31, 24 /* intr0-intr1, beep_en */
+};
+
+/* PECI Calibration only for NCT6799D, not NCT6796D-S */
static const char *const nct6799_temp_label[] = {
"",
"SYSTIN",
@@ -685,8 +726,8 @@ static const char *const nct6799_temp_label[] = {
"Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
- "PECI Agent 0 Calibration", /* undocumented */
- "PECI Agent 1 Calibration", /* undocumented */
+ "PECI/TSI Agent 0 Calibration",
+ "PECI/TSI Agent 1 Calibration",
"",
"Virtual_TEMP"
};
@@ -763,27 +804,23 @@ static const u16 NCT6106_REG_AUTO_PWM[] = { 0x164, 0x174, 0x184 };
static const u16 NCT6106_REG_ALARM[NUM_REG_ALARM] = {
0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d };
-static const s8 NCT6106_ALARM_BITS[] = {
- 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
- 9, -1, -1, -1, -1, -1, -1, /* in8..in14 */
- -1, /* unused */
- 32, 33, 34, -1, -1, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 16, 17, 18, 19, 20, 21, /* temp1..temp6 */
- 48, -1 /* intrusion0, intrusion1 */
+static const s8 NCT6106_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, -1, -1, -1, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 32, 33, 34, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, 18, 19, 20, 21, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 48, -1, /* intr0-intr1 */
};
static const u16 NCT6106_REG_BEEP[NUM_REG_BEEP] = {
0x3c0, 0x3c1, 0x3c2, 0x3c3, 0x3c4 };
-static const s8 NCT6106_BEEP_BITS[] = {
- 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
- 9, 10, 11, 12, -1, -1, -1, /* in8..in14 */
- 32, /* global beep enable */
- 24, 25, 26, 27, 28, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 16, 17, 18, 19, 20, 21, /* temp1..temp6 */
- 34, -1 /* intrusion0, intrusion1 */
+static const s8 NCT6106_BEEP_BITS[NUM_BEEP_BITS] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 24, 25, 26, 27, 28, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, 18, 19, 20, 21, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 34, -1, 32 /* intr0-intr1, beep_en */
};
static const u16 NCT6106_REG_TEMP_ALTERNATE[32] = {
@@ -843,24 +880,20 @@ static const u16 NCT6116_REG_AUTO_TEMP[] = {
static const u16 NCT6116_REG_AUTO_PWM[] = {
0x164, 0x174, 0x184, 0x1d4, 0x1e4 };
-static const s8 NCT6116_ALARM_BITS[] = {
- 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
- 9, -1, -1, -1, -1, -1, -1, /* in8..in9 */
- -1, /* unused */
- 32, 33, 34, 35, 36, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 16, 17, 18, -1, -1, -1, /* temp1..temp6 */
- 48, -1 /* intrusion0, intrusion1 */
+static const s8 NCT6116_ALARM_BITS[NUM_ALARM_BITS] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, -1, -1, -1, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 32, 33, 34, 35, 36, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, 18, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 48, -1, /* intr0-intr1 */
};
-static const s8 NCT6116_BEEP_BITS[] = {
- 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
- 9, 10, 11, 12, -1, -1, -1, /* in8..in14 */
- 32, /* global beep enable */
- 24, 25, 26, 27, 28, /* fan1..fan5 */
- -1, -1, -1, /* unused */
- 16, 17, 18, -1, -1, -1, /* temp1..temp6 */
- 34, -1 /* intrusion0, intrusion1 */
+static const s8 NCT6116_BEEP_BITS[NUM_BEEP_BITS] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, /* in0-in11 */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* in12-in23 */
+ 24, 25, 26, 27, 28, -1, -1, -1, -1, -1, -1, -1, /* fan1-fan12 */
+ 16, 17, 18, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* temp1-temp12 */
+ 34, -1, 32 /* intr0-intr1, beep_en */
};
static const u16 NCT6116_REG_TSI_TEMP[] = { 0x59, 0x5b };
@@ -958,12 +991,12 @@ static const u16 scale_in[15] = {
/*
* NCT6798 scaling:
* CPUVC, IN1, AVSB, 3VCC, IN0, IN8, IN4, 3VSB, VBAT, VTT, IN5, IN6, IN2,
- * IN3, IN7
- * Additional scales to be added later: IN9 (800), VHIF (1600)
+ * IN3, IN7, IN9, VHIF, IN10
+ * 15-17 for NCT6799 only
*/
-static const u16 scale_in_6798[15] = {
+static const u16 scale_in_6798[NUM_IN] = {
800, 800, 1600, 1600, 800, 800, 800, 1600, 1600, 1600, 1600, 1600, 800,
- 800, 800
+ 800, 800, 800, 1600, 800
};
static inline long in_from_reg(u8 reg, u8 nr, const u16 *scales)
@@ -3862,13 +3895,9 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
case nct6795:
case nct6796:
case nct6797:
- case nct6798:
- case nct6799:
data->in_num = 15;
data->pwm_num = (data->kind == nct6796 ||
- data->kind == nct6797 ||
- data->kind == nct6798 ||
- data->kind == nct6799) ? 7 : 6;
+ data->kind == nct6797) ? 7 : 6;
data->auto_pwm_num = 4;
data->has_fan_div = false;
data->temp_fixed_num = 6;
@@ -3912,16 +3941,6 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
data->temp_mask = NCT6796_TEMP_MASK;
data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
break;
- case nct6798:
- data->temp_label = nct6798_temp_label;
- data->temp_mask = NCT6798_TEMP_MASK;
- data->virt_temp_mask = NCT6798_VIRT_TEMP_MASK;
- break;
- case nct6799:
- data->temp_label = nct6799_temp_label;
- data->temp_mask = NCT6799_TEMP_MASK;
- data->virt_temp_mask = NCT6799_VIRT_TEMP_MASK;
- break;
}
data->REG_CONFIG = NCT6775_REG_CONFIG;
@@ -3980,8 +3999,6 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
case nct6795:
case nct6796:
case nct6797:
- case nct6798:
- case nct6799:
data->REG_TSI_TEMP = NCT6796_REG_TSI_TEMP;
num_reg_tsi_temp = ARRAY_SIZE(NCT6796_REG_TSI_TEMP);
break;
@@ -3990,9 +4007,6 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
break;
}
- if (data->kind == nct6798 || data->kind == nct6799)
- data->scale_in = scale_in_6798;
-
reg_temp = NCT6779_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
if (data->kind == nct6791) {
@@ -4009,6 +4023,95 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
break;
+ case nct6798:
+ case nct6799:
+ data->in_num = data->kind == nct6799 ? 18 : 15;
+ data->scale_in = scale_in_6798;
+ data->pwm_num = 7;
+ data->auto_pwm_num = 4;
+ data->has_fan_div = false;
+ data->temp_fixed_num = 6;
+ data->num_temp_alarms = 7;
+ data->num_temp_beeps = 8;
+
+ data->ALARM_BITS = NCT6799_ALARM_BITS;
+ data->BEEP_BITS = NCT6799_BEEP_BITS;
+
+ data->fan_from_reg = fan_from_reg_rpm;
+ data->fan_from_reg_min = fan_from_reg13;
+ data->target_temp_mask = 0xff;
+ data->tolerance_mask = 0x07;
+ data->speed_tolerance_limit = 63;
+
+ switch (data->kind) {
+ default:
+ case nct6798:
+ data->temp_label = nct6798_temp_label;
+ data->temp_mask = NCT6798_TEMP_MASK;
+ data->virt_temp_mask = NCT6798_VIRT_TEMP_MASK;
+ break;
+ case nct6799:
+ data->temp_label = nct6799_temp_label;
+ data->temp_mask = NCT6799_TEMP_MASK;
+ data->virt_temp_mask = NCT6799_VIRT_TEMP_MASK;
+ break;
+ }
+
+ data->REG_CONFIG = NCT6775_REG_CONFIG;
+ data->REG_VBAT = NCT6775_REG_VBAT;
+ data->REG_DIODE = NCT6775_REG_DIODE;
+ data->DIODE_MASK = NCT6775_DIODE_MASK;
+ data->REG_VIN = NCT6779_REG_IN;
+ data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
+ data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6779_REG_FAN;
+ data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
+ data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
+ data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6775_REG_PWM;
+ data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_PWM[5] = NCT6791_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6791_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM_READ = NCT6775_REG_PWM_READ;
+ data->REG_PWM_MODE = NCT6776_REG_PWM_MODE;
+ data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK;
+ data->REG_AUTO_TEMP = NCT6775_REG_AUTO_TEMP;
+ data->REG_AUTO_PWM = NCT6775_REG_AUTO_PWM;
+ data->REG_CRITICAL_TEMP = NCT6775_REG_CRITICAL_TEMP;
+ data->REG_CRITICAL_TEMP_TOLERANCE = NCT6775_REG_CRITICAL_TEMP_TOLERANCE;
+ data->REG_CRITICAL_PWM_ENABLE = NCT6779_REG_CRITICAL_PWM_ENABLE;
+ data->CRITICAL_PWM_ENABLE_MASK = NCT6779_CRITICAL_PWM_ENABLE_MASK;
+ data->REG_CRITICAL_PWM = NCT6779_REG_CRITICAL_PWM;
+ data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
+ data->REG_TEMP_SOURCE = NCT6798_REG_TEMP_SOURCE;
+ data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
+ data->REG_WEIGHT_TEMP_SEL = NCT6791_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6791_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6791_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6791_REG_WEIGHT_TEMP_BASE;
+ data->REG_ALARM = NCT6799_REG_ALARM;
+ data->REG_BEEP = NCT6792_REG_BEEP;
+ data->REG_TSI_TEMP = NCT6796_REG_TSI_TEMP;
+ num_reg_tsi_temp = ARRAY_SIZE(NCT6796_REG_TSI_TEMP);
+
+ reg_temp = NCT6798_REG_TEMP;
+ num_reg_temp = ARRAY_SIZE(NCT6798_REG_TEMP);
+ reg_temp_mon = NCT6798_REG_TEMP_MON;
+ num_reg_temp_mon = ARRAY_SIZE(NCT6798_REG_TEMP_MON);
+ reg_temp_over = NCT6798_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6798_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6798_REG_TEMP_CRIT;
+
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index a409d7a0b813..81bf03dad6bb 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -35,7 +35,7 @@ static const char * const nct6775_sio_names[] __initconst = {
"NCT6796D",
"NCT6797D",
"NCT6798D",
- "NCT6799D",
+ "NCT6796D-S/NCT6799D-R",
};
static unsigned short force_id;
diff --git a/drivers/hwmon/nct6775.h b/drivers/hwmon/nct6775.h
index a84c6ce7275d..296eff99d003 100644
--- a/drivers/hwmon/nct6775.h
+++ b/drivers/hwmon/nct6775.h
@@ -8,7 +8,7 @@ enum kinds { nct6106, nct6116, nct6775, nct6776, nct6779, nct6791, nct6792,
nct6793, nct6795, nct6796, nct6797, nct6798, nct6799 };
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
-#define NUM_TEMP 10 /* Max number of temp attribute sets w/ limits*/
+#define NUM_TEMP 12 /* Max number of temp attribute sets w/ limits*/
#define NUM_TEMP_FIXED 6 /* Max number of fixed temp attribute sets */
#define NUM_TSI_TEMP 8 /* Max number of TSI temp register pairs */
@@ -16,6 +16,7 @@ enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
#define NUM_REG_BEEP 5 /* Max number of beep registers */
#define NUM_FAN 7
+#define NUM_IN 18
struct nct6775_data {
int addr; /* IO base of hw monitor block */
@@ -97,7 +98,7 @@ struct nct6775_data {
/* Register values */
u8 bank; /* current register bank */
u8 in_num; /* number of in inputs we have */
- u8 in[15][3]; /* [0]=in, [1]=in_max, [2]=in_min */
+ u8 in[NUM_IN][3]; /* [0]=in, [1]=in_max, [2]=in_min */
const u16 *scale_in; /* internal scaling factors */
unsigned int rpm[NUM_FAN];
u16 fan_min[NUM_FAN];
@@ -166,7 +167,7 @@ struct nct6775_data {
u16 have_temp;
u16 have_temp_fixed;
u16 have_tsi_temp;
- u16 have_in;
+ u32 have_in;
/* Remember extra register values over suspend/resume */
u8 vbat;
@@ -239,10 +240,25 @@ nct6775_add_attr_group(struct nct6775_data *data, const struct attribute_group *
#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28
-#define FAN_ALARM_BASE 16
-#define TEMP_ALARM_BASE 24
-#define INTRUSION_ALARM_BASE 30
-#define BEEP_ENABLE_BASE 15
+/*
+ * ALARM_BITS and BEEP_BITS store bit-index for the mask of the registers
+ * loaded into data->alarm and data->beep.
+ *
+ * Every input register (IN/TEMP/FAN) must have a corresponding
+ * ALARM/BEEP bit at the same index BITS[BASE + index].
+ * Set the value to -1 to hide that '*_alarm' attribute and
+ * to pad the bits until the next BASE.
+ *
+ * Beep has an additional GLOBAL_BEEP_ENABLE bit.
+ */
+#define VIN_ALARM_BASE 0
+#define FAN_ALARM_BASE 24
+#define TEMP_ALARM_BASE 36
+#define INTRUSION_ALARM_BASE 48
+#define BEEP_ENABLE_BASE 50
+
+#define NUM_ALARM_BITS (INTRUSION_ALARM_BASE + 4)
+#define NUM_BEEP_BITS (BEEP_ENABLE_BASE + 1)
/*
* Not currently used:
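With the fixed-stride layout introduced above, a channel's alarm bit is looked up at BITS[BASE + index], and -1 marks a channel without an alarm bit. A small stand-alone sketch of that lookup, reusing the base values from this header (the table contents are hypothetical):

#include <stdio.h>

#define VIN_ALARM_BASE		0
#define FAN_ALARM_BASE		24
#define TEMP_ALARM_BASE		36
#define INTRUSION_ALARM_BASE	48
#define NUM_ALARM_BITS		(INTRUSION_ALARM_BASE + 4)

/* Return the alarm state for fan 'index', or -1 if the chip has no
 * alarm bit wired up for that channel (table entry is -1). */
static int fan_alarm(const signed char *alarm_bits,
		     unsigned long long alarms, int index)
{
	int bit = alarm_bits[FAN_ALARM_BASE + index];

	if (bit < 0)
		return -1;
	return !!(alarms & (1ULL << bit));
}

int main(void)
{
	/* First three fan entries of a hypothetical chip: bits 6, 7, none. */
	signed char bits[NUM_ALARM_BITS] = { 0 };

	bits[FAN_ALARM_BASE + 0] = 6;
	bits[FAN_ALARM_BASE + 1] = 7;
	bits[FAN_ALARM_BASE + 2] = -1;

	printf("%d %d %d\n",
	       fan_alarm(bits, 1ULL << 7, 0),	/* bit 6 clear -> 0  */
	       fan_alarm(bits, 1ULL << 7, 1),	/* bit 7 set   -> 1  */
	       fan_alarm(bits, 1ULL << 7, 2));	/* no bit      -> -1 */
	return 0;
}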
diff --git a/drivers/hwmon/nsa320-hwmon.c b/drivers/hwmon/nsa320-hwmon.c
index ebe6b031e56f..18076ba7fc14 100644
--- a/drivers/hwmon/nsa320-hwmon.c
+++ b/drivers/hwmon/nsa320-hwmon.c
@@ -20,8 +20,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
/* Tests for error return values rely upon this value being < 0x80 */
@@ -193,7 +191,7 @@ static struct platform_driver nsa320_hwmon_driver = {
.probe = nsa320_hwmon_probe,
.driver = {
.name = "nsa320-hwmon",
- .of_match_table = of_match_ptr(of_nsa320_hwmon_match),
+ .of_match_table = of_nsa320_hwmon_match,
},
};
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
index 1e1cc67bcdea..ea9602063eab 100644
--- a/drivers/hwmon/oxp-sensors.c
+++ b/drivers/hwmon/oxp-sensors.c
@@ -434,23 +434,9 @@ static const struct hwmon_chip_info oxp_ec_chip_info = {
/* Initialization logic */
static int oxp_platform_probe(struct platform_device *pdev)
{
- const struct dmi_system_id *dmi_entry;
struct device *dev = &pdev->dev;
struct device *hwdev;
- /*
- * Have to check for AMD processor here because DMI strings are the
- * same between Intel and AMD boards, the only way to tell them apart
- * is the CPU.
- * Intel boards seem to have different EC registers and values to
- * read/write.
- */
- dmi_entry = dmi_first_match(dmi_table);
- if (!dmi_entry || boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
- return -ENODEV;
-
- board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
-
hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL,
&oxp_ec_chip_info, NULL);
@@ -469,6 +455,21 @@ static struct platform_device *oxp_platform_device;
static int __init oxp_platform_init(void)
{
+ const struct dmi_system_id *dmi_entry;
+
+ /*
+ * Have to check for AMD processor here because DMI strings are the
+ * same between Intel and AMD boards, the only way to tell them apart
+ * is the CPU.
+ * Intel boards seem to have different EC registers and values to
+ * read/write.
+ */
+ dmi_entry = dmi_first_match(dmi_table);
+ if (!dmi_entry || boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return -ENODEV;
+
+ board = (enum oxp_board)(unsigned long)dmi_entry->driver_data;
+
oxp_platform_device =
platform_create_bundle(&oxp_platform_driver,
oxp_platform_probe, NULL, 0, NULL, 0);
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index ed968401f93c..ce89da3937a0 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -219,19 +219,21 @@ static int check_populated_dimms(struct peci_dimmtemp *priv)
{
int chan_rank_max = priv->gen_info->chan_rank_max;
int dimm_idx_max = priv->gen_info->dimm_idx_max;
- u32 chan_rank_empty = 0;
- u32 dimm_mask = 0;
- int chan_rank, dimm_idx, ret;
+ DECLARE_BITMAP(dimm_mask, DIMM_NUMS_MAX);
+ DECLARE_BITMAP(chan_rank_empty, CHAN_RANK_MAX);
+
+ int chan_rank, dimm_idx, ret, i;
u32 pcs;
- BUILD_BUG_ON(BITS_PER_TYPE(chan_rank_empty) < CHAN_RANK_MAX);
- BUILD_BUG_ON(BITS_PER_TYPE(dimm_mask) < DIMM_NUMS_MAX);
if (chan_rank_max * dimm_idx_max > DIMM_NUMS_MAX) {
WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d",
chan_rank_max, dimm_idx_max);
return -EINVAL;
}
+ bitmap_zero(dimm_mask, DIMM_NUMS_MAX);
+ bitmap_zero(chan_rank_empty, CHAN_RANK_MAX);
+
for (chan_rank = 0; chan_rank < chan_rank_max; chan_rank++) {
ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs);
if (ret) {
@@ -242,7 +244,7 @@ static int check_populated_dimms(struct peci_dimmtemp *priv)
* detection to be performed at a later point in time.
*/
if (ret == -EINVAL) {
- chan_rank_empty |= BIT(chan_rank);
+ bitmap_set(chan_rank_empty, chan_rank, 1);
continue;
}
@@ -251,7 +253,7 @@ static int check_populated_dimms(struct peci_dimmtemp *priv)
for (dimm_idx = 0; dimm_idx < dimm_idx_max; dimm_idx++)
if (__dimm_temp(pcs, dimm_idx))
- dimm_mask |= BIT(chan_rank * dimm_idx_max + dimm_idx);
+ bitmap_set(dimm_mask, chan_rank * dimm_idx_max + dimm_idx, 1);
}
/*
@@ -260,7 +262,7 @@ static int check_populated_dimms(struct peci_dimmtemp *priv)
* host platform boot. Retrying a couple of times lets us make sure
* that the state is persistent.
*/
- if (chan_rank_empty == GENMASK(chan_rank_max - 1, 0)) {
+ if (bitmap_full(chan_rank_empty, chan_rank_max)) {
if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) {
priv->no_dimm_retry_count++;
@@ -274,14 +276,16 @@ static int check_populated_dimms(struct peci_dimmtemp *priv)
* It's possible that memory training is not done yet. In this case we
* defer the detection to be performed at a later point in time.
*/
- if (!dimm_mask) {
+ if (bitmap_empty(dimm_mask, DIMM_NUMS_MAX)) {
priv->no_dimm_retry_count = 0;
return -EAGAIN;
}
- dev_dbg(priv->dev, "Scanned populated DIMMs: %#x\n", dimm_mask);
+ for_each_set_bit(i, dimm_mask, DIMM_NUMS_MAX) {
+ dev_dbg(priv->dev, "Found DIMM%#x\n", i);
+ }
- bitmap_from_arr32(priv->dimm_mask, &dimm_mask, DIMM_NUMS_MAX);
+ bitmap_copy(priv->dimm_mask, dimm_mask, DIMM_NUMS_MAX);
return 0;
}
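The rework above moves from open-coded u32 masks to the generic bitmap API so the DIMM mask can grow past 32 bits. A hedged kernel-style sketch of the same pattern; the size, function name and chosen bits are illustrative only:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_DIMMS	96	/* anything > 32 no longer fits a u32 mask */

/* Mark present DIMMs and report whether any were found. */
static bool example_scan(unsigned long *out_mask)
{
	DECLARE_BITMAP(mask, EXAMPLE_DIMMS);
	int i;

	bitmap_zero(mask, EXAMPLE_DIMMS);

	/* Pretend DIMMs 0 and 40 answered; bit 40 cannot be represented
	 * in a 32-bit mask. */
	bitmap_set(mask, 0, 1);
	bitmap_set(mask, 40, 1);

	for_each_set_bit(i, mask, EXAMPLE_DIMMS)
		; /* per-DIMM setup would go here */

	bitmap_copy(out_mask, mask, EXAMPLE_DIMMS);
	return !bitmap_empty(mask, EXAMPLE_DIMMS);
}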
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 270b6336b76d..b4e93bd5835e 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -317,6 +317,13 @@ config SENSORS_MP2975
This driver can also be built as a module. If so, the module will
be called mp2975.
+config SENSORS_MP2975_REGULATOR
+ depends on SENSORS_MP2975 && REGULATOR
+ bool "Regulator support for MPS MP2975"
+ help
+ If you say yes here you get regulator support for MPS MP2975
+ Dual Loop Digital Multi-Phase Controller.
+
config SENSORS_MP5023
tristate "MPS MP5023"
help
diff --git a/drivers/hwmon/pmbus/acbel-fsg032.c b/drivers/hwmon/pmbus/acbel-fsg032.c
index 0a0ef4ce3493..e0c55fd8f3a6 100644
--- a/drivers/hwmon/pmbus/acbel-fsg032.c
+++ b/drivers/hwmon/pmbus/acbel-fsg032.c
@@ -3,14 +3,51 @@
* Copyright 2023 IBM Corp.
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/i2c.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/pmbus.h>
#include <linux/hwmon-sysfs.h>
#include "pmbus.h"
+#define ACBEL_MFR_FW_REVISION 0xd9
+
+static ssize_t acbel_fsg032_debugfs_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct i2c_client *client = file->private_data;
+ u8 data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
+ char out[8];
+ int rc;
+
+ rc = i2c_smbus_read_block_data(client, ACBEL_MFR_FW_REVISION, data);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(out, sizeof(out), "%*phN\n", min(rc, 3), data);
+ return simple_read_from_buffer(buf, count, ppos, out, rc);
+}
+
+static const struct file_operations acbel_debugfs_ops = {
+ .llseek = noop_llseek,
+ .read = acbel_fsg032_debugfs_read,
+ .write = NULL,
+ .open = simple_open,
+};
+
+static void acbel_fsg032_init_debugfs(struct i2c_client *client)
+{
+ struct dentry *debugfs = pmbus_get_debugfs_dir(client);
+
+ if (!debugfs)
+ return;
+
+ debugfs_create_file("fw_version", 0444, debugfs, client, &acbel_debugfs_ops);
+}
+
static const struct i2c_device_id acbel_fsg032_id[] = {
{ "acbel_fsg032" },
{}
@@ -59,6 +96,7 @@ static int acbel_fsg032_probe(struct i2c_client *client)
if (rc)
return rc;
+ acbel_fsg032_init_debugfs(client);
return 0;
}
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
index f7ff3e4650b7..04e0d598a6e5 100644
--- a/drivers/hwmon/pmbus/dps920ab.c
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -9,7 +9,7 @@
#include <linux/debugfs.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "pmbus.h"
struct dps920ab_data {
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index c791925b8907..1ba4c5e95820 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -13,7 +13,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pmbus.h>
#include "pmbus.h"
@@ -489,7 +489,7 @@ static int ibm_cffps_probe(struct i2c_client *client)
const struct i2c_device_id *id;
if (md) {
- vs = (enum versions)md;
+ vs = (uintptr_t)md;
} else {
id = i2c_match_id(ibm_cffps_id, client);
if (id)
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 871c322d3d51..04185be3fdb6 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -6,7 +6,7 @@
*
* VOUT_MODE is not supported by the device. The driver fakes VOUT linear16
* mode with exponent value -8 as direct mode with m=256/b=0/R=0.
- *
+ *
* The device supports VOUT_PEAK, IOUT_PEAK, and TEMPERATURE_PEAK, however
* this driver does not currently support them.
*/
@@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/driver.h>
#include "pmbus.h"
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
index 7bcf27995033..d56ec24764fd 100644
--- a/drivers/hwmon/pmbus/max20730.c
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pmbus.h>
#include <linux/util_macros.h>
#include "pmbus.h"
@@ -114,6 +114,7 @@ static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
const struct pmbus_driver_info *info;
const struct max20730_data *data;
char tbuf[DEBUG_FS_DATA_MAX] = { 0 };
+ char *result = tbuf;
u16 val;
info = pmbus_get_driver_info(psu->client);
@@ -148,13 +149,13 @@ static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
>> MAX20730_MFR_DEVSET1_TSTAT_BIT_POS;
if (val == 0)
- len = strlcpy(tbuf, "2000\n", DEBUG_FS_DATA_MAX);
+ result = "2000\n";
else if (val == 1)
- len = strlcpy(tbuf, "125\n", DEBUG_FS_DATA_MAX);
+ result = "125\n";
else if (val == 2)
- len = strlcpy(tbuf, "62.5\n", DEBUG_FS_DATA_MAX);
+ result = "62.5\n";
else
- len = strlcpy(tbuf, "32\n", DEBUG_FS_DATA_MAX);
+ result = "32\n";
break;
case MAX20730_DEBUGFS_INTERNAL_GAIN:
val = (data->mfr_devset1 & MAX20730_MFR_DEVSET1_RGAIN_MASK)
@@ -163,35 +164,35 @@ static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
if (data->id == max20734) {
/* AN6209 */
if (val == 0)
- len = strlcpy(tbuf, "0.8\n", DEBUG_FS_DATA_MAX);
+ result = "0.8\n";
else if (val == 1)
- len = strlcpy(tbuf, "3.2\n", DEBUG_FS_DATA_MAX);
+ result = "3.2\n";
else if (val == 2)
- len = strlcpy(tbuf, "1.6\n", DEBUG_FS_DATA_MAX);
+ result = "1.6\n";
else
- len = strlcpy(tbuf, "6.4\n", DEBUG_FS_DATA_MAX);
+ result = "6.4\n";
} else if (data->id == max20730 || data->id == max20710) {
/* AN6042 or AN6140 */
if (val == 0)
- len = strlcpy(tbuf, "0.9\n", DEBUG_FS_DATA_MAX);
+ result = "0.9\n";
else if (val == 1)
- len = strlcpy(tbuf, "3.6\n", DEBUG_FS_DATA_MAX);
+ result = "3.6\n";
else if (val == 2)
- len = strlcpy(tbuf, "1.8\n", DEBUG_FS_DATA_MAX);
+ result = "1.8\n";
else
- len = strlcpy(tbuf, "7.2\n", DEBUG_FS_DATA_MAX);
+ result = "7.2\n";
} else if (data->id == max20743) {
/* AN6042 */
if (val == 0)
- len = strlcpy(tbuf, "0.45\n", DEBUG_FS_DATA_MAX);
+ result = "0.45\n";
else if (val == 1)
- len = strlcpy(tbuf, "1.8\n", DEBUG_FS_DATA_MAX);
+ result = "1.8\n";
else if (val == 2)
- len = strlcpy(tbuf, "0.9\n", DEBUG_FS_DATA_MAX);
+ result = "0.9\n";
else
- len = strlcpy(tbuf, "3.6\n", DEBUG_FS_DATA_MAX);
+ result = "3.6\n";
} else {
- len = strlcpy(tbuf, "Not supported\n", DEBUG_FS_DATA_MAX);
+ result = "Not supported\n";
}
break;
case MAX20730_DEBUGFS_BOOT_VOLTAGE:
@@ -199,26 +200,26 @@ static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
>> MAX20730_MFR_DEVSET1_VBOOT_BIT_POS;
if (val == 0)
- len = strlcpy(tbuf, "0.6484\n", DEBUG_FS_DATA_MAX);
+ result = "0.6484\n";
else if (val == 1)
- len = strlcpy(tbuf, "0.8984\n", DEBUG_FS_DATA_MAX);
+ result = "0.8984\n";
else if (val == 2)
- len = strlcpy(tbuf, "1.0\n", DEBUG_FS_DATA_MAX);
+ result = "1.0\n";
else
- len = strlcpy(tbuf, "Invalid\n", DEBUG_FS_DATA_MAX);
+ result = "Invalid\n";
break;
case MAX20730_DEBUGFS_OUT_V_RAMP_RATE:
val = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_VRATE)
>> MAX20730_MFR_DEVSET2_VRATE_BIT_POS;
if (val == 0)
- len = strlcpy(tbuf, "4\n", DEBUG_FS_DATA_MAX);
+ result = "4\n";
else if (val == 1)
- len = strlcpy(tbuf, "2\n", DEBUG_FS_DATA_MAX);
+ result = "2\n";
else if (val == 2)
- len = strlcpy(tbuf, "1\n", DEBUG_FS_DATA_MAX);
+ result = "1\n";
else
- len = strlcpy(tbuf, "Invalid\n", DEBUG_FS_DATA_MAX);
+ result = "Invalid\n";
break;
case MAX20730_DEBUGFS_OC_PROTECT_MODE:
ret = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_OCPM_MASK)
@@ -230,13 +231,13 @@ static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
>> MAX20730_MFR_DEVSET2_SS_BIT_POS;
if (val == 0)
- len = strlcpy(tbuf, "0.75\n", DEBUG_FS_DATA_MAX);
+ result = "0.75\n";
else if (val == 1)
- len = strlcpy(tbuf, "1.5\n", DEBUG_FS_DATA_MAX);
+ result = "1.5\n";
else if (val == 2)
- len = strlcpy(tbuf, "3\n", DEBUG_FS_DATA_MAX);
+ result = "3\n";
else
- len = strlcpy(tbuf, "6\n", DEBUG_FS_DATA_MAX);
+ result = "6\n";
break;
case MAX20730_DEBUGFS_IMAX:
ret = (data->mfr_devset2 & MAX20730_MFR_DEVSET2_IMAX_MASK)
@@ -287,10 +288,11 @@ static ssize_t max20730_debugfs_read(struct file *file, char __user *buf,
"%d.%d\n", ret / 10000, ret % 10000);
break;
default:
- len = strlcpy(tbuf, "Invalid\n", DEBUG_FS_DATA_MAX);
+ result = "Invalid\n";
}
- return simple_read_from_buffer(buf, count, ppos, tbuf, len);
+ len = strlen(result);
+ return simple_read_from_buffer(buf, count, ppos, result, len);
}
static const struct file_operations max20730_fops = {
@@ -714,7 +716,7 @@ static int max20730_probe(struct i2c_client *client)
}
if (client->dev.of_node)
- chip_id = (enum chips)of_device_get_match_data(dev);
+ chip_id = (uintptr_t)of_device_get_match_data(dev);
else
chip_id = i2c_match_id(max20730_id, client)->driver_data;
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index 2109b0458a8b..26ba50633100 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include "pmbus.h"
/* Vendor specific registers. */
@@ -34,6 +35,8 @@
#define MP2975_MFR_OVP_TH_SET 0xe5
#define MP2975_MFR_UVP_SET 0xe6
+#define MP2973_MFR_RESO_SET 0xc7
+
#define MP2975_VOUT_FORMAT BIT(15)
#define MP2975_VID_STEP_SEL_R1 BIT(4)
#define MP2975_IMVP9_EN_R1 BIT(13)
@@ -48,43 +51,80 @@
#define MP2975_SENSE_AMPL_HALF 2
#define MP2975_VIN_UV_LIMIT_UNIT 8
+#define MP2973_VOUT_FORMAT_R1 GENMASK(7, 6)
+#define MP2973_VOUT_FORMAT_R2 GENMASK(4, 3)
+#define MP2973_VOUT_FORMAT_DIRECT_R1 BIT(7)
+#define MP2973_VOUT_FORMAT_LINEAR_R1 BIT(6)
+#define MP2973_VOUT_FORMAT_DIRECT_R2 BIT(4)
+#define MP2973_VOUT_FORMAT_LINEAR_R2 BIT(3)
+
+#define MP2973_MFR_VR_MULTI_CONFIG_R1 0x0d
+#define MP2973_MFR_VR_MULTI_CONFIG_R2 0x1d
+#define MP2973_VID_STEP_SEL_R1 BIT(4)
+#define MP2973_IMVP9_EN_R1 BIT(14)
+#define MP2973_VID_STEP_SEL_R2 BIT(3)
+#define MP2973_IMVP9_EN_R2 BIT(13)
+
+#define MP2973_MFR_OCP_TOTAL_SET 0x5f
+#define MP2973_OCP_TOTAL_CUR_MASK GENMASK(6, 0)
+#define MP2973_MFR_OCP_LEVEL_RES BIT(15)
+
+#define MP2973_MFR_READ_IOUT_PK 0x90
+#define MP2973_MFR_READ_POUT_PK 0x91
+
#define MP2975_MAX_PHASE_RAIL1 8
#define MP2975_MAX_PHASE_RAIL2 4
+
+#define MP2973_MAX_PHASE_RAIL1 14
+#define MP2973_MAX_PHASE_RAIL2 6
+
+#define MP2971_MAX_PHASE_RAIL1 8
+#define MP2971_MAX_PHASE_RAIL2 3
+
#define MP2975_PAGE_NUM 2
#define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
+enum chips {
+ mp2971, mp2973, mp2975
+};
+
+static const int mp2975_max_phases[][MP2975_PAGE_NUM] = {
+ [mp2975] = { MP2975_MAX_PHASE_RAIL1, MP2975_MAX_PHASE_RAIL2 },
+ [mp2973] = { MP2973_MAX_PHASE_RAIL1, MP2973_MAX_PHASE_RAIL2 },
+ [mp2971] = { MP2971_MAX_PHASE_RAIL1, MP2971_MAX_PHASE_RAIL2 },
+};
+
struct mp2975_data {
struct pmbus_driver_info info;
+ enum chips chip_id;
int vout_scale;
+ int max_phases[MP2975_PAGE_NUM];
int vid_step[MP2975_PAGE_NUM];
int vref[MP2975_PAGE_NUM];
int vref_off[MP2975_PAGE_NUM];
int vout_max[MP2975_PAGE_NUM];
int vout_ov_fixed[MP2975_PAGE_NUM];
- int vout_format[MP2975_PAGE_NUM];
int curr_sense_gain[MP2975_PAGE_NUM];
};
-#define to_mp2975_data(x) container_of(x, struct mp2975_data, info)
+static const struct i2c_device_id mp2975_id[] = {
+ {"mp2971", mp2971},
+ {"mp2973", mp2973},
+ {"mp2975", mp2975},
+ {}
+};
-static int mp2975_read_byte_data(struct i2c_client *client, int page, int reg)
-{
- switch (reg) {
- case PMBUS_VOUT_MODE:
- /*
- * Enforce VOUT direct format, since device allows to set the
- * different formats for the different rails. Conversion from
- * VID to direct provided by driver internally, in case it is
- * necessary.
- */
- return PB_VOUT_MODE_DIRECT;
- default:
- return -ENODATA;
- }
-}
+MODULE_DEVICE_TABLE(i2c, mp2975_id);
+
+static const struct regulator_desc __maybe_unused mp2975_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+ PMBUS_REGULATOR("vout", 1),
+};
+
+#define to_mp2975_data(x) container_of(x, struct mp2975_data, info)
static int
mp2975_read_word_helper(struct i2c_client *client, int page, int phase, u8 reg,
@@ -117,6 +157,41 @@ mp2975_vid2direct(int vrf, int val)
return 0;
}
+#define MAX_LIN_MANTISSA (1023 * 1000)
+#define MIN_LIN_MANTISSA (511 * 1000)
+
+/* Converts a milli-unit DIRECT value to LINEAR11 format */
+static u16 mp2975_data2reg_linear11(s64 val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_LIN_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_LIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = clamp_val(DIV_ROUND_CLOSEST_ULL(val, 1000), 0, 0x3ff);
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
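For reference, the LINEAR11 packing above can be sanity-checked outside the kernel. The following is a minimal, stand-alone user-space sketch (not part of this patch): it mirrors mp2975_data2reg_linear11() with plain C arithmetic, adds a decoder, and round-trips an illustrative 61 A value passed in milli-amps, the same units the driver feeds in.

/* Stand-alone LINEAR11 round-trip check; mirrors the helper above. */
#include <stdio.h>
#include <stdint.h>

#define MAX_LIN_MANTISSA	(1023 * 1000)
#define MIN_LIN_MANTISSA	(511 * 1000)

/* Milli-units in, LINEAR11 word out (same scaling as the kernel helper). */
static uint16_t data2reg_linear11(int64_t val)
{
	int exponent = 0;
	int64_t mantissa;

	if (val == 0)
		return 0;

	/* Reduce large values until the mantissa fits into 10 bits. */
	while (val >= MAX_LIN_MANTISSA && exponent < 15) {
		exponent++;
		val >>= 1;
	}
	/* Scale small values up to keep precision. */
	while (val < MIN_LIN_MANTISSA && exponent > -15) {
		exponent--;
		val <<= 1;
	}

	/* Milli-units to units, rounded, clamped to 10 bits. */
	mantissa = (val + 500) / 1000;
	if (mantissa > 0x3ff)
		mantissa = 0x3ff;

	return (uint16_t)((mantissa & 0x7ff) | ((exponent & 0x1f) << 11));
}

/* Decode back to milli-units: value = mantissa * 2^exponent. */
static int64_t reg2data_linear11(uint16_t reg)
{
	int exponent = reg >> 11;
	int64_t mantissa = reg & 0x7ff;

	if (exponent > 15)		/* sign-extend 5-bit exponent */
		exponent -= 32;
	if (mantissa > 0x3ff)		/* sign-extend 11-bit mantissa */
		mantissa -= 0x800;

	mantissa *= 1000;
	return exponent >= 0 ? mantissa << exponent : mantissa >> -exponent;
}

int main(void)
{
	int64_t in = 61000;		/* 61 A expressed in mA */
	uint16_t reg = data2reg_linear11(in);

	printf("in=%lld mA reg=0x%04x out=%lld mA\n",
	       (long long)in, reg, (long long)reg2data_linear11(reg));
	return 0;
}

The decoder simply sign-extends the 5-bit exponent and 11-bit mantissa and evaluates mantissa * 2^exponent, which is how the PMBus core will interpret the word returned by the driver.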
static int
mp2975_read_phase(struct i2c_client *client, struct mp2975_data *data,
int page, int phase, u8 reg)
@@ -214,6 +289,89 @@ mp2975_read_phases(struct i2c_client *client, struct mp2975_data *data,
return ret;
}
+static int mp2973_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2975_data *data = to_mp2975_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_OT_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(7, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(ret, MP2975_VIN_UV_LIMIT_UNIT);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+		 * MP2971 and MP2973 only support tracking (ovp1) mode.
+ */
+ ret = mp2975_read_word_helper(client, page, phase,
+ MP2975_MFR_OVP_TH_SET,
+ GENMASK(2, 0));
+ if (ret < 0)
+ return ret;
+
+ ret = data->vout_max[page] + 50 * (ret + 1);
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase, reg,
+ GENMASK(8, 0));
+ if (ret < 0)
+ return ret;
+ ret = mp2975_vid2direct(info->vrm_version[page], ret);
+ break;
+ case PMBUS_VIRT_READ_POUT_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ MP2973_MFR_READ_POUT_PK);
+ break;
+ case PMBUS_VIRT_READ_IOUT_MAX:
+ ret = pmbus_read_word_data(client, page, phase,
+ MP2973_MFR_READ_IOUT_PK);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ ret = mp2975_read_word_helper(client, page, phase,
+ MP2973_MFR_OCP_TOTAL_SET,
+ GENMASK(15, 0));
+ if (ret < 0)
+ return ret;
+
+ if (ret & MP2973_MFR_OCP_LEVEL_RES)
+ ret = 2 * (ret & MP2973_OCP_TOTAL_CUR_MASK);
+ else
+ ret = ret & MP2973_OCP_TOTAL_CUR_MASK;
+
+ ret = mp2975_data2reg_linear11(ret * info->phases[page] * 1000);
+ break;
+ case PMBUS_UT_WARN_LIMIT:
+ case PMBUS_UT_FAULT_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_IIN_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_LV_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_IOUT_UC_FAULT_LIMIT:
+ case PMBUS_POUT_OP_FAULT_LIMIT:
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ case PMBUS_PIN_OP_WARN_LIMIT:
+ return -ENXIO;
+ default:
+ return -ENODATA;
+ }
+
+ return ret;
+}
+
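To make the PMBUS_IOUT_OC_FAULT_LIMIT branch above concrete: the low 7 bits of MP2973_MFR_OCP_TOTAL_SET carry the limit, bit 15 selects whether each LSB is worth 1 A or 2 A, and the result is scaled by the rail's phase count before being handed to mp2975_data2reg_linear11() in milli-amps. A small stand-alone sketch of that arithmetic, with purely illustrative register and phase values:

/* Worked example for the OC fault limit decoding above (illustrative values). */
#include <stdio.h>

#define MP2973_OCP_TOTAL_CUR_MASK	0x7f		/* GENMASK(6, 0) */
#define MP2973_MFR_OCP_LEVEL_RES	(1u << 15)

int main(void)
{
	unsigned int raw = 0x8019;	/* hypothetical MFR_OCP_TOTAL_SET readback */
	unsigned int phases = 2;	/* hypothetical phase count for the rail */
	unsigned int amps;

	/* Bit 15 set: 2 A per LSB, otherwise 1 A per LSB, as in the branch above. */
	if (raw & MP2973_MFR_OCP_LEVEL_RES)
		amps = 2 * (raw & MP2973_OCP_TOTAL_CUR_MASK);
	else
		amps = raw & MP2973_OCP_TOTAL_CUR_MASK;

	/*
	 * The driver then scales by the phase count and re-encodes the result:
	 * mp2975_data2reg_linear11(amps * phases * 1000)
	 */
	printf("limit = %u A x %u phases = %u A\n", amps, phases, amps * phases);
	return 0;
}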
static int mp2975_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -222,6 +380,11 @@ static int mp2975_read_word_data(struct i2c_client *client, int page,
int ret;
switch (reg) {
+ case PMBUS_STATUS_WORD:
+		/* MP2973 & MP2971 report PGOOD with the opposite polarity to PB_STATUS_POWER_GOOD_N, so flip the bit. */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ ret ^= PB_STATUS_POWER_GOOD_N;
+ break;
case PMBUS_OT_FAULT_LIMIT:
ret = mp2975_read_word_helper(client, page, phase, reg,
GENMASK(7, 0));
@@ -260,24 +423,6 @@ static int mp2975_read_word_data(struct i2c_client *client, int page,
ret = DIV_ROUND_CLOSEST(data->vref[page] * 10 - 50 *
(ret + 1) * data->vout_scale, 10);
break;
- case PMBUS_READ_VOUT:
- ret = mp2975_read_word_helper(client, page, phase, reg,
- GENMASK(11, 0));
- if (ret < 0)
- return ret;
-
- /*
- * READ_VOUT can be provided in VID or direct format. The
- * format type is specified by bit 15 of the register
- * MP2975_MFR_DC_LOOP_CTRL. The driver enforces VOUT direct
- * format, since device allows to set the different formats for
- * the different rails and also all VOUT limits registers are
- * provided in a direct format. In case format is VID - convert
- * to direct.
- */
- if (data->vout_format[page] == vid)
- ret = mp2975_vid2direct(info->vrm_version[page], ret);
- break;
case PMBUS_VIRT_READ_POUT_MAX:
ret = mp2975_read_word_helper(client, page, phase,
MP2975_MFR_READ_POUT_PK,
@@ -326,25 +471,25 @@ static int mp2975_read_word_data(struct i2c_client *client, int page,
return ret;
}
-static int mp2975_identify_multiphase_rail2(struct i2c_client *client)
+static int mp2975_identify_multiphase_rail2(struct i2c_client *client,
+ struct mp2975_data *data)
{
int ret;
/*
- * Identify multiphase for rail 2 - could be from 0 to 4.
+ * Identify multiphase for rail 2 - could be from 0 to data->max_phases[1].
* In case phase number is zero – only page zero is supported
*/
ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
if (ret < 0)
return ret;
- /* Identify multiphase for rail 2 - could be from 0 to 4. */
ret = i2c_smbus_read_word_data(client, MP2975_MFR_VR_MULTI_CONFIG_R2);
if (ret < 0)
return ret;
ret &= GENMASK(2, 0);
- return (ret >= 4) ? 4 : ret;
+ return (ret >= data->max_phases[1]) ? data->max_phases[1] : ret;
}
static void mp2975_set_phase_rail1(struct pmbus_driver_info *info)
@@ -375,7 +520,7 @@ mp2975_identify_multiphase(struct i2c_client *client, struct mp2975_data *data,
if (ret < 0)
return ret;
- /* Identify multiphase for rail 1 - could be from 1 to 8. */
+ /* Identify multiphase for rail 1 - could be from 1 to data->max_phases[0]. */
ret = i2c_smbus_read_word_data(client, MP2975_MFR_VR_MULTI_CONFIG_R1);
if (ret <= 0)
return ret;
@@ -383,21 +528,23 @@ mp2975_identify_multiphase(struct i2c_client *client, struct mp2975_data *data,
info->phases[0] = ret & GENMASK(3, 0);
/*
- * The device provides a total of 8 PWM pins, and can be configured
+	 * The device provides a chip-dependent number of PWM pins, and can be configured
* to different phase count applications for rail 1 and rail 2.
- * Rail 1 can be set to 8 phases, while rail 2 can only be set to 4
- * phases at most. When rail 1’s phase count is configured as 0, rail
+	 * Rail 1 can be set to the chip's maximum phase count, while rail 2 is
+	 * limited to fewer phases. When rail 1’s phase count is configured as 0, rail
* 1 operates with 1-phase DCM. When rail 2 phase count is configured
* as 0, rail 2 is disabled.
*/
- if (info->phases[0] > MP2975_MAX_PHASE_RAIL1)
+ if (info->phases[0] > data->max_phases[0])
return -EINVAL;
- mp2975_set_phase_rail1(info);
- num_phases2 = min(MP2975_MAX_PHASE_RAIL1 - info->phases[0],
- MP2975_MAX_PHASE_RAIL2);
- if (info->phases[1] && info->phases[1] <= num_phases2)
- mp2975_set_phase_rail2(info, num_phases2);
+ if (data->chip_id == mp2975) {
+ mp2975_set_phase_rail1(info);
+ num_phases2 = min(data->max_phases[0] - info->phases[0],
+ data->max_phases[1]);
+ if (info->phases[1] && info->phases[1] <= num_phases2)
+ mp2975_set_phase_rail2(info, num_phases2);
+ }
return 0;
}
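The min() above is what divides the shared PWM budget between the two rails on the MP2975. A stand-alone sketch of that bookkeeping, using the MP2975 limits defined in this patch (the helper name is illustrative):

/* Rail-2 phase budget for the MP2975 branch above. */
#include <stdio.h>

#define MP2975_MAX_PHASE_RAIL1	8
#define MP2975_MAX_PHASE_RAIL2	4

/* Illustrative helper: phases left for rail 2 after rail 1 takes its share. */
static int rail2_budget(int rail1_phases)
{
	int left = MP2975_MAX_PHASE_RAIL1 - rail1_phases;

	return left < MP2975_MAX_PHASE_RAIL2 ? left : MP2975_MAX_PHASE_RAIL2;
}

int main(void)
{
	int r1;

	for (r1 = 0; r1 <= MP2975_MAX_PHASE_RAIL1; r1++)
		printf("rail1=%d phases -> rail2 can use at most %d\n",
		       r1, rail2_budget(r1));
	return 0;
}

In the driver this corresponds to num_phases2 = min(data->max_phases[0] - info->phases[0], data->max_phases[1]); rail 2 is only configured when info->phases[1] fits within that budget, and the whole block is skipped for MP2971/MP2973.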
@@ -451,6 +598,35 @@ mp2975_identify_rails_vid(struct i2c_client *client, struct mp2975_data *data,
MP2975_MFR_VR_MULTI_CONFIG_R2, 1,
MP2975_IMVP9_EN_R2,
MP2975_VID_STEP_SEL_R2);
+
+ return ret;
+}
+
+static int
+mp2973_identify_rails_vid(struct i2c_client *client, struct mp2975_data *data,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 2);
+ if (ret < 0)
+ return ret;
+
+ /* Identify VID mode for rail 1. */
+ ret = mp2975_identify_vid(client, data, info,
+ MP2973_MFR_VR_MULTI_CONFIG_R1, 0,
+ MP2973_IMVP9_EN_R1, MP2973_VID_STEP_SEL_R1);
+
+ if (ret < 0)
+ return ret;
+
+ /* Identify VID mode for rail 2, if connected. */
+ if (info->phases[1])
+ ret = mp2975_identify_vid(client, data, info,
+ MP2973_MFR_VR_MULTI_CONFIG_R2, 1,
+ MP2973_IMVP9_EN_R2,
+ MP2973_VID_STEP_SEL_R2);
+
return ret;
}
@@ -565,20 +741,37 @@ mp2975_vout_max_get(struct i2c_client *client, struct mp2975_data *data,
}
static int
-mp2975_identify_vout_format(struct i2c_client *client,
- struct mp2975_data *data, int page)
+mp2975_set_vout_format(struct i2c_client *client,
+ struct mp2975_data *data, int page)
{
- int ret;
+ int ret, i;
- ret = i2c_smbus_read_word_data(client, MP2975_MFR_DC_LOOP_CTRL);
- if (ret < 0)
- return ret;
-
- if (ret & MP2975_VOUT_FORMAT)
- data->vout_format[page] = vid;
- else
- data->vout_format[page] = direct;
- return 0;
+ /* Enable DIRECT VOUT format 1mV/LSB */
+ if (data->chip_id == mp2975) {
+ ret = i2c_smbus_read_word_data(client, MP2975_MFR_DC_LOOP_CTRL);
+ if (ret < 0)
+ return ret;
+ if (ret & MP2975_VOUT_FORMAT) {
+ ret &= ~MP2975_VOUT_FORMAT;
+ ret = i2c_smbus_write_word_data(client, MP2975_MFR_DC_LOOP_CTRL, ret);
+ }
+ } else {
+ ret = i2c_smbus_read_word_data(client, MP2973_MFR_RESO_SET);
+ if (ret < 0)
+ return ret;
+ i = ret;
+
+ if (page == 0) {
+ i &= ~MP2973_VOUT_FORMAT_R1;
+ i |= MP2973_VOUT_FORMAT_DIRECT_R1;
+ } else {
+ i &= ~MP2973_VOUT_FORMAT_R2;
+ i |= MP2973_VOUT_FORMAT_DIRECT_R2;
+ }
+ if (i != ret)
+ ret = i2c_smbus_write_word_data(client, MP2973_MFR_RESO_SET, i);
+ }
+ return ret;
}
static int
@@ -600,7 +793,7 @@ mp2975_vout_ov_scale_get(struct i2c_client *client, struct mp2975_data *data,
if (ret < 0)
return ret;
thres_dev = ret & MP2975_PRT_THRES_DIV_OV_EN ? MP2975_PROT_DEV_OV_ON :
- MP2975_PROT_DEV_OV_OFF;
+ MP2975_PROT_DEV_OV_OFF;
/* Select the gain of remote sense amplifier. */
ret = i2c_smbus_read_word_data(client, PMBUS_VOUT_SCALE_LOOP);
@@ -624,10 +817,10 @@ mp2975_vout_per_rail_config_get(struct i2c_client *client,
for (i = 0; i < data->info.pages; i++) {
ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
if (ret < 0)
- return ret;
+ continue;
- /* Obtain voltage reference offsets. */
- ret = mp2975_vref_offset_get(client, data, i);
+		/* Set VOUT format for READ_VOUT command: direct. */
+ ret = mp2975_set_vout_format(client, data, i);
if (ret < 0)
return ret;
@@ -636,12 +829,12 @@ mp2975_vout_per_rail_config_get(struct i2c_client *client,
if (ret < 0)
return ret;
- /*
- * Get VOUT format for READ_VOUT command : VID or direct.
- * Pages on same device can be configured with different
- * formats.
- */
- ret = mp2975_identify_vout_format(client, data, i);
+ /* Skip if reading Vref is unsupported */
+ if (data->chip_id != mp2975)
+ continue;
+
+ /* Obtain voltage reference offsets. */
+ ret = mp2975_vref_offset_get(client, data, i);
if (ret < 0)
return ret;
@@ -676,8 +869,32 @@ static struct pmbus_driver_info mp2975_info = {
PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT | PMBUS_PHASE_VIRTUAL,
- .read_byte_data = mp2975_read_byte_data,
.read_word_data = mp2975_read_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = mp2975_reg_desc,
+#endif
+};
+
+static struct pmbus_driver_info mp2973_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
+ .read_word_data = mp2973_read_word_data,
+#if IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = mp2975_reg_desc,
+#endif
};
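With .format[PSC_VOLTAGE_OUT] = direct, m = 1 and R = 3 (b defaults to 0), the PMBus core applies the standard direct-format relation Y = (m * X + b) * 10^R, so the raw VOUT word is simply the voltage in millivolts (1 mV/LSB, matching the format forced in mp2975_set_vout_format()). A minimal stand-alone illustration of that conversion (the helper name is illustrative, not from the driver):

/* Direct-format conversion with the coefficients from mp2973_info above. */
#include <stdio.h>
#include <math.h>

#define M_VOUT	1	/* .m[PSC_VOLTAGE_OUT] */
#define B_VOUT	0	/* .b defaults to 0 */
#define R_VOUT	3	/* .R[PSC_VOLTAGE_OUT] */

/* PMBus direct format: Y = (m * X + b) * 10^R, X in volts, Y the raw word. */
static double reg2volts(int y)
{
	return (y / pow(10.0, R_VOUT) - B_VOUT) / M_VOUT;
}

int main(void)
{
	/* A raw READ_VOUT of 1100 therefore decodes to 1.100 V (1 mV/LSB). */
	printf("raw 1100 -> %.3f V\n", reg2volts(1100));
	return 0;
}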
static int mp2975_probe(struct i2c_client *client)
@@ -691,11 +908,23 @@ static int mp2975_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- memcpy(&data->info, &mp2975_info, sizeof(*info));
+ if (client->dev.of_node)
+ data->chip_id = (enum chips)(unsigned long)of_device_get_match_data(&client->dev);
+ else
+ data->chip_id = i2c_match_id(mp2975_id, client)->driver_data;
+
+ memcpy(data->max_phases, mp2975_max_phases[data->chip_id],
+ sizeof(data->max_phases));
+
+ if (data->chip_id == mp2975)
+ memcpy(&data->info, &mp2975_info, sizeof(*info));
+ else
+ memcpy(&data->info, &mp2973_info, sizeof(*info));
+
info = &data->info;
/* Identify multiphase configuration for rail 2. */
- ret = mp2975_identify_multiphase_rail2(client);
+ ret = mp2975_identify_multiphase_rail2(client, data);
if (ret < 0)
return ret;
@@ -704,6 +933,8 @@ static int mp2975_probe(struct i2c_client *client)
data->info.pages = MP2975_PAGE_NUM;
data->info.phases[1] = ret;
data->info.func[1] = MP2975_RAIL2_FUNC;
+ if (IS_ENABLED(CONFIG_SENSORS_MP2975_REGULATOR))
+ data->info.num_regulators = MP2975_PAGE_NUM;
}
/* Identify multiphase configuration. */
@@ -711,25 +942,32 @@ static int mp2975_probe(struct i2c_client *client)
if (ret)
return ret;
- /* Identify VID setting per rail. */
- ret = mp2975_identify_rails_vid(client, data, info);
- if (ret < 0)
- return ret;
+ if (data->chip_id == mp2975) {
+ /* Identify VID setting per rail. */
+ ret = mp2975_identify_rails_vid(client, data, info);
+ if (ret < 0)
+ return ret;
- /* Obtain current sense gain of power stage. */
- ret = mp2975_current_sense_gain_get(client, data);
- if (ret)
- return ret;
+ /* Obtain current sense gain of power stage. */
+ ret = mp2975_current_sense_gain_get(client, data);
+ if (ret)
+ return ret;
- /* Obtain voltage reference values. */
- ret = mp2975_vref_get(client, data, info);
- if (ret)
- return ret;
+ /* Obtain voltage reference values. */
+ ret = mp2975_vref_get(client, data, info);
+ if (ret)
+ return ret;
- /* Obtain vout over-voltage scales. */
- ret = mp2975_vout_ov_scale_get(client, data, info);
- if (ret < 0)
- return ret;
+ /* Obtain vout over-voltage scales. */
+ ret = mp2975_vout_ov_scale_get(client, data, info);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Identify VID setting per rail. */
+ ret = mp2973_identify_rails_vid(client, data, info);
+ if (ret < 0)
+ return ret;
+ }
/* Obtain offsets, maximum and format for vout. */
ret = mp2975_vout_per_rail_config_get(client, data, info);
@@ -739,15 +977,10 @@ static int mp2975_probe(struct i2c_client *client)
return pmbus_do_probe(client, info);
}
-static const struct i2c_device_id mp2975_id[] = {
- {"mp2975", 0},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, mp2975_id);
-
static const struct of_device_id __maybe_unused mp2975_of_match[] = {
- {.compatible = "mps,mp2975"},
+ {.compatible = "mps,mp2971", .data = (void *)mp2971},
+ {.compatible = "mps,mp2973", .data = (void *)mp2973},
+ {.compatible = "mps,mp2975", .data = (void *)mp2975},
{}
};
MODULE_DEVICE_TABLE(of, mp2975_of_match);
diff --git a/drivers/hwmon/pmbus/mp5023.c b/drivers/hwmon/pmbus/mp5023.c
index c4c4324d2b74..21acb7fd9a1a 100644
--- a/drivers/hwmon/pmbus/mp5023.c
+++ b/drivers/hwmon/pmbus/mp5023.c
@@ -5,7 +5,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "pmbus.h"
static struct pmbus_driver_info mp5023_info = {
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index 865d42edda1a..6c62f01da7c6 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pmbus.h>
#include "pmbus.h"
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
index 7d8bd3167b21..c95433790b11 100644
--- a/drivers/hwmon/pmbus/pli1209bc.c
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -5,6 +5,7 @@
* Copyright (c) 2022 9elements GmbH
*/
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pmbus.h>
@@ -53,6 +54,30 @@ static int pli1209bc_read_word_data(struct i2c_client *client, int page,
}
}
+static int pli1209bc_write_byte(struct i2c_client *client, int page, u8 reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_CLEAR_FAULTS:
+ ret = pmbus_write_byte(client, page, reg);
+ /*
+ * PLI1209 takes 230 usec to execute the CLEAR_FAULTS command.
+ * During that time it's busy and NACKs all requests on the
+	 * SMBUS interface. It also NACKs reads on PMBUS_STATUS_BYTE,
+	 * making it impossible to poll the BUSY flag.
+	 *
+	 * Just wait unconditionally instead of polling the BUSY flag.
+ */
+ usleep_range(250, 300);
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
static const struct regulator_desc pli1209bc_reg_desc = {
.name = "vout2",
@@ -102,6 +127,7 @@ static struct pmbus_driver_info pli1209bc_info = {
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
| PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT,
.read_word_data = pli1209bc_read_word_data,
+ .write_byte = pli1209bc_write_byte,
#if IS_ENABLED(CONFIG_SENSORS_PLI1209BC_REGULATOR)
.num_regulators = 1,
.reg_desc = &pli1209bc_reg_desc,
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 69a4e62b6c8d..1363d9f89181 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -561,7 +561,8 @@ static bool pmbus_check_register(struct i2c_client *client,
rv = pmbus_check_status_cml(client);
if (rv < 0 && (data->flags & PMBUS_READ_STATUS_AFTER_FAILED_CHECK))
data->read_status(client, -1);
- pmbus_clear_fault_page(client, -1);
+ if (reg < PMBUS_VIRT_BASE)
+ pmbus_clear_fault_page(client, -1);
return rv >= 0;
}
@@ -2540,7 +2541,6 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- pmbus_clear_fault_page(client, page);
return 0;
}
diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
index b830f3b02bcc..a235c1cdf4fe 100644
--- a/drivers/hwmon/pmbus/q54sj108a2.c
+++ b/drivers/hwmon/pmbus/q54sj108a2.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "pmbus.h"
#define STORE_DEFAULT_ALL 0x11
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index ef99005a3af5..5c9466244d70 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "pmbus.h"
enum chips {
@@ -235,7 +235,7 @@ static int tps53679_probe(struct i2c_client *client)
enum chips chip_id;
if (dev->of_node)
- chip_id = (enum chips)of_device_get_match_data(dev);
+ chip_id = (uintptr_t)of_device_get_match_data(dev);
else
chip_id = i2c_match_id(tps53679_id, client)->driver_data;
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index c404d306e8f7..8d9d422450e5 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -588,7 +588,7 @@ static int ucd9000_probe(struct i2c_client *client)
}
if (client->dev.of_node)
- chip = (enum chips)of_device_get_match_data(&client->dev);
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
else
chip = mid->driver_data;
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index a82847945508..7920d1c06df0 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -103,7 +103,7 @@ static int ucd9200_probe(struct i2c_client *client)
}
if (client->dev.of_node)
- chip = (enum chips)of_device_get_match_data(&client->dev);
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
else
chip = mid->driver_data;
diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c
index b79cece4ac9a..dd85cf89f008 100644
--- a/drivers/hwmon/sbtsi_temp.c
+++ b/drivers/hwmon/sbtsi_temp.c
@@ -13,7 +13,6 @@
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/of.h>
/*
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 121e5e9f487f..a0d220609565 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -8,7 +8,7 @@
#include <linux/hwmon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scpi_protocol.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index bf18630619e0..79657910b79e 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -147,8 +147,20 @@ static const u16 mode_to_update_interval[] = {
100,
};
+static const struct hwmon_channel_info * const sht3x_channel_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MIN |
+ HWMON_T_MIN_HYST | HWMON_T_MAX |
+ HWMON_T_MAX_HYST | HWMON_T_ALARM),
+ HWMON_CHANNEL_INFO(humidity, HWMON_H_INPUT | HWMON_H_MIN |
+ HWMON_H_MIN_HYST | HWMON_H_MAX |
+ HWMON_H_MAX_HYST | HWMON_H_ALARM),
+ NULL,
+};
+
struct sht3x_data {
struct i2c_client *client;
+ enum sht3x_chips chip_id;
struct mutex i2c_lock; /* lock for sending i2c commands */
struct mutex data_lock; /* lock for updating driver data */
@@ -276,27 +288,24 @@ out:
return data;
}
-/* sysfs attributes */
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int temp1_input_read(struct device *dev)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return sprintf(buf, "%d\n", data->temperature);
+ return data->temperature;
}
-static ssize_t humidity1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int humidity1_input_read(struct device *dev)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return sprintf(buf, "%u\n", data->humidity);
+ return data->humidity;
}
/*
@@ -332,33 +341,24 @@ static int limits_update(struct sht3x_data *data)
return ret;
}
-static ssize_t temp1_limit_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int temp1_limit_read(struct device *dev, int index)
{
struct sht3x_data *data = dev_get_drvdata(dev);
- u8 index = to_sensor_dev_attr(attr)->index;
- int temperature_limit = data->temperature_limits[index];
- return sysfs_emit(buf, "%d\n", temperature_limit);
+ return data->temperature_limits[index];
}
-static ssize_t humidity1_limit_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int humidity1_limit_read(struct device *dev, int index)
{
struct sht3x_data *data = dev_get_drvdata(dev);
- u8 index = to_sensor_dev_attr(attr)->index;
- u32 humidity_limit = data->humidity_limits[index];
- return sysfs_emit(buf, "%u\n", humidity_limit);
+ return data->humidity_limits[index];
}
/*
- * limit_store must only be called with data_lock held
+ * limit_write must only be called with data_lock held
*/
-static size_t limit_store(struct device *dev,
- size_t count,
+static size_t limit_write(struct device *dev,
u8 index,
int temperature,
u32 humidity)
@@ -379,7 +379,7 @@ static size_t limit_store(struct device *dev,
* ST = (T + 45) / 175 * 2^16
* SRH = RH / 100 * 2^16
* adapted for fixed point arithmetic and packed the same as
- * in limit_show()
+ * in limit_read()
*/
raw = ((u32)(temperature + 45000) * 24543) >> (16 + 7);
raw |= ((humidity * 42950) >> 16) & 0xfe00;
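The two magic constants above are just the fixed-point form of those formulas with the driver's units (temperature in m°C, humidity in m%RH): 24543 ≈ 2^32 / 175000 and 42950 ≈ 2^32 / 100000, and only the limit register's top bits are kept (9 temperature MSBs in bits 8..0, 7 humidity MSBs in bits 15..9). A quick stand-alone check that the fixed-point and floating-point paths agree for 25 °C / 50 %RH:

/* Fixed-point check for the SHT3x limit packing above (stand-alone). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int temperature = 25000;	/* 25.000 degC, driver units are m degC */
	uint32_t humidity = 50000;	/* 50.000 %RH, driver units are m%RH   */

	/* Driver packing: 9 MSBs of ST in bits 8..0, 7 MSBs of SRH in bits 15..9. */
	uint32_t raw = ((uint32_t)(temperature + 45000) * 24543) >> (16 + 7);
	raw |= ((humidity * 42950) >> 16) & 0xfe00;

	/* Reference: ST = (T + 45) / 175 * 2^16, SRH = RH / 100 * 2^16. */
	uint32_t st = (uint32_t)(((25.0 + 45.0) / 175.0) * 65536.0);
	uint32_t srh = (uint32_t)((50.0 / 100.0) * 65536.0);
	uint32_t ref = (st >> 7) | (srh & 0xfe00);

	printf("packed 0x%04x, reference 0x%04x\n", (unsigned)raw, (unsigned)ref);
	return 0;
}

Both expressions should evaluate to 0x80cc for these inputs, i.e. the same packed limit word.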
@@ -400,50 +400,35 @@ static size_t limit_store(struct device *dev,
data->temperature_limits[index] = temperature;
data->humidity_limits[index] = humidity;
- return count;
+
+ return 0;
}
-static ssize_t temp1_limit_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static int temp1_limit_write(struct device *dev, int index, int val)
{
int temperature;
int ret;
struct sht3x_data *data = dev_get_drvdata(dev);
- u8 index = to_sensor_dev_attr(attr)->index;
- ret = kstrtoint(buf, 0, &temperature);
- if (ret)
- return ret;
-
- temperature = clamp_val(temperature, SHT3X_MIN_TEMPERATURE,
+ temperature = clamp_val(val, SHT3X_MIN_TEMPERATURE,
SHT3X_MAX_TEMPERATURE);
mutex_lock(&data->data_lock);
- ret = limit_store(dev, count, index, temperature,
+ ret = limit_write(dev, index, temperature,
data->humidity_limits[index]);
mutex_unlock(&data->data_lock);
return ret;
}
-static ssize_t humidity1_limit_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static int humidity1_limit_write(struct device *dev, int index, int val)
{
u32 humidity;
int ret;
struct sht3x_data *data = dev_get_drvdata(dev);
- u8 index = to_sensor_dev_attr(attr)->index;
-
- ret = kstrtou32(buf, 0, &humidity);
- if (ret)
- return ret;
- humidity = clamp_val(humidity, SHT3X_MIN_HUMIDITY, SHT3X_MAX_HUMIDITY);
+ humidity = clamp_val(val, SHT3X_MIN_HUMIDITY, SHT3X_MAX_HUMIDITY);
mutex_lock(&data->data_lock);
- ret = limit_store(dev, count, index, data->temperature_limits[index],
+ ret = limit_write(dev, index, data->temperature_limits[index],
humidity);
mutex_unlock(&data->data_lock);
@@ -474,7 +459,6 @@ static void sht3x_select_command(struct sht3x_data *data)
}
static int status_register_read(struct device *dev,
- struct device_attribute *attr,
char *buffer, int length)
{
int ret;
@@ -487,34 +471,30 @@ static int status_register_read(struct device *dev,
return ret;
}
-static ssize_t temp1_alarm_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int temp1_alarm_read(struct device *dev)
{
char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
int ret;
- ret = status_register_read(dev, attr, buffer,
+ ret = status_register_read(dev, buffer,
SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
if (ret)
return ret;
- return sysfs_emit(buf, "%d\n", !!(buffer[0] & 0x04));
+ return !!(buffer[0] & 0x04);
}
-static ssize_t humidity1_alarm_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int humidity1_alarm_read(struct device *dev)
{
char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
int ret;
- ret = status_register_read(dev, attr, buffer,
+ ret = status_register_read(dev, buffer,
SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
if (ret)
return ret;
- return sysfs_emit(buf, "%d\n", !!(buffer[0] & 0x08));
+ return !!(buffer[0] & 0x08);
}
static ssize_t heater_enable_show(struct device *dev,
@@ -524,7 +504,7 @@ static ssize_t heater_enable_show(struct device *dev,
char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
int ret;
- ret = status_register_read(dev, attr, buffer,
+ ret = status_register_read(dev, buffer,
SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
if (ret)
return ret;
@@ -560,39 +540,28 @@ static ssize_t heater_enable_store(struct device *dev,
return ret;
}
-static ssize_t update_interval_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int update_interval_read(struct device *dev)
{
struct sht3x_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n",
- mode_to_update_interval[data->mode]);
+ return mode_to_update_interval[data->mode];
}
-static ssize_t update_interval_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static int update_interval_write(struct device *dev, int val)
{
- u16 update_interval;
u8 mode;
int ret;
const char *command;
struct sht3x_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- ret = kstrtou16(buf, 0, &update_interval);
- if (ret)
- return ret;
-
- mode = get_mode_from_update_interval(update_interval);
+ mode = get_mode_from_update_interval(val);
mutex_lock(&data->data_lock);
/* mode did not change */
if (mode == data->mode) {
mutex_unlock(&data->data_lock);
- return count;
+ return 0;
}
mutex_lock(&data->i2c_lock);
@@ -634,7 +603,7 @@ out:
if (ret != SHT3X_CMD_LENGTH)
return ret < 0 ? ret : -EIO;
- return count;
+ return 0;
}
static ssize_t repeatability_show(struct device *dev,
@@ -668,60 +637,219 @@ static ssize_t repeatability_store(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp1_input, 0);
-static SENSOR_DEVICE_ATTR_RO(humidity1_input, humidity1_input, 0);
-static SENSOR_DEVICE_ATTR_RW(temp1_max, temp1_limit, limit_max);
-static SENSOR_DEVICE_ATTR_RW(humidity1_max, humidity1_limit, limit_max);
-static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp1_limit, limit_max_hyst);
-static SENSOR_DEVICE_ATTR_RW(humidity1_max_hyst, humidity1_limit,
- limit_max_hyst);
-static SENSOR_DEVICE_ATTR_RW(temp1_min, temp1_limit, limit_min);
-static SENSOR_DEVICE_ATTR_RW(humidity1_min, humidity1_limit, limit_min);
-static SENSOR_DEVICE_ATTR_RW(temp1_min_hyst, temp1_limit, limit_min_hyst);
-static SENSOR_DEVICE_ATTR_RW(humidity1_min_hyst, humidity1_limit,
- limit_min_hyst);
-static SENSOR_DEVICE_ATTR_RO(temp1_alarm, temp1_alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(humidity1_alarm, humidity1_alarm, 0);
static SENSOR_DEVICE_ATTR_RW(heater_enable, heater_enable, 0);
-static SENSOR_DEVICE_ATTR_RW(update_interval, update_interval, 0);
static SENSOR_DEVICE_ATTR_RW(repeatability, repeatability, 0);
static struct attribute *sht3x_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_humidity1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_humidity1_max.dev_attr.attr,
- &sensor_dev_attr_humidity1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
- &sensor_dev_attr_humidity1_min.dev_attr.attr,
- &sensor_dev_attr_humidity1_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_alarm.dev_attr.attr,
- &sensor_dev_attr_humidity1_alarm.dev_attr.attr,
&sensor_dev_attr_heater_enable.dev_attr.attr,
- &sensor_dev_attr_update_interval.dev_attr.attr,
&sensor_dev_attr_repeatability.dev_attr.attr,
NULL
};
-static struct attribute *sts3x_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_alarm.dev_attr.attr,
- &sensor_dev_attr_heater_enable.dev_attr.attr,
- &sensor_dev_attr_update_interval.dev_attr.attr,
- &sensor_dev_attr_repeatability.dev_attr.attr,
- NULL
+ATTRIBUTE_GROUPS(sht3x);
+
+static umode_t sht3x_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct sht3x_data *chip_data = data;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_alarm:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ case hwmon_temp_min:
+ case hwmon_temp_min_hyst:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_humidity:
+ if (chip_data->chip_id == sts3x)
+ break;
+ switch (attr) {
+ case hwmon_humidity_input:
+ case hwmon_humidity_alarm:
+ return 0444;
+ case hwmon_humidity_max:
+ case hwmon_humidity_max_hyst:
+ case hwmon_humidity_min:
+ case hwmon_humidity_min_hyst:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ enum sht3x_limits index;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ *val = update_interval_read(dev);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = temp1_input_read(dev);
+ break;
+ case hwmon_temp_alarm:
+ *val = temp1_alarm_read(dev);
+ break;
+ case hwmon_temp_max:
+ index = limit_max;
+ *val = temp1_limit_read(dev, index);
+ break;
+ case hwmon_temp_max_hyst:
+ index = limit_max_hyst;
+ *val = temp1_limit_read(dev, index);
+ break;
+ case hwmon_temp_min:
+ index = limit_min;
+ *val = temp1_limit_read(dev, index);
+ break;
+ case hwmon_temp_min_hyst:
+ index = limit_min_hyst;
+ *val = temp1_limit_read(dev, index);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_humidity:
+ switch (attr) {
+ case hwmon_humidity_input:
+ *val = humidity1_input_read(dev);
+ break;
+ case hwmon_humidity_alarm:
+ *val = humidity1_alarm_read(dev);
+ break;
+ case hwmon_humidity_max:
+ index = limit_max;
+ *val = humidity1_limit_read(dev, index);
+ break;
+ case hwmon_humidity_max_hyst:
+ index = limit_max_hyst;
+ *val = humidity1_limit_read(dev, index);
+ break;
+ case hwmon_humidity_min:
+ index = limit_min;
+ *val = humidity1_limit_read(dev, index);
+ break;
+ case hwmon_humidity_min_hyst:
+ index = limit_min_hyst;
+ *val = humidity1_limit_read(dev, index);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sht3x_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ enum sht3x_limits index;
+
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return update_interval_write(dev, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_max:
+ index = limit_max;
+ break;
+ case hwmon_temp_max_hyst:
+ index = limit_max_hyst;
+ break;
+ case hwmon_temp_min:
+ index = limit_min;
+ break;
+ case hwmon_temp_min_hyst:
+ index = limit_min_hyst;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return temp1_limit_write(dev, index, val);
+ case hwmon_humidity:
+ switch (attr) {
+ case hwmon_humidity_max:
+ index = limit_max;
+ break;
+ case hwmon_humidity_max_hyst:
+ index = limit_max_hyst;
+ break;
+ case hwmon_humidity_min:
+ index = limit_min;
+ break;
+ case hwmon_humidity_min_hyst:
+ index = limit_min_hyst;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return humidity1_limit_write(dev, index, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops sht3x_ops = {
+ .is_visible = sht3x_is_visible,
+ .read = sht3x_read,
+ .write = sht3x_write,
};
-ATTRIBUTE_GROUPS(sht3x);
-ATTRIBUTE_GROUPS(sts3x);
+static const struct hwmon_chip_info sht3x_chip_info = {
+ .ops = &sht3x_ops,
+ .info = sht3x_channel_info,
+};
+
+/* device ID table */
+static const struct i2c_device_id sht3x_ids[] = {
+ {"sht3x", sht3x},
+ {"sts3x", sts3x},
+ {}
+};
-static const struct i2c_device_id sht3x_ids[];
+MODULE_DEVICE_TABLE(i2c, sht3x_ids);
static int sht3x_probe(struct i2c_client *client)
{
@@ -730,7 +858,6 @@ static int sht3x_probe(struct i2c_client *client)
struct device *hwmon_dev;
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
- const struct attribute_group **attribute_groups;
/*
* we require full i2c support since the sht3x uses multi-byte read and
@@ -753,6 +880,7 @@ static int sht3x_probe(struct i2c_client *client)
data->mode = 0;
data->last_update = jiffies - msecs_to_jiffies(3000);
data->client = client;
+ data->chip_id = i2c_match_id(sht3x_ids, client)->driver_data;
crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL);
sht3x_select_command(data);
@@ -771,15 +899,11 @@ static int sht3x_probe(struct i2c_client *client)
if (ret)
return ret;
- if (i2c_match_id(sht3x_ids, client)->driver_data == sts3x)
- attribute_groups = sts3x_groups;
- else
- attribute_groups = sht3x_groups;
-
- hwmon_dev = devm_hwmon_device_register_with_groups(dev,
- client->name,
- data,
- attribute_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ data,
+ &sht3x_chip_info,
+ sht3x_groups);
if (IS_ERR(hwmon_dev))
dev_dbg(dev, "unable to register hwmon device\n");
@@ -787,15 +911,6 @@ static int sht3x_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-/* device ID table */
-static const struct i2c_device_id sht3x_ids[] = {
- {"sht3x", sht3x},
- {"sts3x", sts3x},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, sht3x_ids);
-
static struct i2c_driver sht3x_i2c_driver = {
.driver.name = "sht3x",
.probe = sht3x_probe,
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index b0b05fd12221..0a0479501e11 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -798,7 +798,7 @@ static int sis5595_pci_probe(struct pci_dev *dev,
{
u16 address;
u8 enable;
- int *i;
+ int *i, err;
for (i = blacklist; *i != 0; i++) {
struct pci_dev *d;
@@ -818,8 +818,8 @@ static int sis5595_pci_probe(struct pci_dev *dev,
pci_write_config_word(dev, SIS5595_BASE_REG, force_addr);
}
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(dev, SIS5595_BASE_REG, &address)) {
+ err = pci_read_config_word(dev, SIS5595_BASE_REG, &address);
+ if (err != PCIBIOS_SUCCESSFUL) {
dev_err(&dev->dev, "Failed to read ISA address\n");
return -ENODEV;
}
@@ -836,22 +836,23 @@ static int sis5595_pci_probe(struct pci_dev *dev,
return -ENODEV;
}
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable)) {
+ err = pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable);
+ if (err != PCIBIOS_SUCCESSFUL) {
dev_err(&dev->dev, "Failed to read enable register\n");
return -ENODEV;
}
if (!(enable & 0x80)) {
- if ((PCIBIOS_SUCCESSFUL !=
- pci_write_config_byte(dev, SIS5595_ENABLE_REG,
- enable | 0x80))
- || (PCIBIOS_SUCCESSFUL !=
- pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable))
- || (!(enable & 0x80))) {
- /* doesn't work for some chips! */
- dev_err(&dev->dev, "Failed to enable HWM device\n");
- return -ENODEV;
- }
+ err = pci_write_config_byte(dev, SIS5595_ENABLE_REG, enable | 0x80);
+ if (err != PCIBIOS_SUCCESSFUL)
+ goto enable_fail;
+
+ err = pci_read_config_byte(dev, SIS5595_ENABLE_REG, &enable);
+ if (err != PCIBIOS_SUCCESSFUL)
+ goto enable_fail;
+
+ /* doesn't work for some chips! */
+ if (!(enable & 0x80))
+ goto enable_fail;
}
if (platform_driver_register(&sis5595_driver)) {
@@ -871,6 +872,10 @@ static int sis5595_pci_probe(struct pci_dev *dev,
*/
return -ENODEV;
+enable_fail:
+ dev_err(&dev->dev, "Failed to enable HWM device\n");
+ goto exit;
+
exit_unregister:
pci_dev_put(dev);
platform_driver_unregister(&sis5595_driver);
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
deleted file mode 100644
index 026c76f8c22e..000000000000
--- a/drivers/hwmon/smm665.c
+++ /dev/null
@@ -1,706 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for SMM665 Power Controller / Monitor
- *
- * Copyright (C) 2010 Ericsson AB.
- *
- * This driver should also work for SMM465, SMM764, and SMM766, but is untested
- * for those chips. Only monitoring functionality is implemented.
- *
- * Datasheets:
- * http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf
- * http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-
-/* Internal reference voltage (VREF, x 1000 */
-#define SMM665_VREF_ADC_X1000 1250
-
-/* module parameters */
-static int vref = SMM665_VREF_ADC_X1000;
-module_param(vref, int, 0);
-MODULE_PARM_DESC(vref, "Reference voltage in mV");
-
-enum chips { smm465, smm665, smm665c, smm764, smm766 };
-
-/*
- * ADC channel addresses
- */
-#define SMM665_MISC16_ADC_DATA_A 0x00
-#define SMM665_MISC16_ADC_DATA_B 0x01
-#define SMM665_MISC16_ADC_DATA_C 0x02
-#define SMM665_MISC16_ADC_DATA_D 0x03
-#define SMM665_MISC16_ADC_DATA_E 0x04
-#define SMM665_MISC16_ADC_DATA_F 0x05
-#define SMM665_MISC16_ADC_DATA_VDD 0x06
-#define SMM665_MISC16_ADC_DATA_12V 0x07
-#define SMM665_MISC16_ADC_DATA_INT_TEMP 0x08
-#define SMM665_MISC16_ADC_DATA_AIN1 0x09
-#define SMM665_MISC16_ADC_DATA_AIN2 0x0a
-
-/*
- * Command registers
- */
-#define SMM665_MISC8_CMD_STS 0x80
-#define SMM665_MISC8_STATUS1 0x81
-#define SMM665_MISC8_STATUSS2 0x82
-#define SMM665_MISC8_IO_POLARITY 0x83
-#define SMM665_MISC8_PUP_POLARITY 0x84
-#define SMM665_MISC8_ADOC_STATUS1 0x85
-#define SMM665_MISC8_ADOC_STATUS2 0x86
-#define SMM665_MISC8_WRITE_PROT 0x87
-#define SMM665_MISC8_STS_TRACK 0x88
-
-/*
- * Configuration registers and register groups
- */
-#define SMM665_ADOC_ENABLE 0x0d
-#define SMM665_LIMIT_BASE 0x80 /* First limit register */
-
-/*
- * Limit register bit masks
- */
-#define SMM665_TRIGGER_RST 0x8000
-#define SMM665_TRIGGER_HEALTHY 0x4000
-#define SMM665_TRIGGER_POWEROFF 0x2000
-#define SMM665_TRIGGER_SHUTDOWN 0x1000
-#define SMM665_ADC_MASK 0x03ff
-
-#define smm665_is_critical(lim) ((lim) & (SMM665_TRIGGER_RST \
- | SMM665_TRIGGER_POWEROFF \
- | SMM665_TRIGGER_SHUTDOWN))
-/*
- * Fault register bit definitions
- * Values are merged from status registers 1/2,
- * with status register 1 providing the upper 8 bits.
- */
-#define SMM665_FAULT_A 0x0001
-#define SMM665_FAULT_B 0x0002
-#define SMM665_FAULT_C 0x0004
-#define SMM665_FAULT_D 0x0008
-#define SMM665_FAULT_E 0x0010
-#define SMM665_FAULT_F 0x0020
-#define SMM665_FAULT_VDD 0x0040
-#define SMM665_FAULT_12V 0x0080
-#define SMM665_FAULT_TEMP 0x0100
-#define SMM665_FAULT_AIN1 0x0200
-#define SMM665_FAULT_AIN2 0x0400
-
-/*
- * I2C Register addresses
- *
- * The configuration register needs to be the configured base register.
- * The command/status register address is derived from it.
- */
-#define SMM665_REGMASK 0x78
-#define SMM665_CMDREG_BASE 0x48
-#define SMM665_CONFREG_BASE 0x50
-
-/*
- * Equations given by chip manufacturer to calculate voltage/temperature values
- * vref = Reference voltage on VREF_ADC pin (module parameter)
- * adc = 10bit ADC value read back from registers
- */
-
-/* Voltage A-F and VDD */
-#define SMM665_VMON_ADC_TO_VOLTS(adc) ((adc) * vref / 256)
-
-/* Voltage 12VIN */
-#define SMM665_12VIN_ADC_TO_VOLTS(adc) ((adc) * vref * 3 / 256)
-
-/* Voltage AIN1, AIN2 */
-#define SMM665_AIN_ADC_TO_VOLTS(adc) ((adc) * vref / 512)
-
-/* Temp Sensor */
-#define SMM665_TEMP_ADC_TO_CELSIUS(adc) (((adc) <= 511) ? \
- ((int)(adc) * 1000 / 4) : \
- (((int)(adc) - 0x400) * 1000 / 4))
-
-#define SMM665_NUM_ADC 11
-
-/*
- * Chip dependent ADC conversion time, in uS
- */
-#define SMM665_ADC_WAIT_SMM665 70
-#define SMM665_ADC_WAIT_SMM766 185
-
-struct smm665_data {
- enum chips type;
- int conversion_time; /* ADC conversion time */
- struct i2c_client *client;
- struct mutex update_lock;
- bool valid;
- unsigned long last_updated; /* in jiffies */
- u16 adc[SMM665_NUM_ADC]; /* adc values (raw) */
- u16 faults; /* fault status */
- /* The following values are in mV */
- int critical_min_limit[SMM665_NUM_ADC];
- int alarm_min_limit[SMM665_NUM_ADC];
- int critical_max_limit[SMM665_NUM_ADC];
- int alarm_max_limit[SMM665_NUM_ADC];
- struct i2c_client *cmdreg;
-};
-
-/*
- * smm665_read16()
- *
- * Read 16 bit value from <reg>, <reg+1>. Upper 8 bits are in <reg>.
- */
-static int smm665_read16(struct i2c_client *client, int reg)
-{
- int rv, val;
-
- rv = i2c_smbus_read_byte_data(client, reg);
- if (rv < 0)
- return rv;
- val = rv << 8;
- rv = i2c_smbus_read_byte_data(client, reg + 1);
- if (rv < 0)
- return rv;
- val |= rv;
- return val;
-}
-
-/*
- * Read adc value.
- */
-static int smm665_read_adc(struct smm665_data *data, int adc)
-{
- struct i2c_client *client = data->cmdreg;
- int rv;
- int radc;
-
- /*
- * Algorithm for reading ADC, per SMM665 datasheet
- *
- * {[S][addr][W][Ack]} {[offset][Ack]} {[S][addr][R][Nack]}
- * [wait conversion time]
- * {[S][addr][R][Ack]} {[datahi][Ack]} {[datalo][Ack][P]}
- *
- * To implement the first part of this exchange,
- * do a full read transaction and expect a failure/Nack.
- * This sets up the address pointer on the SMM665
- * and starts the ADC conversion.
- * Then do a two-byte read transaction.
- */
- rv = i2c_smbus_read_byte_data(client, adc << 3);
- if (rv != -ENXIO) {
- /*
- * We expect ENXIO to reflect NACK
- * (per Documentation/i2c/fault-codes.rst).
- * Everything else is an error.
- */
- dev_dbg(&client->dev,
- "Unexpected return code %d when setting ADC index", rv);
- return (rv < 0) ? rv : -EIO;
- }
-
- udelay(data->conversion_time);
-
- /*
- * Now read two bytes.
- *
- * Neither i2c_smbus_read_byte() nor
- * i2c_smbus_read_block_data() worked here,
- * so use i2c_smbus_read_word_swapped() instead.
- * We could also try to use i2c_master_recv(),
- * but that is not always supported.
- */
- rv = i2c_smbus_read_word_swapped(client, 0);
- if (rv < 0) {
- dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv);
- return rv;
- }
- /*
- * Validate/verify readback adc channel (in bit 11..14).
- */
- radc = (rv >> 11) & 0x0f;
- if (radc != adc) {
- dev_dbg(&client->dev, "Unexpected RADC: Expected %d got %d",
- adc, radc);
- return -EIO;
- }
-
- return rv & SMM665_ADC_MASK;
-}
-
-static struct smm665_data *smm665_update_device(struct device *dev)
-{
- struct smm665_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- struct smm665_data *ret = data;
-
- mutex_lock(&data->update_lock);
-
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
- int i, val;
-
- /*
- * read status registers
- */
- val = smm665_read16(client, SMM665_MISC8_STATUS1);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->faults = val;
-
- /* Read adc registers */
- for (i = 0; i < SMM665_NUM_ADC; i++) {
- val = smm665_read_adc(data, i);
- if (unlikely(val < 0)) {
- ret = ERR_PTR(val);
- goto abort;
- }
- data->adc[i] = val;
- }
- data->last_updated = jiffies;
- data->valid = true;
- }
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
-/* Return converted value from given adc */
-static int smm665_convert(u16 adcval, int index)
-{
- int val = 0;
-
- switch (index) {
- case SMM665_MISC16_ADC_DATA_12V:
- val = SMM665_12VIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
- break;
-
- case SMM665_MISC16_ADC_DATA_VDD:
- case SMM665_MISC16_ADC_DATA_A:
- case SMM665_MISC16_ADC_DATA_B:
- case SMM665_MISC16_ADC_DATA_C:
- case SMM665_MISC16_ADC_DATA_D:
- case SMM665_MISC16_ADC_DATA_E:
- case SMM665_MISC16_ADC_DATA_F:
- val = SMM665_VMON_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
- break;
-
- case SMM665_MISC16_ADC_DATA_AIN1:
- case SMM665_MISC16_ADC_DATA_AIN2:
- val = SMM665_AIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
- break;
-
- case SMM665_MISC16_ADC_DATA_INT_TEMP:
- val = SMM665_TEMP_ADC_TO_CELSIUS(adcval & SMM665_ADC_MASK);
- break;
-
- default:
- /* If we get here, the developer messed up */
- WARN_ON_ONCE(1);
- break;
- }
-
- return val;
-}
-
-static int smm665_get_min(struct device *dev, int index)
-{
- struct smm665_data *data = dev_get_drvdata(dev);
-
- return data->alarm_min_limit[index];
-}
-
-static int smm665_get_max(struct device *dev, int index)
-{
- struct smm665_data *data = dev_get_drvdata(dev);
-
- return data->alarm_max_limit[index];
-}
-
-static int smm665_get_lcrit(struct device *dev, int index)
-{
- struct smm665_data *data = dev_get_drvdata(dev);
-
- return data->critical_min_limit[index];
-}
-
-static int smm665_get_crit(struct device *dev, int index)
-{
- struct smm665_data *data = dev_get_drvdata(dev);
-
- return data->critical_max_limit[index];
-}
-
-static ssize_t smm665_show_crit_alarm(struct device *dev,
- struct device_attribute *da, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct smm665_data *data = smm665_update_device(dev);
- int val = 0;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- if (data->faults & (1 << attr->index))
- val = 1;
-
- return sysfs_emit(buf, "%d\n", val);
-}
-
-static ssize_t smm665_show_input(struct device *dev,
- struct device_attribute *da, char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct smm665_data *data = smm665_update_device(dev);
- int adc = attr->index;
- int val;
-
- if (IS_ERR(data))
- return PTR_ERR(data);
-
- val = smm665_convert(data->adc[adc], adc);
- return sysfs_emit(buf, "%d\n", val);
-}
-
-#define SMM665_SHOW(what) \
-static ssize_t smm665_show_##what(struct device *dev, \
- struct device_attribute *da, char *buf) \
-{ \
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da); \
- const int val = smm665_get_##what(dev, attr->index); \
- return snprintf(buf, PAGE_SIZE, "%d\n", val); \
-}
-
-SMM665_SHOW(min);
-SMM665_SHOW(max);
-SMM665_SHOW(lcrit);
-SMM665_SHOW(crit);
-
-/*
- * These macros are used below in constructing device attribute objects
- * for use with sysfs_create_group() to make a sysfs device file
- * for each register.
- */
-
-#define SMM665_ATTR(name, type, cmd_idx) \
- static SENSOR_DEVICE_ATTR(name##_##type, S_IRUGO, \
- smm665_show_##type, NULL, cmd_idx)
-
-/* Construct a sensor_device_attribute structure for each register */
-
-/* Input voltages */
-SMM665_ATTR(in1, input, SMM665_MISC16_ADC_DATA_12V);
-SMM665_ATTR(in2, input, SMM665_MISC16_ADC_DATA_VDD);
-SMM665_ATTR(in3, input, SMM665_MISC16_ADC_DATA_A);
-SMM665_ATTR(in4, input, SMM665_MISC16_ADC_DATA_B);
-SMM665_ATTR(in5, input, SMM665_MISC16_ADC_DATA_C);
-SMM665_ATTR(in6, input, SMM665_MISC16_ADC_DATA_D);
-SMM665_ATTR(in7, input, SMM665_MISC16_ADC_DATA_E);
-SMM665_ATTR(in8, input, SMM665_MISC16_ADC_DATA_F);
-SMM665_ATTR(in9, input, SMM665_MISC16_ADC_DATA_AIN1);
-SMM665_ATTR(in10, input, SMM665_MISC16_ADC_DATA_AIN2);
-
-/* Input voltages min */
-SMM665_ATTR(in1, min, SMM665_MISC16_ADC_DATA_12V);
-SMM665_ATTR(in2, min, SMM665_MISC16_ADC_DATA_VDD);
-SMM665_ATTR(in3, min, SMM665_MISC16_ADC_DATA_A);
-SMM665_ATTR(in4, min, SMM665_MISC16_ADC_DATA_B);
-SMM665_ATTR(in5, min, SMM665_MISC16_ADC_DATA_C);
-SMM665_ATTR(in6, min, SMM665_MISC16_ADC_DATA_D);
-SMM665_ATTR(in7, min, SMM665_MISC16_ADC_DATA_E);
-SMM665_ATTR(in8, min, SMM665_MISC16_ADC_DATA_F);
-SMM665_ATTR(in9, min, SMM665_MISC16_ADC_DATA_AIN1);
-SMM665_ATTR(in10, min, SMM665_MISC16_ADC_DATA_AIN2);
-
-/* Input voltages max */
-SMM665_ATTR(in1, max, SMM665_MISC16_ADC_DATA_12V);
-SMM665_ATTR(in2, max, SMM665_MISC16_ADC_DATA_VDD);
-SMM665_ATTR(in3, max, SMM665_MISC16_ADC_DATA_A);
-SMM665_ATTR(in4, max, SMM665_MISC16_ADC_DATA_B);
-SMM665_ATTR(in5, max, SMM665_MISC16_ADC_DATA_C);
-SMM665_ATTR(in6, max, SMM665_MISC16_ADC_DATA_D);
-SMM665_ATTR(in7, max, SMM665_MISC16_ADC_DATA_E);
-SMM665_ATTR(in8, max, SMM665_MISC16_ADC_DATA_F);
-SMM665_ATTR(in9, max, SMM665_MISC16_ADC_DATA_AIN1);
-SMM665_ATTR(in10, max, SMM665_MISC16_ADC_DATA_AIN2);
-
-/* Input voltages lcrit */
-SMM665_ATTR(in1, lcrit, SMM665_MISC16_ADC_DATA_12V);
-SMM665_ATTR(in2, lcrit, SMM665_MISC16_ADC_DATA_VDD);
-SMM665_ATTR(in3, lcrit, SMM665_MISC16_ADC_DATA_A);
-SMM665_ATTR(in4, lcrit, SMM665_MISC16_ADC_DATA_B);
-SMM665_ATTR(in5, lcrit, SMM665_MISC16_ADC_DATA_C);
-SMM665_ATTR(in6, lcrit, SMM665_MISC16_ADC_DATA_D);
-SMM665_ATTR(in7, lcrit, SMM665_MISC16_ADC_DATA_E);
-SMM665_ATTR(in8, lcrit, SMM665_MISC16_ADC_DATA_F);
-SMM665_ATTR(in9, lcrit, SMM665_MISC16_ADC_DATA_AIN1);
-SMM665_ATTR(in10, lcrit, SMM665_MISC16_ADC_DATA_AIN2);
-
-/* Input voltages crit */
-SMM665_ATTR(in1, crit, SMM665_MISC16_ADC_DATA_12V);
-SMM665_ATTR(in2, crit, SMM665_MISC16_ADC_DATA_VDD);
-SMM665_ATTR(in3, crit, SMM665_MISC16_ADC_DATA_A);
-SMM665_ATTR(in4, crit, SMM665_MISC16_ADC_DATA_B);
-SMM665_ATTR(in5, crit, SMM665_MISC16_ADC_DATA_C);
-SMM665_ATTR(in6, crit, SMM665_MISC16_ADC_DATA_D);
-SMM665_ATTR(in7, crit, SMM665_MISC16_ADC_DATA_E);
-SMM665_ATTR(in8, crit, SMM665_MISC16_ADC_DATA_F);
-SMM665_ATTR(in9, crit, SMM665_MISC16_ADC_DATA_AIN1);
-SMM665_ATTR(in10, crit, SMM665_MISC16_ADC_DATA_AIN2);
-
-/* critical alarms */
-SMM665_ATTR(in1, crit_alarm, SMM665_FAULT_12V);
-SMM665_ATTR(in2, crit_alarm, SMM665_FAULT_VDD);
-SMM665_ATTR(in3, crit_alarm, SMM665_FAULT_A);
-SMM665_ATTR(in4, crit_alarm, SMM665_FAULT_B);
-SMM665_ATTR(in5, crit_alarm, SMM665_FAULT_C);
-SMM665_ATTR(in6, crit_alarm, SMM665_FAULT_D);
-SMM665_ATTR(in7, crit_alarm, SMM665_FAULT_E);
-SMM665_ATTR(in8, crit_alarm, SMM665_FAULT_F);
-SMM665_ATTR(in9, crit_alarm, SMM665_FAULT_AIN1);
-SMM665_ATTR(in10, crit_alarm, SMM665_FAULT_AIN2);
-
-/* Temperature */
-SMM665_ATTR(temp1, input, SMM665_MISC16_ADC_DATA_INT_TEMP);
-SMM665_ATTR(temp1, min, SMM665_MISC16_ADC_DATA_INT_TEMP);
-SMM665_ATTR(temp1, max, SMM665_MISC16_ADC_DATA_INT_TEMP);
-SMM665_ATTR(temp1, lcrit, SMM665_MISC16_ADC_DATA_INT_TEMP);
-SMM665_ATTR(temp1, crit, SMM665_MISC16_ADC_DATA_INT_TEMP);
-SMM665_ATTR(temp1, crit_alarm, SMM665_FAULT_TEMP);
-
-/*
- * Finally, construct an array of pointers to members of the above objects,
- * as required for sysfs_create_group()
- */
-static struct attribute *smm665_attrs[] = {
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in1_min.dev_attr.attr,
- &sensor_dev_attr_in1_max.dev_attr.attr,
- &sensor_dev_attr_in1_lcrit.dev_attr.attr,
- &sensor_dev_attr_in1_crit.dev_attr.attr,
- &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in2_min.dev_attr.attr,
- &sensor_dev_attr_in2_max.dev_attr.attr,
- &sensor_dev_attr_in2_lcrit.dev_attr.attr,
- &sensor_dev_attr_in2_crit.dev_attr.attr,
- &sensor_dev_attr_in2_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in3_min.dev_attr.attr,
- &sensor_dev_attr_in3_max.dev_attr.attr,
- &sensor_dev_attr_in3_lcrit.dev_attr.attr,
- &sensor_dev_attr_in3_crit.dev_attr.attr,
- &sensor_dev_attr_in3_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in4_min.dev_attr.attr,
- &sensor_dev_attr_in4_max.dev_attr.attr,
- &sensor_dev_attr_in4_lcrit.dev_attr.attr,
- &sensor_dev_attr_in4_crit.dev_attr.attr,
- &sensor_dev_attr_in4_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in5_min.dev_attr.attr,
- &sensor_dev_attr_in5_max.dev_attr.attr,
- &sensor_dev_attr_in5_lcrit.dev_attr.attr,
- &sensor_dev_attr_in5_crit.dev_attr.attr,
- &sensor_dev_attr_in5_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in6_min.dev_attr.attr,
- &sensor_dev_attr_in6_max.dev_attr.attr,
- &sensor_dev_attr_in6_lcrit.dev_attr.attr,
- &sensor_dev_attr_in6_crit.dev_attr.attr,
- &sensor_dev_attr_in6_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in7_min.dev_attr.attr,
- &sensor_dev_attr_in7_max.dev_attr.attr,
- &sensor_dev_attr_in7_lcrit.dev_attr.attr,
- &sensor_dev_attr_in7_crit.dev_attr.attr,
- &sensor_dev_attr_in7_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in8_input.dev_attr.attr,
- &sensor_dev_attr_in8_min.dev_attr.attr,
- &sensor_dev_attr_in8_max.dev_attr.attr,
- &sensor_dev_attr_in8_lcrit.dev_attr.attr,
- &sensor_dev_attr_in8_crit.dev_attr.attr,
- &sensor_dev_attr_in8_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in9_min.dev_attr.attr,
- &sensor_dev_attr_in9_max.dev_attr.attr,
- &sensor_dev_attr_in9_lcrit.dev_attr.attr,
- &sensor_dev_attr_in9_crit.dev_attr.attr,
- &sensor_dev_attr_in9_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in10_min.dev_attr.attr,
- &sensor_dev_attr_in10_max.dev_attr.attr,
- &sensor_dev_attr_in10_lcrit.dev_attr.attr,
- &sensor_dev_attr_in10_crit.dev_attr.attr,
- &sensor_dev_attr_in10_crit_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_lcrit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
-
- NULL,
-};
-
-ATTRIBUTE_GROUPS(smm665);
-
-static const struct i2c_device_id smm665_id[];
-
-static int smm665_probe(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- struct smm665_data *data;
- struct device *hwmon_dev;
- int i, ret;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
- | I2C_FUNC_SMBUS_WORD_DATA))
- return -ENODEV;
-
- if (i2c_smbus_read_byte_data(client, SMM665_ADOC_ENABLE) < 0)
- return -ENODEV;
-
- data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
-
- data->client = client;
- data->type = i2c_match_id(smm665_id, client)->driver_data;
- data->cmdreg = i2c_new_dummy_device(adapter, (client->addr & ~SMM665_REGMASK)
- | SMM665_CMDREG_BASE);
- if (IS_ERR(data->cmdreg))
- return PTR_ERR(data->cmdreg);
-
- switch (data->type) {
- case smm465:
- case smm665:
- data->conversion_time = SMM665_ADC_WAIT_SMM665;
- break;
- case smm665c:
- case smm764:
- case smm766:
- data->conversion_time = SMM665_ADC_WAIT_SMM766;
- break;
- }
-
- ret = -ENODEV;
- if (i2c_smbus_read_byte_data(data->cmdreg, SMM665_MISC8_CMD_STS) < 0)
- goto out_unregister;
-
- /*
- * Read limits.
- *
- * Limit registers start with register SMM665_LIMIT_BASE.
- * Each channel uses 8 registers, providing four limit values
- * per channel. Each limit value requires two registers, with the
- * high byte in the first register and the low byte in the second
- * register. The first two limits are under limit values, followed
- * by two over limit values.
- *
- * Limit register order matches the ADC register order, so we use
- * ADC register defines throughout the code to index limit registers.
- *
- * We save the first retrieved value both as "critical" and "alarm"
- * value. The second value overwrites either the critical or the
- * alarm value, depending on its configuration. This ensures that both
- * critical and alarm values are initialized, even if both registers are
- * configured as critical or non-critical.
- */
- for (i = 0; i < SMM665_NUM_ADC; i++) {
- int val;
-
- val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8);
- if (unlikely(val < 0))
- goto out_unregister;
- data->critical_min_limit[i] = data->alarm_min_limit[i]
- = smm665_convert(val, i);
- val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 2);
- if (unlikely(val < 0))
- goto out_unregister;
- if (smm665_is_critical(val))
- data->critical_min_limit[i] = smm665_convert(val, i);
- else
- data->alarm_min_limit[i] = smm665_convert(val, i);
- val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 4);
- if (unlikely(val < 0))
- goto out_unregister;
- data->critical_max_limit[i] = data->alarm_max_limit[i]
- = smm665_convert(val, i);
- val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 6);
- if (unlikely(val < 0))
- goto out_unregister;
- if (smm665_is_critical(val))
- data->critical_max_limit[i] = smm665_convert(val, i);
- else
- data->alarm_max_limit[i] = smm665_convert(val, i);
- }
-
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- smm665_groups);
- if (IS_ERR(hwmon_dev)) {
- ret = PTR_ERR(hwmon_dev);
- goto out_unregister;
- }
-
- return 0;
-
-out_unregister:
- i2c_unregister_device(data->cmdreg);
- return ret;
-}
-
-static void smm665_remove(struct i2c_client *client)
-{
- struct smm665_data *data = i2c_get_clientdata(client);
-
- i2c_unregister_device(data->cmdreg);
-}
-
-static const struct i2c_device_id smm665_id[] = {
- {"smm465", smm465},
- {"smm665", smm665},
- {"smm665c", smm665c},
- {"smm764", smm764},
- {"smm766", smm766},
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, smm665_id);
-
-/* This is the driver that will be inserted */
-static struct i2c_driver smm665_driver = {
- .driver = {
- .name = "smm665",
- },
- .probe = smm665_probe,
- .remove = smm665_remove,
- .id_table = smm665_id,
-};
-
-module_i2c_driver(smm665_driver);
-
-MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("SMM665 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 3cde3916ab6d..10b66c9ce045 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -20,7 +20,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/sysfs.h>
/* Addresses to scan */
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
index 4b79c3f4d9fe..f58ca4c6acb6 100644
--- a/drivers/hwmon/tmp464.c
+++ b/drivers/hwmon/tmp464.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index bff10f4b56e1..9a180b1030c9 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -434,7 +434,7 @@ static umode_t tmp51x_is_visible(const void *_data,
switch (type) {
case hwmon_temp:
- if (data->id == tmp512 && channel == 4)
+ if (data->id == tmp512 && channel == 3)
return 0;
switch (attr) {
case hwmon_temp_input:
@@ -720,10 +720,7 @@ static int tmp51x_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- if (client->dev.of_node)
- data->id = (enum tmp51x_ids)device_get_match_data(&client->dev);
- else
- data->id = i2c_match_id(tmp51x_id, client)->driver_data;
+ data->id = (uintptr_t)i2c_get_match_data(client);
ret = tmp51x_configure(dev, data);
if (ret < 0) {
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 8fbbb29ae11d..d33ecbac00d6 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -14,7 +14,7 @@
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#define TEMPERATURE 0x2c
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index b23cff2e3d5d..3b580f229887 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -9,7 +9,8 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
diff --git a/drivers/hwmon/vexpress-hwmon.c b/drivers/hwmon/vexpress-hwmon.c
index 2ac5fb96bba4..d82a3b454d0e 100644
--- a/drivers/hwmon/vexpress-hwmon.c
+++ b/drivers/hwmon/vexpress-hwmon.c
@@ -13,7 +13,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/vexpress.h>
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 37d7374896f6..407933d6e425 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -855,16 +855,17 @@ static int via686a_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 address, val;
+ int ret;
if (force_addr) {
address = force_addr & ~(VIA686A_EXTENT - 1);
dev_warn(&dev->dev, "Forcing ISA address 0x%x\n", address);
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(dev, VIA686A_BASE_REG, address | 1))
+ ret = pci_write_config_word(dev, VIA686A_BASE_REG, address | 1);
+ if (ret != PCIBIOS_SUCCESSFUL)
return -ENODEV;
}
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(dev, VIA686A_BASE_REG, &val))
+ ret = pci_read_config_word(dev, VIA686A_BASE_REG, &val);
+ if (ret != PCIBIOS_SUCCESSFUL)
return -ENODEV;
address = val & ~(VIA686A_EXTENT - 1);
@@ -874,8 +875,8 @@ static int via686a_pci_probe(struct pci_dev *dev,
return -ENODEV;
}
- if (PCIBIOS_SUCCESSFUL !=
- pci_read_config_word(dev, VIA686A_ENABLE_REG, &val))
+ ret = pci_read_config_word(dev, VIA686A_ENABLE_REG, &val);
+ if (ret != PCIBIOS_SUCCESSFUL)
return -ENODEV;
if (!(val & 0x0001)) {
if (!force_addr) {
@@ -886,9 +887,8 @@ static int via686a_pci_probe(struct pci_dev *dev,
}
dev_warn(&dev->dev, "Enabling sensors\n");
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(dev, VIA686A_ENABLE_REG,
- val | 0x0001))
+ ret = pci_write_config_word(dev, VIA686A_ENABLE_REG, val | 0x1);
+ if (ret != PCIBIOS_SUCCESSFUL)
return -ENODEV;
}
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index b7c6392ba673..16bc16d33cd1 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -971,13 +971,15 @@ static int vt8231_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
u16 address, val;
+ int ret;
+
if (force_addr) {
address = force_addr & 0xff00;
dev_warn(&dev->dev, "Forcing ISA address 0x%x\n",
address);
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(dev, VT8231_BASE_REG, address | 1))
+ ret = pci_write_config_word(dev, VT8231_BASE_REG, address | 1);
+ if (ret != PCIBIOS_SUCCESSFUL)
return -ENODEV;
}
@@ -997,9 +999,8 @@ static int vt8231_pci_probe(struct pci_dev *dev,
if (!(val & 0x0001)) {
dev_warn(&dev->dev, "enabling sensors\n");
- if (PCIBIOS_SUCCESSFUL !=
- pci_write_config_word(dev, VT8231_ENABLE_REG,
- val | 0x0001))
+ ret = pci_write_config_word(dev, VT8231_ENABLE_REG, val | 0x1);
+ if (ret != PCIBIOS_SUCCESSFUL)
return -ENODEV;
}
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index 7f3615f5587c..045eea8378c2 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -12,7 +12,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
/* W83773 has 3 channels */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 256c2d42e350..ea5a6a14c553 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -923,7 +923,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.enter = NULL }
};
-static struct cpuidle_state adl_n_cstates[] __initdata = {
+static struct cpuidle_state gmt_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
@@ -1349,8 +1349,8 @@ static const struct idle_cpu idle_cpu_adl_l __initconst = {
.state_table = adl_l_cstates,
};
-static const struct idle_cpu idle_cpu_adl_n __initconst = {
- .state_table = adl_n_cstates,
+static const struct idle_cpu idle_cpu_gmt __initconst = {
+ .state_table = gmt_cstates,
};
static const struct idle_cpu idle_cpu_spr __initconst = {
@@ -1423,7 +1423,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &idle_cpu_adl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
@@ -1898,7 +1898,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
break;
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ALDERLAKE_N:
+ case INTEL_FAM6_ATOM_GRACEMONT:
adl_idle_state_table_update();
break;
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index a973905afd13..ed7d4b02f45a 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -64,9 +64,8 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_blocks = 0;
- inode->i_atime = current_time(inode);
+ inode->i_atime = inode_set_ctime_current(inode);
inode->i_mtime = inode->i_atime;
- inode->i_ctime = inode->i_atime;
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 6341c0167c4a..9745a119d0e6 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -60,7 +60,6 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index a62b96237b82..24ca1d656adc 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 091b0fe7e324..5559c943f03f 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
index 3989d16f997b..a275a8071a25 100644
--- a/drivers/irqchip/irq-gic-pm.c
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -4,7 +4,7 @@
*/
#include <linux/module.h>
#include <linux/clk.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/platform_device.h>
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 634263dfd7b5..8e87fc35f8aa 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -9,8 +9,6 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/of.h>
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b70ce0d3c092..115bdcffab24 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -340,7 +340,7 @@ static void i8259_irq_dispatch(struct irq_desc *desc)
generic_handle_domain_irq(domain, hwirq);
}
-int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+static int __init i8259_of_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *domain;
unsigned int parent_irq;
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index 80aaea82468a..6d9a08238c9d 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -50,8 +50,9 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 96230a04ec23..bd9543314539 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,8 +10,9 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index 229039eda1b1..90d41c1407ac 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -339,8 +339,8 @@ static int __init imx_mu_of_init(struct device_node *dn,
msi_data->msiir_addr = res->start + msi_data->cfg->xTR;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
platform_set_drvdata(pdev, msi_data);
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index ba9792e60329..a36396db4b08 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -15,7 +15,7 @@
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 92d8aa28bdf5..1623cd779175 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -144,7 +144,7 @@ static int eiointc_router_init(unsigned int cpu)
int i, bit;
uint32_t data;
uint32_t node = cpu_to_eio_node(cpu);
- uint32_t index = eiointc_index(node);
+ int index = eiointc_index(node);
if (index < 0) {
pr_err("Error: invalid nodemap!\n");
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index fc8bf1f5d41b..0bff728b25e3 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -15,7 +15,6 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/syscore_ops.h>
/* Registers */
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 93a71f66efeb..63db8e2172e0 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -12,9 +12,9 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/syscore_ops.h>
/* Registers */
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index f5ba3f9f8415..f31a262fe438 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -349,8 +349,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+ msi_data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(msi_data->regs)) {
dev_err(&pdev->dev, "failed to initialize 'regs'\n");
return PTR_ERR(msi_data->regs);
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index 8b81271c823c..3eb1f8cdf674 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -10,12 +10,10 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/irqchip/irq-madera.h>
#include <linux/mfd/madera/core.h>
#include <linux/mfd/madera/pdata.h>
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 7da18ef95211..f88df39f4129 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -150,6 +150,10 @@ static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
+static const struct meson_gpio_irq_params c3_params = {
+ INIT_MESON_S4_COMMON_DATA(55)
+};
+
static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -160,6 +164,7 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
+ { .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ }
};
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6d5ecc10a870..76253e864f23 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -557,7 +557,7 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
return gic_irq_domain_map(d, virq, hwirq);
}
-void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
+static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs)
{
}
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 4ecef6d83777..a48dbe91b036 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -377,8 +377,7 @@ static int mvebu_sei_probe(struct platform_device *pdev)
mutex_init(&sei->cp_msi_lock);
raw_spin_lock_init(&sei->mask_lock);
- sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sei->base = devm_ioremap_resource(sei->dev, sei->res);
+ sei->base = devm_platform_get_and_ioremap_resource(pdev, 0, &sei->res);
if (IS_ERR(sei->base))
return PTR_ERR(sei->base);
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 17c2c7a07f10..4e4e874e09a8 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -57,8 +57,7 @@ static int __init orion_irq_init(struct device_node *np,
struct resource r;
/* count number of irq chips by valid reg addresses */
- while (of_address_to_resource(np, num_chips, &r) == 0)
- num_chips++;
+ num_chips = of_address_count(np);
orion_irq_domain = irq_domain_add_linear(np,
num_chips * ORION_IRQS_PER_CHIP,
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index fa8d89b02ec0..0f64ecb9b1f4 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -17,7 +17,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
/*
@@ -565,8 +565,8 @@ static int pruss_intc_probe(struct platform_device *pdev)
continue;
irq = platform_get_irq_byname(pdev, irq_names[i]);
- if (irq <= 0) {
- ret = (irq == 0) ? -EINVAL : irq;
+ if (irq < 0) {
+ ret = irq;
goto fail_irq;
}
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index d30614661eea..7124565234a5 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -14,7 +14,7 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 26e4c17a7bf2..fa19585f3dee 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -17,7 +17,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */
diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c
index 819a12297b58..de71bb350d57 100644
--- a/drivers/irqchip/irq-st.c
+++ b/drivers/irqchip/irq-st.c
@@ -10,7 +10,7 @@
#include <dt-bindings/interrupt-controller/irq-st.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index b5fa76ce5046..d8ba5fba7450 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -14,10 +14,11 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 21d49791f855..e760b1278143 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -19,7 +19,6 @@
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 8a0e69298e83..680586354d12 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -13,7 +13,6 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/bitops.h>
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 7133f9fa6fd9..b83f5cbab123 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -15,9 +15,9 @@
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/soc/ti/ti_sci_protocol.h>
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 1186f1e431a3..c027cd9e4a69 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -12,9 +12,9 @@
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/soc/ti/ti_sci_protocol.h>
/**
diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c
index 716b1bb88bf2..601f9343d5b3 100644
--- a/drivers/irqchip/irq-uniphier-aidet.c
+++ b/drivers/irqchip/irq-uniphier-aidet.c
@@ -12,7 +12,6 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index ab12328be5ee..0c18d1f1e264 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -16,6 +16,7 @@
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-pic.h>
#include <linux/of.h>
unsigned int cached_irq_mask;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 7899607fbee8..1eeb0d0156ce 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -10,7 +10,7 @@
#include <linux/acpi.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
#include <linux/platform_device.h>
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index d96916cf6a41..a32c0d28d038 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index 9ff439a50f53..315e97a2450e 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -821,6 +821,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
return -EINVAL;
if (*nplanes) {
+ if (*nplanes != q_data->fmt->num_planes)
+ return -EINVAL;
for (i = 0; i < *nplanes; i++)
if (sizes[i] < q_data->sizeimage[i])
return -EINVAL;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6f5b259a6d6a..85be64579fc9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -237,6 +237,29 @@ config MFD_CROS_EC_DEV
To compile this driver as a module, choose M here: the module will be
called cros-ec-dev.
+config MFD_CS42L43
+ tristate
+ select MFD_CORE
+ select REGMAP
+
+config MFD_CS42L43_I2C
+ tristate "Cirrus Logic CS42L43 (I2C)"
+ depends on I2C
+ select REGMAP_I2C
+ select MFD_CS42L43
+ help
+ Select this to support the Cirrus Logic CS42L43 PC CODEC with
+ headphone and class D speaker drivers over I2C.
+
+config MFD_CS42L43_SDW
+ tristate "Cirrus Logic CS42L43 (SoundWire)"
+ depends on SOUNDWIRE
+ select REGMAP_SOUNDWIRE
+ select MFD_CS42L43
+ help
+ Select this to support the Cirrus Logic CS42L43 PC CODEC with
+ headphone and class D speaker drivers over SoundWire.
+
config MFD_MADERA
tristate "Cirrus Logic Madera codecs"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index f3d1f1dc73b5..c66f07edcd0e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -13,6 +13,9 @@ obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
obj-$(CONFIG_MFD_CROS_EC_DEV) += cros_ec_dev.o
+obj-$(CONFIG_MFD_CS42L43) += cs42l43.o
+obj-$(CONFIG_MFD_CS42L43_I2C) += cs42l43-i2c.o
+obj-$(CONFIG_MFD_CS42L43_SDW) += cs42l43-sdw.o
obj-$(CONFIG_MFD_ENE_KB3930) += ene-kb3930.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o
diff --git a/drivers/mfd/cs42l43-i2c.c b/drivers/mfd/cs42l43-i2c.c
new file mode 100644
index 000000000000..4922211680c9
--- /dev/null
+++ b/drivers/mfd/cs42l43-i2c.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS42L43 I2C driver
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/mfd/cs42l43-regs.h>
+#include <linux/module.h>
+
+#include "cs42l43.h"
+
+static const struct regmap_config cs42l43_i2c_regmap = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+
+ .max_register = CS42L43_MCU_RAM_MAX,
+ .readable_reg = cs42l43_readable_register,
+ .volatile_reg = cs42l43_volatile_register,
+ .precious_reg = cs42l43_precious_register,
+
+ .cache_type = REGCACHE_MAPLE,
+ .reg_defaults = cs42l43_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs42l43_reg_default),
+};
+
+static int cs42l43_i2c_probe(struct i2c_client *i2c)
+{
+ struct cs42l43 *cs42l43;
+ int ret;
+
+ cs42l43 = devm_kzalloc(&i2c->dev, sizeof(*cs42l43), GFP_KERNEL);
+ if (!cs42l43)
+ return -ENOMEM;
+
+ cs42l43->dev = &i2c->dev;
+ cs42l43->irq = i2c->irq;
+	/* A device on an I2C bus is always attached by definition. */
+ cs42l43->attached = true;
+
+ cs42l43->regmap = devm_regmap_init_i2c(i2c, &cs42l43_i2c_regmap);
+ if (IS_ERR(cs42l43->regmap)) {
+ ret = PTR_ERR(cs42l43->regmap);
+ dev_err(cs42l43->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ return cs42l43_dev_probe(cs42l43);
+}
+
+static void cs42l43_i2c_remove(struct i2c_client *i2c)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(&i2c->dev);
+
+ cs42l43_dev_remove(cs42l43);
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id cs42l43_of_match[] = {
+ { .compatible = "cirrus,cs42l43", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cs42l43_of_match);
+#endif
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id cs42l43_acpi_match[] = {
+ { "CSC4243", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cs42l43_acpi_match);
+#endif
+
+static struct i2c_driver cs42l43_i2c_driver = {
+ .driver = {
+ .name = "cs42l43",
+ .pm = pm_ptr(&cs42l43_pm_ops),
+ .of_match_table = of_match_ptr(cs42l43_of_match),
+ .acpi_match_table = ACPI_PTR(cs42l43_acpi_match),
+ },
+
+ .probe = cs42l43_i2c_probe,
+ .remove = cs42l43_i2c_remove,
+};
+module_i2c_driver(cs42l43_i2c_driver);
+
+MODULE_IMPORT_NS(MFD_CS42L43);
+
+MODULE_DESCRIPTION("CS42L43 I2C Driver");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/cs42l43-sdw.c b/drivers/mfd/cs42l43-sdw.c
new file mode 100644
index 000000000000..7392b3d2e6b9
--- /dev/null
+++ b/drivers/mfd/cs42l43-sdw.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS42L43 SoundWire driver
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mfd/cs42l43-regs.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_registers.h>
+#include <linux/soundwire/sdw_type.h>
+
+#include "cs42l43.h"
+
+enum cs42l43_sdw_ports {
+ CS42L43_DMIC_DEC_ASP_PORT = 1,
+ CS42L43_SPK_TX_PORT,
+ CS42L43_SPDIF_HP_PORT,
+ CS42L43_SPK_RX_PORT,
+ CS42L43_ASP_PORT,
+};
+
+static const struct regmap_config cs42l43_sdw_regmap = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+
+ .max_register = CS42L43_MCU_RAM_MAX,
+ .readable_reg = cs42l43_readable_register,
+ .volatile_reg = cs42l43_volatile_register,
+ .precious_reg = cs42l43_precious_register,
+
+ .cache_type = REGCACHE_MAPLE,
+ .reg_defaults = cs42l43_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(cs42l43_reg_default),
+};
+
+static int cs42l43_read_prop(struct sdw_slave *sdw)
+{
+ struct sdw_slave_prop *prop = &sdw->prop;
+ struct device *dev = &sdw->dev;
+ struct sdw_dpn_prop *dpn;
+ unsigned long addr;
+ int nval;
+ int i;
+ u32 bit;
+
+ prop->use_domain_irq = true;
+ prop->paging_support = true;
+ prop->wake_capable = true;
+ prop->source_ports = BIT(CS42L43_DMIC_DEC_ASP_PORT) | BIT(CS42L43_SPK_TX_PORT);
+ prop->sink_ports = BIT(CS42L43_SPDIF_HP_PORT) |
+ BIT(CS42L43_SPK_RX_PORT) | BIT(CS42L43_ASP_PORT);
+ prop->quirks = SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY;
+ prop->scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY |
+ SDW_SCP_INT1_IMPL_DEF;
+
+ nval = hweight32(prop->source_ports);
+ prop->src_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->src_dpn_prop),
+ GFP_KERNEL);
+ if (!prop->src_dpn_prop)
+ return -ENOMEM;
+
+ i = 0;
+ dpn = prop->src_dpn_prop;
+ addr = prop->source_ports;
+ for_each_set_bit(bit, &addr, 32) {
+ dpn[i].num = bit;
+ dpn[i].max_ch = 2;
+ dpn[i].type = SDW_DPN_FULL;
+ dpn[i].max_word = 24;
+ i++;
+ }
+ /*
+ * All ports are 2 channels max, except the first one,
+ * CS42L43_DMIC_DEC_ASP_PORT.
+ */
+ dpn[CS42L43_DMIC_DEC_ASP_PORT].max_ch = 4;
+
+ nval = hweight32(prop->sink_ports);
+ prop->sink_dpn_prop = devm_kcalloc(dev, nval, sizeof(*prop->sink_dpn_prop),
+ GFP_KERNEL);
+ if (!prop->sink_dpn_prop)
+ return -ENOMEM;
+
+ i = 0;
+ dpn = prop->sink_dpn_prop;
+ addr = prop->sink_ports;
+ for_each_set_bit(bit, &addr, 32) {
+ dpn[i].num = bit;
+ dpn[i].max_ch = 2;
+ dpn[i].type = SDW_DPN_FULL;
+ dpn[i].max_word = 24;
+ i++;
+ }
+
+ return 0;
+}
+
+static int cs42l43_sdw_update_status(struct sdw_slave *sdw, enum sdw_slave_status status)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(&sdw->dev);
+
+ switch (status) {
+ case SDW_SLAVE_ATTACHED:
+ dev_dbg(cs42l43->dev, "Device attach\n");
+
+ sdw_write_no_pm(sdw, CS42L43_GEN_INT_MASK_1,
+ CS42L43_INT_STAT_GEN1_MASK);
+
+ cs42l43->attached = true;
+
+ complete(&cs42l43->device_attach);
+ break;
+ case SDW_SLAVE_UNATTACHED:
+ dev_dbg(cs42l43->dev, "Device detach\n");
+
+ cs42l43->attached = false;
+
+ reinit_completion(&cs42l43->device_attach);
+ complete(&cs42l43->device_detach);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int cs42l43_sdw_interrupt(struct sdw_slave *sdw,
+ struct sdw_slave_intr_status *status)
+{
+ /*
+ * The IRQ itself was handled through the regmap_irq handler, this is
+ * just clearing up the additional Cirrus SoundWire registers that are
+ * not covered by the SoundWire framework or the IRQ handler itself.
+ * There is only a single bit in GEN_INT_STAT_1 and it doesn't clear if
+ * IRQs are still pending so doing a read/write here after handling the
+ * IRQ is fine.
+ */
+ sdw_read_no_pm(sdw, CS42L43_GEN_INT_STAT_1);
+ sdw_write_no_pm(sdw, CS42L43_GEN_INT_STAT_1, CS42L43_INT_STAT_GEN1_MASK);
+
+ return 0;
+}
+
+static int cs42l43_sdw_bus_config(struct sdw_slave *sdw,
+ struct sdw_bus_params *params)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(&sdw->dev);
+ int ret = 0;
+
+ mutex_lock(&cs42l43->pll_lock);
+
+ if (cs42l43->sdw_freq != params->curr_dr_freq / 2) {
+ if (cs42l43->sdw_pll_active) {
+ dev_err(cs42l43->dev,
+ "PLL active can't change SoundWire bus clock\n");
+ ret = -EBUSY;
+ } else {
+ cs42l43->sdw_freq = params->curr_dr_freq / 2;
+ }
+ }
+
+ mutex_unlock(&cs42l43->pll_lock);
+
+ return ret;
+}
+
+static const struct sdw_slave_ops cs42l43_sdw_ops = {
+ .read_prop = cs42l43_read_prop,
+ .update_status = cs42l43_sdw_update_status,
+ .interrupt_callback = cs42l43_sdw_interrupt,
+ .bus_config = cs42l43_sdw_bus_config,
+};
+
+static int cs42l43_sdw_probe(struct sdw_slave *sdw, const struct sdw_device_id *id)
+{
+ struct cs42l43 *cs42l43;
+ struct device *dev = &sdw->dev;
+ int ret;
+
+ cs42l43 = devm_kzalloc(dev, sizeof(*cs42l43), GFP_KERNEL);
+ if (!cs42l43)
+ return -ENOMEM;
+
+ cs42l43->dev = dev;
+ cs42l43->sdw = sdw;
+
+ cs42l43->regmap = devm_regmap_init_sdw(sdw, &cs42l43_sdw_regmap);
+ if (IS_ERR(cs42l43->regmap)) {
+ ret = PTR_ERR(cs42l43->regmap);
+ dev_err(cs42l43->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ return cs42l43_dev_probe(cs42l43);
+}
+
+static int cs42l43_sdw_remove(struct sdw_slave *sdw)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(&sdw->dev);
+
+ cs42l43_dev_remove(cs42l43);
+
+ return 0;
+}
+
+static const struct sdw_device_id cs42l43_sdw_id[] = {
+ SDW_SLAVE_ENTRY(0x01FA, 0x4243, 0),
+ {}
+};
+MODULE_DEVICE_TABLE(sdw, cs42l43_sdw_id);
+
+static struct sdw_driver cs42l43_sdw_driver = {
+ .driver = {
+ .name = "cs42l43",
+ .pm = pm_ptr(&cs42l43_pm_ops),
+ },
+
+ .probe = cs42l43_sdw_probe,
+ .remove = cs42l43_sdw_remove,
+ .id_table = cs42l43_sdw_id,
+ .ops = &cs42l43_sdw_ops,
+};
+module_sdw_driver(cs42l43_sdw_driver);
+
+MODULE_IMPORT_NS(MFD_CS42L43);
+
+MODULE_DESCRIPTION("CS42L43 SoundWire Driver");
+MODULE_AUTHOR("Lucas Tanure <tanureal@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
new file mode 100644
index 000000000000..37b23e9bae82
--- /dev/null
+++ b/drivers/mfd/cs42l43.c
@@ -0,0 +1,1188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CS42L43 core driver
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/jiffies.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/cs42l43-regs.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/soundwire/sdw.h>
+
+#include "cs42l43.h"
+
+#define CS42L43_RESET_DELAY 20
+
+#define CS42L43_SDW_ATTACH_TIMEOUT 500
+#define CS42L43_SDW_DETACH_TIMEOUT 100
+
+#define CS42L43_MCU_BOOT_STAGE1 1
+#define CS42L43_MCU_BOOT_STAGE2 2
+#define CS42L43_MCU_BOOT_STAGE3 3
+#define CS42L43_MCU_BOOT_STAGE4 4
+#define CS42L43_MCU_POLL 5000
+#define CS42L43_MCU_CMD_TIMEOUT 20000
+#define CS42L43_MCU_UPDATE_FORMAT 3
+#define CS42L43_MCU_UPDATE_OFFSET 0x100000
+#define CS42L43_MCU_UPDATE_TIMEOUT 500000
+#define CS42L43_MCU_UPDATE_RETRIES 5
+
+#define CS42L43_MCU_SUPPORTED_REV 0x2105
+#define CS42L43_MCU_SHADOW_REGS_REQUIRED_REV 0x2200
+#define CS42L43_MCU_SUPPORTED_BIOS_REV 0x0001
+
+#define CS42L43_VDDP_DELAY 50
+#define CS42L43_VDDD_DELAY 1000
+
+#define CS42L43_AUTOSUSPEND_TIME 250
+
+struct cs42l43_patch_header {
+ __le16 version;
+ __le16 size;
+ u8 reserved;
+ u8 secure;
+ __le16 bss_size;
+ __le32 apply_addr;
+ __le32 checksum;
+ __le32 sha;
+ __le16 swrev;
+ __le16 patchid;
+ __le16 ipxid;
+ __le16 romver;
+ __le32 load_addr;
+} __packed;
+
+static const struct reg_sequence cs42l43_reva_patch[] = {
+ { 0x4000, 0x00000055 },
+ { 0x4000, 0x000000AA },
+ { 0x10084, 0x00000000 },
+ { 0x1741C, 0x00CD2000 },
+ { 0x1718C, 0x00000003 },
+ { 0x4000, 0x00000000 },
+ { CS42L43_CCM_BLK_CLK_CONTROL, 0x00000002 },
+ { CS42L43_HPPATHVOL, 0x011B011B },
+ { CS42L43_OSC_DIV_SEL, 0x00000001 },
+ { CS42L43_DACCNFG2, 0x00000005 },
+ { CS42L43_MIC_DETECT_CONTROL_ANDROID, 0x80790079 },
+ { CS42L43_RELID, 0x0000000F },
+};
+
+const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
+ { CS42L43_DRV_CTRL1, 0x000186C0 },
+ { CS42L43_DRV_CTRL3, 0x286DB018 },
+ { CS42L43_DRV_CTRL4, 0x000006D8 },
+ { CS42L43_DRV_CTRL_5, 0x136C00C0 },
+ { CS42L43_GPIO_CTRL1, 0x00000707 },
+ { CS42L43_GPIO_CTRL2, 0x00000000 },
+ { CS42L43_GPIO_FN_SEL, 0x00000000 },
+ { CS42L43_MCLK_SRC_SEL, 0x00000000 },
+ { CS42L43_SAMPLE_RATE1, 0x00000003 },
+ { CS42L43_SAMPLE_RATE2, 0x00000003 },
+ { CS42L43_SAMPLE_RATE3, 0x00000003 },
+ { CS42L43_SAMPLE_RATE4, 0x00000003 },
+ { CS42L43_PLL_CONTROL, 0x00000000 },
+ { CS42L43_FS_SELECT1, 0x00000000 },
+ { CS42L43_FS_SELECT2, 0x00000000 },
+ { CS42L43_FS_SELECT3, 0x00000000 },
+ { CS42L43_FS_SELECT4, 0x00000000 },
+ { CS42L43_PDM_CONTROL, 0x00000000 },
+ { CS42L43_ASP_CLK_CONFIG1, 0x00010001 },
+ { CS42L43_ASP_CLK_CONFIG2, 0x00000000 },
+ { CS42L43_OSC_DIV_SEL, 0x00000001 },
+ { CS42L43_ADC_B_CTRL1, 0x00000000 },
+ { CS42L43_ADC_B_CTRL2, 0x00000000 },
+ { CS42L43_DECIM_HPF_WNF_CTRL1, 0x00000001 },
+ { CS42L43_DECIM_HPF_WNF_CTRL2, 0x00000001 },
+ { CS42L43_DECIM_HPF_WNF_CTRL3, 0x00000001 },
+ { CS42L43_DECIM_HPF_WNF_CTRL4, 0x00000001 },
+ { CS42L43_DMIC_PDM_CTRL, 0x00000000 },
+ { CS42L43_DECIM_VOL_CTRL_CH1_CH2, 0x20122012 },
+ { CS42L43_DECIM_VOL_CTRL_CH3_CH4, 0x20122012 },
+ { CS42L43_INTP_VOLUME_CTRL1, 0x00000180 },
+ { CS42L43_INTP_VOLUME_CTRL2, 0x00000180 },
+ { CS42L43_AMP1_2_VOL_RAMP, 0x00000022 },
+ { CS42L43_ASP_CTRL, 0x00000004 },
+ { CS42L43_ASP_FSYNC_CTRL1, 0x000000FA },
+ { CS42L43_ASP_FSYNC_CTRL2, 0x00000001 },
+ { CS42L43_ASP_FSYNC_CTRL3, 0x00000000 },
+ { CS42L43_ASP_FSYNC_CTRL4, 0x000001F4 },
+ { CS42L43_ASP_DATA_CTRL, 0x0000003A },
+ { CS42L43_ASP_RX_EN, 0x00000000 },
+ { CS42L43_ASP_TX_EN, 0x00000000 },
+ { CS42L43_ASP_RX_CH1_CTRL, 0x00170001 },
+ { CS42L43_ASP_RX_CH2_CTRL, 0x00170031 },
+ { CS42L43_ASP_RX_CH3_CTRL, 0x00170061 },
+ { CS42L43_ASP_RX_CH4_CTRL, 0x00170091 },
+ { CS42L43_ASP_RX_CH5_CTRL, 0x001700C1 },
+ { CS42L43_ASP_RX_CH6_CTRL, 0x001700F1 },
+ { CS42L43_ASP_TX_CH1_CTRL, 0x00170001 },
+ { CS42L43_ASP_TX_CH2_CTRL, 0x00170031 },
+ { CS42L43_ASP_TX_CH3_CTRL, 0x00170061 },
+ { CS42L43_ASP_TX_CH4_CTRL, 0x00170091 },
+ { CS42L43_ASP_TX_CH5_CTRL, 0x001700C1 },
+ { CS42L43_ASP_TX_CH6_CTRL, 0x001700F1 },
+ { CS42L43_ASPTX1_INPUT, 0x00800000 },
+ { CS42L43_ASPTX2_INPUT, 0x00800000 },
+ { CS42L43_ASPTX3_INPUT, 0x00800000 },
+ { CS42L43_ASPTX4_INPUT, 0x00800000 },
+ { CS42L43_ASPTX5_INPUT, 0x00800000 },
+ { CS42L43_ASPTX6_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00800000 },
+ { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00800000 },
+ { CS42L43_ASRC_INT1_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_INT2_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_INT3_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_INT4_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_DEC1_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_DEC2_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_DEC3_INPUT1, 0x00800000 },
+ { CS42L43_ASRC_DEC4_INPUT1, 0x00800000 },
+ { CS42L43_ISRC1INT1_INPUT1, 0x00800000 },
+ { CS42L43_ISRC1INT2_INPUT1, 0x00800000 },
+ { CS42L43_ISRC1DEC1_INPUT1, 0x00800000 },
+ { CS42L43_ISRC1DEC2_INPUT1, 0x00800000 },
+ { CS42L43_ISRC2INT1_INPUT1, 0x00800000 },
+ { CS42L43_ISRC2INT2_INPUT1, 0x00800000 },
+ { CS42L43_ISRC2DEC1_INPUT1, 0x00800000 },
+ { CS42L43_ISRC2DEC2_INPUT1, 0x00800000 },
+ { CS42L43_EQ1MIX_INPUT1, 0x00800000 },
+ { CS42L43_EQ1MIX_INPUT2, 0x00800000 },
+ { CS42L43_EQ1MIX_INPUT3, 0x00800000 },
+ { CS42L43_EQ1MIX_INPUT4, 0x00800000 },
+ { CS42L43_EQ2MIX_INPUT1, 0x00800000 },
+ { CS42L43_EQ2MIX_INPUT2, 0x00800000 },
+ { CS42L43_EQ2MIX_INPUT3, 0x00800000 },
+ { CS42L43_EQ2MIX_INPUT4, 0x00800000 },
+ { CS42L43_SPDIF1_INPUT1, 0x00800000 },
+ { CS42L43_SPDIF2_INPUT1, 0x00800000 },
+ { CS42L43_AMP1MIX_INPUT1, 0x00800000 },
+ { CS42L43_AMP1MIX_INPUT2, 0x00800000 },
+ { CS42L43_AMP1MIX_INPUT3, 0x00800000 },
+ { CS42L43_AMP1MIX_INPUT4, 0x00800000 },
+ { CS42L43_AMP2MIX_INPUT1, 0x00800000 },
+ { CS42L43_AMP2MIX_INPUT2, 0x00800000 },
+ { CS42L43_AMP2MIX_INPUT3, 0x00800000 },
+ { CS42L43_AMP2MIX_INPUT4, 0x00800000 },
+ { CS42L43_AMP3MIX_INPUT1, 0x00800000 },
+ { CS42L43_AMP3MIX_INPUT2, 0x00800000 },
+ { CS42L43_AMP3MIX_INPUT3, 0x00800000 },
+ { CS42L43_AMP3MIX_INPUT4, 0x00800000 },
+ { CS42L43_AMP4MIX_INPUT1, 0x00800000 },
+ { CS42L43_AMP4MIX_INPUT2, 0x00800000 },
+ { CS42L43_AMP4MIX_INPUT3, 0x00800000 },
+ { CS42L43_AMP4MIX_INPUT4, 0x00800000 },
+ { CS42L43_ASRC_INT_ENABLES, 0x00000100 },
+ { CS42L43_ASRC_DEC_ENABLES, 0x00000100 },
+ { CS42L43_PDNCNTL, 0x00000000 },
+ { CS42L43_RINGSENSE_DEB_CTRL, 0x0000001B },
+ { CS42L43_TIPSENSE_DEB_CTRL, 0x0000001B },
+ { CS42L43_HS2, 0x050106F3 },
+ { CS42L43_STEREO_MIC_CTRL, 0x00000000 },
+ { CS42L43_STEREO_MIC_CLAMP_CTRL, 0x00000001 },
+ { CS42L43_BLOCK_EN2, 0x00000000 },
+ { CS42L43_BLOCK_EN3, 0x00000000 },
+ { CS42L43_BLOCK_EN4, 0x00000000 },
+ { CS42L43_BLOCK_EN5, 0x00000000 },
+ { CS42L43_BLOCK_EN6, 0x00000000 },
+ { CS42L43_BLOCK_EN7, 0x00000000 },
+ { CS42L43_BLOCK_EN8, 0x00000000 },
+ { CS42L43_BLOCK_EN9, 0x00000000 },
+ { CS42L43_BLOCK_EN10, 0x00000000 },
+ { CS42L43_BLOCK_EN11, 0x00000000 },
+ { CS42L43_TONE_CH1_CTRL, 0x00000000 },
+ { CS42L43_TONE_CH2_CTRL, 0x00000000 },
+ { CS42L43_MIC_DETECT_CONTROL_1, 0x00000003 },
+ { CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL, 0x02000003 },
+ { CS42L43_MIC_DETECT_CONTROL_ANDROID, 0x80790079 },
+ { CS42L43_ISRC1_CTRL, 0x00000000 },
+ { CS42L43_ISRC2_CTRL, 0x00000000 },
+ { CS42L43_CTRL_REG, 0x00000006 },
+ { CS42L43_FDIV_FRAC, 0x40000000 },
+ { CS42L43_CAL_RATIO, 0x00000080 },
+ { CS42L43_SPI_CLK_CONFIG1, 0x00000000 },
+ { CS42L43_SPI_CONFIG1, 0x00000000 },
+ { CS42L43_SPI_CONFIG2, 0x00000000 },
+ { CS42L43_SPI_CONFIG3, 0x00000001 },
+ { CS42L43_SPI_CONFIG4, 0x00000000 },
+ { CS42L43_TRAN_CONFIG3, 0x00000000 },
+ { CS42L43_TRAN_CONFIG4, 0x00000000 },
+ { CS42L43_TRAN_CONFIG5, 0x00000000 },
+ { CS42L43_TRAN_CONFIG6, 0x00000000 },
+ { CS42L43_TRAN_CONFIG7, 0x00000000 },
+ { CS42L43_TRAN_CONFIG8, 0x00000000 },
+ { CS42L43_DACCNFG1, 0x00000008 },
+ { CS42L43_DACCNFG2, 0x00000005 },
+ { CS42L43_HPPATHVOL, 0x011B011B },
+ { CS42L43_PGAVOL, 0x00003470 },
+ { CS42L43_LOADDETENA, 0x00000000 },
+ { CS42L43_CTRL, 0x00000037 },
+ { CS42L43_COEFF_DATA_IN0, 0x00000000 },
+ { CS42L43_COEFF_RD_WR0, 0x00000000 },
+ { CS42L43_START_EQZ0, 0x00000000 },
+ { CS42L43_MUTE_EQ_IN0, 0x00000000 },
+ { CS42L43_DECIM_MASK, 0x0000000F },
+ { CS42L43_EQ_MIX_MASK, 0x0000000F },
+ { CS42L43_ASP_MASK, 0x000000FF },
+ { CS42L43_PLL_MASK, 0x00000003 },
+ { CS42L43_SOFT_MASK, 0x0000FFFF },
+ { CS42L43_SWIRE_MASK, 0x00007FFF },
+ { CS42L43_MSM_MASK, 0x00000FFF },
+ { CS42L43_ACC_DET_MASK, 0x00000FFF },
+ { CS42L43_I2C_TGT_MASK, 0x00000003 },
+ { CS42L43_SPI_MSTR_MASK, 0x00000007 },
+ { CS42L43_SW_TO_SPI_BRIDGE_MASK, 0x00000001 },
+ { CS42L43_OTP_MASK, 0x00000007 },
+ { CS42L43_CLASS_D_AMP_MASK, 0x00003FFF },
+ { CS42L43_GPIO_INT_MASK, 0x0000003F },
+ { CS42L43_ASRC_MASK, 0x0000000F },
+ { CS42L43_HPOUT_MASK, 0x00000003 },
+};
+EXPORT_SYMBOL_NS_GPL(cs42l43_reg_default, MFD_CS42L43);
+
+bool cs42l43_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS42L43_DEVID:
+ case CS42L43_REVID:
+ case CS42L43_RELID:
+ case CS42L43_SFT_RESET:
+ case CS42L43_DRV_CTRL1:
+ case CS42L43_DRV_CTRL3:
+ case CS42L43_DRV_CTRL4:
+ case CS42L43_DRV_CTRL_5:
+ case CS42L43_GPIO_CTRL1:
+ case CS42L43_GPIO_CTRL2:
+ case CS42L43_GPIO_STS:
+ case CS42L43_GPIO_FN_SEL:
+ case CS42L43_MCLK_SRC_SEL:
+ case CS42L43_SAMPLE_RATE1 ... CS42L43_SAMPLE_RATE4:
+ case CS42L43_PLL_CONTROL:
+ case CS42L43_FS_SELECT1 ... CS42L43_FS_SELECT4:
+ case CS42L43_PDM_CONTROL:
+ case CS42L43_ASP_CLK_CONFIG1 ... CS42L43_ASP_CLK_CONFIG2:
+ case CS42L43_OSC_DIV_SEL:
+ case CS42L43_ADC_B_CTRL1 ... CS42L43_ADC_B_CTRL2:
+ case CS42L43_DECIM_HPF_WNF_CTRL1 ... CS42L43_DECIM_HPF_WNF_CTRL4:
+ case CS42L43_DMIC_PDM_CTRL:
+ case CS42L43_DECIM_VOL_CTRL_CH1_CH2 ... CS42L43_DECIM_VOL_CTRL_CH3_CH4:
+ case CS42L43_INTP_VOLUME_CTRL1 ... CS42L43_INTP_VOLUME_CTRL2:
+ case CS42L43_AMP1_2_VOL_RAMP:
+ case CS42L43_ASP_CTRL:
+ case CS42L43_ASP_FSYNC_CTRL1 ... CS42L43_ASP_FSYNC_CTRL4:
+ case CS42L43_ASP_DATA_CTRL:
+ case CS42L43_ASP_RX_EN ... CS42L43_ASP_TX_EN:
+ case CS42L43_ASP_RX_CH1_CTRL ... CS42L43_ASP_RX_CH6_CTRL:
+ case CS42L43_ASP_TX_CH1_CTRL ... CS42L43_ASP_TX_CH6_CTRL:
+ case CS42L43_OTP_REVISION_ID:
+ case CS42L43_ASPTX1_INPUT:
+ case CS42L43_ASPTX2_INPUT:
+ case CS42L43_ASPTX3_INPUT:
+ case CS42L43_ASPTX4_INPUT:
+ case CS42L43_ASPTX5_INPUT:
+ case CS42L43_ASPTX6_INPUT:
+ case CS42L43_SWIRE_DP1_CH1_INPUT:
+ case CS42L43_SWIRE_DP1_CH2_INPUT:
+ case CS42L43_SWIRE_DP1_CH3_INPUT:
+ case CS42L43_SWIRE_DP1_CH4_INPUT:
+ case CS42L43_SWIRE_DP2_CH1_INPUT:
+ case CS42L43_SWIRE_DP2_CH2_INPUT:
+ case CS42L43_SWIRE_DP3_CH1_INPUT:
+ case CS42L43_SWIRE_DP3_CH2_INPUT:
+ case CS42L43_SWIRE_DP4_CH1_INPUT:
+ case CS42L43_SWIRE_DP4_CH2_INPUT:
+ case CS42L43_ASRC_INT1_INPUT1:
+ case CS42L43_ASRC_INT2_INPUT1:
+ case CS42L43_ASRC_INT3_INPUT1:
+ case CS42L43_ASRC_INT4_INPUT1:
+ case CS42L43_ASRC_DEC1_INPUT1:
+ case CS42L43_ASRC_DEC2_INPUT1:
+ case CS42L43_ASRC_DEC3_INPUT1:
+ case CS42L43_ASRC_DEC4_INPUT1:
+ case CS42L43_ISRC1INT1_INPUT1:
+ case CS42L43_ISRC1INT2_INPUT1:
+ case CS42L43_ISRC1DEC1_INPUT1:
+ case CS42L43_ISRC1DEC2_INPUT1:
+ case CS42L43_ISRC2INT1_INPUT1:
+ case CS42L43_ISRC2INT2_INPUT1:
+ case CS42L43_ISRC2DEC1_INPUT1:
+ case CS42L43_ISRC2DEC2_INPUT1:
+ case CS42L43_EQ1MIX_INPUT1 ... CS42L43_EQ1MIX_INPUT4:
+ case CS42L43_EQ2MIX_INPUT1 ... CS42L43_EQ2MIX_INPUT4:
+ case CS42L43_SPDIF1_INPUT1:
+ case CS42L43_SPDIF2_INPUT1:
+ case CS42L43_AMP1MIX_INPUT1 ... CS42L43_AMP1MIX_INPUT4:
+ case CS42L43_AMP2MIX_INPUT1 ... CS42L43_AMP2MIX_INPUT4:
+ case CS42L43_AMP3MIX_INPUT1 ... CS42L43_AMP3MIX_INPUT4:
+ case CS42L43_AMP4MIX_INPUT1 ... CS42L43_AMP4MIX_INPUT4:
+ case CS42L43_ASRC_INT_ENABLES ... CS42L43_ASRC_DEC_ENABLES:
+ case CS42L43_PDNCNTL:
+ case CS42L43_RINGSENSE_DEB_CTRL:
+ case CS42L43_TIPSENSE_DEB_CTRL:
+ case CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS:
+ case CS42L43_HS2:
+ case CS42L43_HS_STAT:
+ case CS42L43_MCU_SW_INTERRUPT:
+ case CS42L43_STEREO_MIC_CTRL:
+ case CS42L43_STEREO_MIC_CLAMP_CTRL:
+ case CS42L43_BLOCK_EN2 ... CS42L43_BLOCK_EN11:
+ case CS42L43_TONE_CH1_CTRL ... CS42L43_TONE_CH2_CTRL:
+ case CS42L43_MIC_DETECT_CONTROL_1:
+ case CS42L43_DETECT_STATUS_1:
+ case CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL:
+ case CS42L43_MIC_DETECT_CONTROL_ANDROID:
+ case CS42L43_ISRC1_CTRL:
+ case CS42L43_ISRC2_CTRL:
+ case CS42L43_CTRL_REG:
+ case CS42L43_FDIV_FRAC:
+ case CS42L43_CAL_RATIO:
+ case CS42L43_SPI_CLK_CONFIG1:
+ case CS42L43_SPI_CONFIG1 ... CS42L43_SPI_CONFIG4:
+ case CS42L43_SPI_STATUS1 ... CS42L43_SPI_STATUS2:
+ case CS42L43_TRAN_CONFIG1 ... CS42L43_TRAN_CONFIG8:
+ case CS42L43_TRAN_STATUS1 ... CS42L43_TRAN_STATUS3:
+ case CS42L43_TX_DATA:
+ case CS42L43_RX_DATA:
+ case CS42L43_DACCNFG1 ... CS42L43_DACCNFG2:
+ case CS42L43_HPPATHVOL:
+ case CS42L43_PGAVOL:
+ case CS42L43_LOADDETRESULTS:
+ case CS42L43_LOADDETENA:
+ case CS42L43_CTRL:
+ case CS42L43_COEFF_DATA_IN0:
+ case CS42L43_COEFF_RD_WR0:
+ case CS42L43_INIT_DONE0:
+ case CS42L43_START_EQZ0:
+ case CS42L43_MUTE_EQ_IN0:
+ case CS42L43_DECIM_INT ... CS42L43_HPOUT_INT:
+ case CS42L43_DECIM_MASK ... CS42L43_HPOUT_MASK:
+ case CS42L43_DECIM_INT_SHADOW ... CS42L43_HP_OUT_SHADOW:
+ case CS42L43_BOOT_CONTROL:
+ case CS42L43_BLOCK_EN:
+ case CS42L43_SHUTTER_CONTROL:
+ case CS42L43_MCU_SW_REV ... CS42L43_MCU_RAM_MAX:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs42l43_readable_register, MFD_CS42L43);
+
+bool cs42l43_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS42L43_SFT_RESET:
+ case CS42L43_TX_DATA:
+ case CS42L43_RX_DATA:
+ case CS42L43_DECIM_INT ... CS42L43_HPOUT_INT:
+ case CS42L43_MCU_SW_REV ... CS42L43_MCU_RAM_MAX:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs42l43_precious_register, MFD_CS42L43);
+
+bool cs42l43_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CS42L43_DEVID:
+ case CS42L43_REVID:
+ case CS42L43_RELID:
+ case CS42L43_GPIO_STS:
+ case CS42L43_OTP_REVISION_ID:
+ case CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS:
+ case CS42L43_HS_STAT:
+ case CS42L43_MCU_SW_INTERRUPT:
+ case CS42L43_DETECT_STATUS_1:
+ case CS42L43_SPI_STATUS1 ... CS42L43_SPI_STATUS2:
+ case CS42L43_TRAN_CONFIG1 ... CS42L43_TRAN_CONFIG2:
+ case CS42L43_TRAN_CONFIG8:
+ case CS42L43_TRAN_STATUS1 ... CS42L43_TRAN_STATUS3:
+ case CS42L43_LOADDETRESULTS:
+ case CS42L43_INIT_DONE0:
+ case CS42L43_DECIM_INT_SHADOW ... CS42L43_HP_OUT_SHADOW:
+ case CS42L43_BOOT_CONTROL:
+ case CS42L43_BLOCK_EN:
+ return true;
+ default:
+ return cs42l43_precious_register(dev, reg);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cs42l43_volatile_register, MFD_CS42L43);
+
+#define CS42L43_IRQ_OFFSET(reg) ((CS42L43_##reg##_INT) - CS42L43_DECIM_INT)
+
+#define CS42L43_IRQ_REG(name, reg) REGMAP_IRQ_REG(CS42L43_##name, \
+ CS42L43_IRQ_OFFSET(reg), \
+ CS42L43_##name##_INT_MASK)
+
+static const struct regmap_irq cs42l43_regmap_irqs[] = {
+ CS42L43_IRQ_REG(PLL_LOST_LOCK, PLL),
+ CS42L43_IRQ_REG(PLL_READY, PLL),
+
+ CS42L43_IRQ_REG(HP_STARTUP_DONE, MSM),
+ CS42L43_IRQ_REG(HP_SHUTDOWN_DONE, MSM),
+ CS42L43_IRQ_REG(HSDET_DONE, MSM),
+ CS42L43_IRQ_REG(TIPSENSE_UNPLUG_DB, MSM),
+ CS42L43_IRQ_REG(TIPSENSE_PLUG_DB, MSM),
+ CS42L43_IRQ_REG(RINGSENSE_UNPLUG_DB, MSM),
+ CS42L43_IRQ_REG(RINGSENSE_PLUG_DB, MSM),
+ CS42L43_IRQ_REG(TIPSENSE_UNPLUG_PDET, MSM),
+ CS42L43_IRQ_REG(TIPSENSE_PLUG_PDET, MSM),
+ CS42L43_IRQ_REG(RINGSENSE_UNPLUG_PDET, MSM),
+ CS42L43_IRQ_REG(RINGSENSE_PLUG_PDET, MSM),
+
+ CS42L43_IRQ_REG(HS2_BIAS_SENSE, ACC_DET),
+ CS42L43_IRQ_REG(HS1_BIAS_SENSE, ACC_DET),
+ CS42L43_IRQ_REG(DC_DETECT1_FALSE, ACC_DET),
+ CS42L43_IRQ_REG(DC_DETECT1_TRUE, ACC_DET),
+ CS42L43_IRQ_REG(HSBIAS_CLAMPED, ACC_DET),
+ CS42L43_IRQ_REG(HS3_4_BIAS_SENSE, ACC_DET),
+
+ CS42L43_IRQ_REG(AMP2_CLK_STOP_FAULT, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_CLK_STOP_FAULT, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP2_VDDSPK_FAULT, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_VDDSPK_FAULT, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP2_SHUTDOWN_DONE, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_SHUTDOWN_DONE, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP2_STARTUP_DONE, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_STARTUP_DONE, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP2_THERM_SHDN, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_THERM_SHDN, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP2_THERM_WARN, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_THERM_WARN, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP2_SCDET, CLASS_D_AMP),
+ CS42L43_IRQ_REG(AMP1_SCDET, CLASS_D_AMP),
+
+ CS42L43_IRQ_REG(GPIO3_FALL, GPIO),
+ CS42L43_IRQ_REG(GPIO3_RISE, GPIO),
+ CS42L43_IRQ_REG(GPIO2_FALL, GPIO),
+ CS42L43_IRQ_REG(GPIO2_RISE, GPIO),
+ CS42L43_IRQ_REG(GPIO1_FALL, GPIO),
+ CS42L43_IRQ_REG(GPIO1_RISE, GPIO),
+
+ CS42L43_IRQ_REG(HP_ILIMIT, HPOUT),
+ CS42L43_IRQ_REG(HP_LOADDET_DONE, HPOUT),
+};
+
+static const struct regmap_irq_chip cs42l43_irq_chip = {
+ .name = "cs42l43",
+
+ .status_base = CS42L43_DECIM_INT,
+ .mask_base = CS42L43_DECIM_MASK,
+ .num_regs = 16,
+
+ .irqs = cs42l43_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(cs42l43_regmap_irqs),
+
+ .runtime_pm = true,
+};
+
+static const char * const cs42l43_core_supplies[] = {
+ "vdd-a", "vdd-io", "vdd-cp",
+};
+
+static const char * const cs42l43_parent_supplies[] = { "vdd-amp" };
+
+static const struct mfd_cell cs42l43_devs[] = {
+ { .name = "cs42l43-pinctrl", },
+ { .name = "cs42l43-spi", },
+ {
+ .name = "cs42l43-codec",
+ .parent_supplies = cs42l43_parent_supplies,
+ .num_parent_supplies = ARRAY_SIZE(cs42l43_parent_supplies),
+ },
+};
+
+/*
+ * As well as soft resetting the device, if the device is connected over
+ * SoundWire this function will also wait for the device to detach from the
+ * bus before returning.
+ */
+static int cs42l43_soft_reset(struct cs42l43 *cs42l43)
+{
+ static const struct reg_sequence reset[] = {
+ { CS42L43_SFT_RESET, CS42L43_SFT_RESET_VAL },
+ };
+
+ reinit_completion(&cs42l43->device_detach);
+
+ /*
+	 * Enter cache-only mode because the soft reset will cause the device to
+	 * detach from the SoundWire bus.
+ */
+ regcache_cache_only(cs42l43->regmap, true);
+ regmap_multi_reg_write_bypassed(cs42l43->regmap, reset, ARRAY_SIZE(reset));
+
+ msleep(CS42L43_RESET_DELAY);
+
+ if (cs42l43->sdw) {
+ unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_DETACH_TIMEOUT);
+ unsigned long time;
+
+ time = wait_for_completion_timeout(&cs42l43->device_detach, timeout);
+ if (!time) {
+ dev_err(cs42l43->dev, "Timed out waiting for device detach\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return -EAGAIN;
+}
+
+/*
+ * This function is essentially a no-op on I2C, but will wait for the device to
+ * attach when the device is used on a SoundWire bus.
+ */
+static int cs42l43_wait_for_attach(struct cs42l43 *cs42l43)
+{
+ if (!cs42l43->attached) {
+ unsigned long timeout = msecs_to_jiffies(CS42L43_SDW_ATTACH_TIMEOUT);
+ unsigned long time;
+
+ time = wait_for_completion_timeout(&cs42l43->device_attach, timeout);
+ if (!time) {
+ dev_err(cs42l43->dev, "Timed out waiting for device re-attach\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ regcache_cache_only(cs42l43->regmap, false);
+
+ /* The hardware requires enabling OSC_DIV before doing any SoundWire reads. */
+ if (cs42l43->sdw)
+ regmap_write(cs42l43->regmap, CS42L43_OSC_DIV_SEL,
+ CS42L43_OSC_DIV2_EN_MASK);
+
+ return 0;
+}
+
+/*
+ * This function will advance the firmware into boot stage 3 from boot stage 2.
+ * Boot stage 3 is required to send commands to the firmware. This is achieved
+ * by setting the firmware NEED configuration register to zero; this indicates
+ * that no configuration is required, forcing the firmware to advance to boot
+ * stage 3.
+ *
+ * Later revisions of the firmware require the use of an alternative register
+ * for this purpose, which is indicated through the shadow flag.
+ */
+static int cs42l43_mcu_stage_2_3(struct cs42l43 *cs42l43, bool shadow)
+{
+ unsigned int need_reg = CS42L43_NEED_CONFIGS;
+ unsigned int val;
+ int ret;
+
+ if (shadow)
+ need_reg = CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS;
+
+ regmap_write(cs42l43->regmap, need_reg, 0);
+
+ ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_BOOT_STATUS,
+ val, (val == CS42L43_MCU_BOOT_STAGE3),
+ CS42L43_MCU_POLL, CS42L43_MCU_CMD_TIMEOUT);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to move to stage 3: %d, 0x%x\n", ret, val);
+ return ret;
+ }
+
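+ /* Return -EAGAIN so the caller re-evaluates the boot status for the next step. */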
+ return -EAGAIN;
+}
+
+/*
+ * This function will return the firmware to boot stage 2 from boot stage 3.
+ * Boot stage 2 is required to apply updates to the firmware. This is achieved
+ * by setting the firmware NEED configuration register to FW_PATCH_NEED_CFG,
+ * setting the HAVE configuration register to 0, and soft resetting. The
+ * firmware will see it is missing a patch configuration and will pause in boot
+ * stage 2.
+ *
+ * Note: Unlike cs42l43_mcu_stage_2_3, there is no need to consider the shadow
+ * register here, as the driver will only return to boot stage 2 if the
+ * firmware requires an update, which means the revision does not include
+ * shadow register support.
+ */
+static int cs42l43_mcu_stage_3_2(struct cs42l43 *cs42l43)
+{
+ regmap_write(cs42l43->regmap, CS42L43_FW_MISSION_CTRL_NEED_CONFIGS,
+ CS42L43_FW_PATCH_NEED_CFG_MASK);
+ regmap_write(cs42l43->regmap, CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS, 0);
+
+ return cs42l43_soft_reset(cs42l43);
+}
+
+/*
+ * Disable the firmware running on the device such that the driver can access
+ * the registers without fear of the MCU changing them under it.
+ */
+static int cs42l43_mcu_disable(struct cs42l43 *cs42l43)
+{
+ unsigned int val;
+ int ret;
+
+ regmap_write(cs42l43->regmap, CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG,
+ CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL);
+ regmap_write(cs42l43->regmap, CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION,
+ CS42L43_FW_MM_CTRL_MCU_SEL_MASK);
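+ /* Pulse the software interrupt to tell the MCU to apply the new control selection. */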
+ regmap_write(cs42l43->regmap, CS42L43_MCU_SW_INTERRUPT, CS42L43_CONTROL_IND_MASK);
+ regmap_write(cs42l43->regmap, CS42L43_MCU_SW_INTERRUPT, 0);
+
+ ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_SOFT_INT_SHADOW, val,
+ (val & CS42L43_CONTROL_APPLIED_INT_MASK),
+ CS42L43_MCU_POLL, CS42L43_MCU_CMD_TIMEOUT);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to disable firmware: %d, 0x%x\n", ret, val);
+ return ret;
+ }
+
+ /* Soft reset to clear any register state the firmware left behind. */
+ return cs42l43_soft_reset(cs42l43);
+}
+
+/*
+ * Callback to load firmware updates.
+ */
+static void cs42l43_mcu_load_firmware(const struct firmware *firmware, void *context)
+{
+ struct cs42l43 *cs42l43 = context;
+ const struct cs42l43_patch_header *hdr;
+ unsigned int loadaddr, val;
+ int ret;
+
+ if (!firmware) {
+ dev_err(cs42l43->dev, "Failed to load firmware\n");
+ cs42l43->firmware_error = -ENODEV;
+ goto err;
+ }
+
+ hdr = (const struct cs42l43_patch_header *)&firmware->data[0];
+ loadaddr = le32_to_cpu(hdr->load_addr);
+
+ if (le16_to_cpu(hdr->version) != CS42L43_MCU_UPDATE_FORMAT) {
+ dev_err(cs42l43->dev, "Bad firmware file format: %d\n", hdr->version);
+ cs42l43->firmware_error = -EINVAL;
+ goto err_release;
+ }
+
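+ /* Write the patch image into device memory at the load address from the header. */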
+ regmap_write(cs42l43->regmap, CS42L43_PATCH_START_ADDR, loadaddr);
+ regmap_bulk_write(cs42l43->regmap, loadaddr + CS42L43_MCU_UPDATE_OFFSET,
+ &firmware->data[0], firmware->size / sizeof(u32));
+
+ regmap_write(cs42l43->regmap, CS42L43_MCU_SW_INTERRUPT, CS42L43_PATCH_IND_MASK);
+ regmap_write(cs42l43->regmap, CS42L43_MCU_SW_INTERRUPT, 0);
+
+ ret = regmap_read_poll_timeout(cs42l43->regmap, CS42L43_SOFT_INT_SHADOW, val,
+ (val & CS42L43_PATCH_APPLIED_INT_MASK),
+ CS42L43_MCU_POLL, CS42L43_MCU_UPDATE_TIMEOUT);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to update firmware: %d, 0x%x\n", ret, val);
+ cs42l43->firmware_error = ret;
+ goto err_release;
+ }
+
+err_release:
+ release_firmware(firmware);
+err:
+ complete(&cs42l43->firmware_download);
+}
+
+/*
+ * The process of updating the firmware is split into a series of steps, at the
+ * end of each step a soft reset of the device might be required which will
+ * require the driver to wait for the device to re-attach on the SoundWire bus,
+ * if that control bus is being used.
+ */
+static int cs42l43_mcu_update_step(struct cs42l43 *cs42l43)
+{
+ unsigned int mcu_rev, bios_rev, boot_status, secure_cfg;
+ bool patched, shadow;
+ int ret;
+
+ /* Clear any stale software interrupt bits. */
+ regmap_read(cs42l43->regmap, CS42L43_SOFT_INT, &mcu_rev);
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_BOOT_STATUS, &boot_status);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to read boot status: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_MCU_SW_REV, &mcu_rev);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to read firmware revision: %d\n", ret);
+ return ret;
+ }
+
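+ /* Repack the major/minor/subminor fields into single comparable revision values. */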
+ bios_rev = (((mcu_rev & CS42L43_BIOS_MAJOR_REV_MASK) << 12) |
+ ((mcu_rev & CS42L43_BIOS_MINOR_REV_MASK) << 4) |
+ ((mcu_rev & CS42L43_BIOS_SUBMINOR_REV_MASK) >> 8)) >>
+ CS42L43_BIOS_MAJOR_REV_SHIFT;
+ mcu_rev = ((mcu_rev & CS42L43_FW_MAJOR_REV_MASK) << 12) |
+ ((mcu_rev & CS42L43_FW_MINOR_REV_MASK) << 4) |
+ ((mcu_rev & CS42L43_FW_SUBMINOR_REV_MASK) >> 8);
+
+ /*
+ * The firmware has two revision numbers; bringing either of them up to a
+ * supported version will provide the features the driver requires.
+ */
+ patched = mcu_rev >= CS42L43_MCU_SUPPORTED_REV ||
+ bios_rev >= CS42L43_MCU_SUPPORTED_BIOS_REV;
+ /*
+ * Later versions of the firmware require the driver to access some
+ * features through a set of shadow registers.
+ */
+ shadow = mcu_rev >= CS42L43_MCU_SHADOW_REGS_REQUIRED_REV;
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_BOOT_CONTROL, &secure_cfg);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to read security settings: %d\n", ret);
+ return ret;
+ }
+
+ cs42l43->hw_lock = secure_cfg & CS42L43_LOCK_HW_STS_MASK;
+
+ if (!patched && cs42l43->hw_lock) {
+ dev_err(cs42l43->dev, "Unpatched secure device\n");
+ return -EPERM;
+ }
+
+ dev_dbg(cs42l43->dev, "Firmware(0x%x, 0x%x) in boot stage %d\n",
+ mcu_rev, bios_rev, boot_status);
+
+ switch (boot_status) {
+ case CS42L43_MCU_BOOT_STAGE2:
+ if (!patched) {
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ "cs42l43.bin", cs42l43->dev,
+ GFP_KERNEL, cs42l43,
+ cs42l43_mcu_load_firmware);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to request firmware: %d\n", ret);
+ return ret;
+ }
+
+ wait_for_completion(&cs42l43->firmware_download);
+
+ if (cs42l43->firmware_error)
+ return cs42l43->firmware_error;
+
+ return -EAGAIN;
+ } else {
+ return cs42l43_mcu_stage_2_3(cs42l43, shadow);
+ }
+ case CS42L43_MCU_BOOT_STAGE3:
+ if (patched)
+ return cs42l43_mcu_disable(cs42l43);
+ else
+ return cs42l43_mcu_stage_3_2(cs42l43);
+ case CS42L43_MCU_BOOT_STAGE4:
+ return 0;
+ default:
+ dev_err(cs42l43->dev, "Invalid boot status: %d\n", boot_status);
+ return -EINVAL;
+ }
+}
+
+/*
+ * Update the firmware running on the device.
+ */
+static int cs42l43_mcu_update(struct cs42l43 *cs42l43)
+{
+ int i, ret;
+
+ for (i = 0; i < CS42L43_MCU_UPDATE_RETRIES; i++) {
+ ret = cs42l43_mcu_update_step(cs42l43);
+ if (ret != -EAGAIN)
+ return ret;
+
+ ret = cs42l43_wait_for_attach(cs42l43);
+ if (ret)
+ return ret;
+ }
+
+ dev_err(cs42l43->dev, "Failed retrying update\n");
+ return -ETIMEDOUT;
+}
+
+static int cs42l43_irq_config(struct cs42l43 *cs42l43)
+{
+ struct irq_data *irq_data;
+ unsigned long irq_flags;
+ int ret;
+
+ if (cs42l43->sdw)
+ cs42l43->irq = cs42l43->sdw->irq;
+
+ cs42l43->irq_chip = cs42l43_irq_chip;
+ cs42l43->irq_chip.irq_drv_data = cs42l43;
+
+ irq_data = irq_get_irq_data(cs42l43->irq);
+ if (!irq_data) {
+ dev_err(cs42l43->dev, "Invalid IRQ: %d\n", cs42l43->irq);
+ return -EINVAL;
+ }
+
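+ /* Use the trigger type already configured on the IRQ, defaulting to level low. */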
+ irq_flags = irqd_get_trigger_type(irq_data);
+ switch (irq_flags) {
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_HIGH:
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_FALLING:
+ break;
+ case IRQ_TYPE_NONE:
+ default:
+ irq_flags = IRQF_TRIGGER_LOW;
+ break;
+ }
+
+ irq_flags |= IRQF_ONESHOT;
+
+ ret = devm_regmap_add_irq_chip(cs42l43->dev, cs42l43->regmap,
+ cs42l43->irq, irq_flags, 0,
+ &cs42l43->irq_chip, &cs42l43->irq_data);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to add IRQ chip: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(cs42l43->dev, "Configured IRQ %d with flags 0x%lx\n",
+ cs42l43->irq, irq_flags);
+
+ return 0;
+}
+
+static void cs42l43_boot_work(struct work_struct *work)
+{
+ struct cs42l43 *cs42l43 = container_of(work, struct cs42l43, boot_work);
+ unsigned int devid, revid, otp;
+ int ret;
+
+ ret = cs42l43_wait_for_attach(cs42l43);
+ if (ret)
+ goto err;
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_DEVID, &devid);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to read devid: %d\n", ret);
+ goto err;
+ }
+
+ switch (devid) {
+ case CS42L43_DEVID_VAL:
+ break;
+ default:
+ dev_err(cs42l43->dev, "Unrecognised devid: 0x%06x\n", devid);
+ goto err;
+ }
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_REVID, &revid);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to read rev: %d\n", ret);
+ goto err;
+ }
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_OTP_REVISION_ID, &otp);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to read otp rev: %d\n", ret);
+ goto err;
+ }
+
+ dev_info(cs42l43->dev,
+ "devid: 0x%06x, rev: 0x%02x, otp: 0x%02x\n", devid, revid, otp);
+
+ ret = cs42l43_mcu_update(cs42l43);
+ if (ret)
+ goto err;
+
+ ret = regmap_register_patch(cs42l43->regmap, cs42l43_reva_patch,
+ ARRAY_SIZE(cs42l43_reva_patch));
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to apply register patch: %d\n", ret);
+ goto err;
+ }
+
+ ret = cs42l43_irq_config(cs42l43);
+ if (ret)
+ goto err;
+
+ ret = devm_mfd_add_devices(cs42l43->dev, PLATFORM_DEVID_NONE,
+ cs42l43_devs, ARRAY_SIZE(cs42l43_devs),
+ NULL, 0, NULL);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to add subdevices: %d\n", ret);
+ goto err;
+ }
+
+ pm_runtime_mark_last_busy(cs42l43->dev);
+ pm_runtime_put_autosuspend(cs42l43->dev);
+
+ return;
+
+err:
+ pm_runtime_put_sync(cs42l43->dev);
+ cs42l43_dev_remove(cs42l43);
+}
+
+static int cs42l43_power_up(struct cs42l43 *cs42l43)
+{
+ int ret;
+
+ ret = regulator_enable(cs42l43->vdd_p);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to enable vdd-p: %d\n", ret);
+ return ret;
+ }
+
+ /* vdd-p must be on for 50 us before any other supply */
+ usleep_range(CS42L43_VDDP_DELAY, 2 * CS42L43_VDDP_DELAY);
+
+ gpiod_set_value_cansleep(cs42l43->reset, 1);
+
+ ret = regulator_bulk_enable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to enable core supplies: %d\n", ret);
+ goto err_reset;
+ }
+
+ ret = regulator_enable(cs42l43->vdd_d);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to enable vdd-d: %d\n", ret);
+ goto err_core_supplies;
+ }
+
+ usleep_range(CS42L43_VDDD_DELAY, 2 * CS42L43_VDDD_DELAY);
+
+ return 0;
+
+err_core_supplies:
+ regulator_bulk_disable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
+err_reset:
+ gpiod_set_value_cansleep(cs42l43->reset, 0);
+ regulator_disable(cs42l43->vdd_p);
+
+ return ret;
+}
+
+static int cs42l43_power_down(struct cs42l43 *cs42l43)
+{
+ int ret;
+
+ ret = regulator_disable(cs42l43->vdd_d);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to disable vdd-d: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_disable(CS42L43_N_SUPPLIES, cs42l43->core_supplies);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to disable core supplies: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(cs42l43->reset, 0);
+
+ ret = regulator_disable(cs42l43->vdd_p);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to disable vdd-p: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int cs42l43_dev_probe(struct cs42l43 *cs42l43)
+{
+ int i, ret;
+
+ dev_set_drvdata(cs42l43->dev, cs42l43);
+
+ mutex_init(&cs42l43->pll_lock);
+ init_completion(&cs42l43->device_attach);
+ init_completion(&cs42l43->device_detach);
+ init_completion(&cs42l43->firmware_download);
+ INIT_WORK(&cs42l43->boot_work, cs42l43_boot_work);
+
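+ /* Hold the register cache in cache-only mode until the device has powered up and attached. */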
+ regcache_cache_only(cs42l43->regmap, true);
+
+ cs42l43->reset = devm_gpiod_get_optional(cs42l43->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs42l43->reset))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->reset),
+ "Failed to get reset\n");
+
+ cs42l43->vdd_p = devm_regulator_get(cs42l43->dev, "vdd-p");
+ if (IS_ERR(cs42l43->vdd_p))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->vdd_p),
+ "Failed to get vdd-p\n");
+
+ cs42l43->vdd_d = devm_regulator_get(cs42l43->dev, "vdd-d");
+ if (IS_ERR(cs42l43->vdd_d))
+ return dev_err_probe(cs42l43->dev, PTR_ERR(cs42l43->vdd_d),
+ "Failed to get vdd-d\n");
+
+ BUILD_BUG_ON(ARRAY_SIZE(cs42l43_core_supplies) != CS42L43_N_SUPPLIES);
+
+ for (i = 0; i < CS42L43_N_SUPPLIES; i++)
+ cs42l43->core_supplies[i].supply = cs42l43_core_supplies[i];
+
+ ret = devm_regulator_bulk_get(cs42l43->dev, CS42L43_N_SUPPLIES,
+ cs42l43->core_supplies);
+ if (ret)
+ return dev_err_probe(cs42l43->dev, ret,
+ "Failed to get core supplies\n");
+
+ ret = cs42l43_power_up(cs42l43);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(cs42l43->dev, CS42L43_AUTOSUSPEND_TIME);
+ pm_runtime_use_autosuspend(cs42l43->dev);
+ pm_runtime_set_active(cs42l43->dev);
+ /*
+ * The device is already powered up, but keep it from suspending until
+ * the boot work runs.
+ */
+ pm_runtime_get_noresume(cs42l43->dev);
+ devm_pm_runtime_enable(cs42l43->dev);
+
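+ /* Defer the rest of the setup to boot_work, which waits for the device to attach. */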
+ queue_work(system_long_wq, &cs42l43->boot_work);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cs42l43_dev_probe, MFD_CS42L43);
+
+void cs42l43_dev_remove(struct cs42l43 *cs42l43)
+{
+ cs42l43_power_down(cs42l43);
+}
+EXPORT_SYMBOL_NS_GPL(cs42l43_dev_remove, MFD_CS42L43);
+
+static int cs42l43_suspend(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * Don't care about being resumed here, but the driver does want
+ * force_resume to always trigger an actual resume, so that register
+ * state for the MCU/GPIOs is returned as soon as possible after system
+ * resume. force_resume will only resume the device if the reference count
+ * was elevated when force_suspend ran, hence the get_noresume here.
+ */
+ pm_runtime_get_noresume(dev);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to force suspend: %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ pm_runtime_put_noidle(dev);
+
+ ret = cs42l43_power_down(cs42l43);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cs42l43_resume(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cs42l43_power_up(cs42l43);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to force resume: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs42l43_runtime_suspend(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+
+ /*
+ * Whilst the driver doesn't power the chip down here, going into runtime
+ * suspend lets the SoundWire bus power down, which means the driver
+ * can't communicate with the device any more.
+ */
+ regcache_cache_only(cs42l43->regmap, true);
+
+ return 0;
+}
+
+static int cs42l43_runtime_resume(struct device *dev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(dev);
+ unsigned int reset_canary;
+ int ret;
+
+ ret = cs42l43_wait_for_attach(cs42l43);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(cs42l43->regmap, CS42L43_RELID, &reset_canary);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to check reset canary: %d\n", ret);
+ goto err;
+ }
+
+ if (!reset_canary) {
+ /*
+ * If the canary has cleared, the chip has reset; re-handle the
+ * MCU and mark the cache as dirty so the sync below restores all
+ * register state.
+ */
+ ret = cs42l43_mcu_update(cs42l43);
+ if (ret)
+ goto err;
+
+ regcache_mark_dirty(cs42l43->regmap);
+ }
+
+ ret = regcache_sync(cs42l43->regmap);
+ if (ret) {
+ dev_err(cs42l43->dev, "Failed to restore register cache: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_cache_only(cs42l43->regmap, true);
+
+ return ret;
+}
+
+EXPORT_NS_GPL_DEV_PM_OPS(cs42l43_pm_ops, MFD_CS42L43) = {
+ SET_SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
+ SET_RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
+};
+
+MODULE_DESCRIPTION("CS42L43 Core Driver");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("cs42l43.bin");
diff --git a/drivers/mfd/cs42l43.h b/drivers/mfd/cs42l43.h
new file mode 100644
index 000000000000..eb4caf393833
--- /dev/null
+++ b/drivers/mfd/cs42l43.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 core driver internal data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/mfd/cs42l43.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+#ifndef CS42L43_CORE_INT_H
+#define CS42L43_CORE_INT_H
+
+#define CS42L43_N_DEFAULTS 176
+
+extern const struct dev_pm_ops cs42l43_pm_ops;
+extern const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS];
+
+bool cs42l43_readable_register(struct device *dev, unsigned int reg);
+bool cs42l43_precious_register(struct device *dev, unsigned int reg);
+bool cs42l43_volatile_register(struct device *dev, unsigned int reg);
+
+int cs42l43_dev_probe(struct cs42l43 *cs42l43);
+void cs42l43_dev_remove(struct cs42l43 *cs42l43);
+
+#endif /* CS42L43_CORE_INT_H */
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
index 6a21000aad4a..9bb7d7d8dcfc 100644
--- a/drivers/mfd/tps65086.c
+++ b/drivers/mfd/tps65086.c
@@ -81,16 +81,23 @@ static int tps65086_probe(struct i2c_client *client)
return PTR_ERR(tps->regmap);
}
- ret = regmap_read(tps->regmap, TPS65086_DEVICEID, &version);
+ /* Store device ID to load the regulator configuration that fits the IC variant */
+ ret = regmap_read(tps->regmap, TPS65086_DEVICEID1, &tps->chip_id);
if (ret) {
- dev_err(tps->dev, "Failed to read revision register\n");
+ dev_err(tps->dev, "Failed to read revision register 1\n");
+ return ret;
+ }
+
+ ret = regmap_read(tps->regmap, TPS65086_DEVICEID2, &version);
+ if (ret) {
+ dev_err(tps->dev, "Failed to read revision register 2\n");
return ret;
}
dev_info(tps->dev, "Device: TPS65086%01lX, OTP: %c, Rev: %ld\n",
- (version & TPS65086_DEVICEID_PART_MASK),
- (char)((version & TPS65086_DEVICEID_OTP_MASK) >> 4) + 'A',
- (version & TPS65086_DEVICEID_REV_MASK) >> 6);
+ (version & TPS65086_DEVICEID2_PART_MASK),
+ (char)((version & TPS65086_DEVICEID2_OTP_MASK) >> 4) + 'A',
+ (version & TPS65086_DEVICEID2_REV_MASK) >> 6);
if (tps->irq > 0) {
ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 35fec1bf1b3d..5867af9f592c 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -139,7 +139,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = get_next_ino();
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
}
return ret;
}
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index cbaf6d35e854..2101eb12bcba 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -1124,7 +1124,7 @@ static ssize_t ibmvmc_write(struct file *file, const char *buffer,
goto out;
inode = file_inode(file);
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 3c95600ab2f7..c66cc05a68c4 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -273,8 +273,8 @@ static void lkdtm_HUNG_TASK(void)
schedule();
}
-volatile unsigned int huge = INT_MAX - 2;
-volatile unsigned int ignored;
+static volatile unsigned int huge = INT_MAX - 2;
+static volatile unsigned int ignored;
static void lkdtm_OVERFLOW_SIGNED(void)
{
@@ -305,7 +305,7 @@ static void lkdtm_OVERFLOW_UNSIGNED(void)
ignored = value;
}
-/* Intentionally using old-style flex array definition of 1 byte. */
+/* Intentionally using unannotated flex array definition. */
struct array_bounds_flex_array {
int one;
int two;
@@ -357,6 +357,46 @@ static void lkdtm_ARRAY_BOUNDS(void)
pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
+struct lkdtm_annotated {
+ unsigned long flags;
+ int count;
+ int array[] __counted_by(count);
+};
+
+static volatile int fam_count = 4;
+
+static void lkdtm_FAM_BOUNDS(void)
+{
+ struct lkdtm_annotated *inst;
+
+ inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
+ if (!inst) {
+ pr_err("FAIL: could not allocate test struct!\n");
+ return;
+ }
+
+ inst->count = fam_count;
+ pr_info("Array access within bounds ...\n");
+ inst->array[1] = fam_count;
+ ignored = inst->array[1];
+
+ pr_info("Array access beyond bounds ...\n");
+ inst->array[fam_count] = fam_count;
+ ignored = inst->array[fam_count];
+
+ kfree(inst);
+
+ pr_err("FAIL: survived access of invalid flexible array member index!\n");
+
+ if (!__has_attribute(__counted_by__))
+ pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
+ lkdtm_kernel_info);
+ else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
+}
+
static void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
@@ -393,7 +433,7 @@ static void lkdtm_CORRUPT_LIST_ADD(void)
pr_err("Overwrite did not happen, but no BUG?!\n");
else {
pr_err("list_add() corruption not detected!\n");
- pr_expected_config(CONFIG_DEBUG_LIST);
+ pr_expected_config(CONFIG_LIST_HARDENED);
}
}
@@ -420,7 +460,7 @@ static void lkdtm_CORRUPT_LIST_DEL(void)
pr_err("Overwrite did not happen, but no BUG?!\n");
else {
pr_err("list_del() corruption not detected!\n");
- pr_expected_config(CONFIG_DEBUG_LIST);
+ pr_expected_config(CONFIG_LIST_HARDENED);
}
}
@@ -616,6 +656,7 @@ static struct crashtype crashtypes[] = {
CRASHTYPE(OVERFLOW_SIGNED),
CRASHTYPE(OVERFLOW_UNSIGNED),
CRASHTYPE(ARRAY_BOUNDS),
+ CRASHTYPE(FAM_BOUNDS),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b6f4be25b31b..b5b414a71e0b 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -918,17 +918,9 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
struct scatterlist sg;
- cmd.opcode = MMC_APP_CMD;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
-
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_app_cmd(card->host, card);
if (err)
return err;
- if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
- return -EIO;
-
- memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
cmd.arg = 0;
@@ -3026,7 +3018,6 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_disable(&card->dev);
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
- dev_set_drvdata(&card->dev, NULL);
destroy_workqueue(card->complete_wq);
}
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 2c3074a605fc..0af96548e7da 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -310,6 +310,9 @@ int mmc_add_card(struct mmc_card *card)
dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca);
+ dev_set_removable(&card->dev,
+ mmc_card_is_removable(card->host) ?
+ DEVICE_REMOVABLE : DEVICE_FIXED);
switch (card->type) {
case MMC_TYPE_MMC:
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 09ffbc00908b..92d4194c7893 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -32,7 +32,6 @@ int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
u32 args, void *buf, unsigned len);
int mmc_send_csd(struct mmc_card *card, u32 *csd);
int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries);
-int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index f6dde9edd7a3..3b6d69cefb4e 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -90,14 +90,12 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
-static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+static void mmc_pwrseq_emmc_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
unregister_restart_handler(&pwrseq->reset_nb);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
-
- return 0;
}
static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
@@ -109,7 +107,7 @@ MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
static struct platform_driver mmc_pwrseq_emmc_driver = {
.probe = mmc_pwrseq_emmc_probe,
- .remove = mmc_pwrseq_emmc_remove,
+ .remove_new = mmc_pwrseq_emmc_remove,
.driver = {
.name = "pwrseq_emmc",
.of_match_table = mmc_pwrseq_emmc_of_match,
diff --git a/drivers/mmc/core/pwrseq_sd8787.c b/drivers/mmc/core/pwrseq_sd8787.c
index 0c5f5e371e1f..0c5808fc3206 100644
--- a/drivers/mmc/core/pwrseq_sd8787.c
+++ b/drivers/mmc/core/pwrseq_sd8787.c
@@ -113,18 +113,16 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
-static int mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
+static void mmc_pwrseq_sd8787_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_sd8787 *pwrseq = platform_get_drvdata(pdev);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
-
- return 0;
}
static struct platform_driver mmc_pwrseq_sd8787_driver = {
.probe = mmc_pwrseq_sd8787_probe,
- .remove = mmc_pwrseq_sd8787_remove,
+ .remove_new = mmc_pwrseq_sd8787_remove,
.driver = {
.name = "pwrseq_sd8787",
.of_match_table = mmc_pwrseq_sd8787_of_match,
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 3bac1e71411b..df9588503ad0 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -142,18 +142,16 @@ static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
return mmc_pwrseq_register(&pwrseq->pwrseq);
}
-static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+static void mmc_pwrseq_simple_remove(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
mmc_pwrseq_unregister(&pwrseq->pwrseq);
-
- return 0;
}
static struct platform_driver mmc_pwrseq_simple_driver = {
.probe = mmc_pwrseq_simple_probe,
- .remove = mmc_pwrseq_simple_remove,
+ .remove_new = mmc_pwrseq_simple_remove,
.driver = {
.name = "pwrseq_simple",
.of_match_table = mmc_pwrseq_simple_of_match,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 246ce027ae0a..c3e554344c99 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1518,6 +1518,13 @@ retry:
*/
mmc_set_clock(host, mmc_sd_get_max_clock(card));
+ if (host->ios.timing == MMC_TIMING_SD_HS &&
+ host->ops->prepare_sd_hs_tuning) {
+ err = host->ops->prepare_sd_hs_tuning(host, card);
+ if (err)
+ goto free_card;
+ }
+
/*
* Switch to wider bus (if supported).
*/
@@ -1529,6 +1536,13 @@ retry:
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
+
+ if (host->ios.timing == MMC_TIMING_SD_HS &&
+ host->ops->execute_sd_hs_tuning) {
+ err = host->ops->execute_sd_hs_tuning(host, card);
+ if (err)
+ goto free_card;
+ }
}
cont:
if (!oldcard) {
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index ef8d1dce5af1..a59cd592f06e 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -323,6 +323,7 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,
return mmc_send_adtc_data(card, card->host, SD_SWITCH, cmd_args, resp,
64);
}
+EXPORT_SYMBOL_GPL(mmc_sd_switch);
int mmc_app_sd_status(struct mmc_card *card, void *ssr)
{
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 3ba7b3cf4652..7667fc223b74 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -19,8 +19,6 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr);
int mmc_send_if_cond_pcie(struct mmc_host *host, u32 ocr);
int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca);
int mmc_app_send_scr(struct mmc_card *card);
-int mmc_sd_switch(struct mmc_card *card, int mode, int group,
- u8 value, u8 *resp);
int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index d01df01d4b4d..42aa43740ba8 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1125,7 +1125,7 @@ free_host:
return ret;
}
-static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
+static void alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
struct mmc_host *mmc = mmc_from_priv(host);
@@ -1136,8 +1136,6 @@ static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
alcor_hw_uninit(host);
mmc_remove_host(mmc);
mmc_free_host(mmc);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1177,7 +1175,7 @@ MODULE_DEVICE_TABLE(platform, alcor_pci_sdmmc_ids);
static struct platform_driver alcor_pci_sdmmc_driver = {
.probe = alcor_pci_sdmmc_drv_probe,
- .remove = alcor_pci_sdmmc_drv_remove,
+ .remove_new = alcor_pci_sdmmc_drv_remove,
.id_table = alcor_pci_sdmmc_ids,
.driver = {
.name = DRV_NAME_ALCOR_PCI_SDMMC,
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index dd18440a90c5..535783c43105 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -11,15 +11,14 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/irq.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
@@ -30,7 +29,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
-#include <linux/atmel-mci.h>
#include <linux/atmel_pdc.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -40,6 +38,8 @@
#include <asm/io.h>
#include <asm/unaligned.h>
+#define ATMCI_MAX_NR_SLOTS 2
+
/*
* Superset of MCI IP registers integrated in Atmel AT91 Processor
* Registers and bitfields marked with [2] are only available in MCI2
@@ -201,6 +201,40 @@ enum atmci_pdc_buf {
PDC_SECOND_BUF,
};
+/**
+ * struct mci_slot_pdata - board-specific per-slot configuration
+ * @bus_width: Number of data lines wired up to the slot
+ * @detect_pin: GPIO pin wired to the card detect switch
+ * @wp_pin: GPIO pin wired to the write protect sensor
+ * @non_removable: The slot is not removable, only detect once
+ *
+ * If a given slot is not present on the board, @bus_width should be
+ * set to 0. The other fields are ignored in this case.
+ *
+ * Any pins that aren't available should be left as NULL.
+ *
+ * Note that support for multiple slots is experimental -- some cards
+ * might get upset if we don't get the clock management exactly right.
+ * But in most cases, it should work just fine.
+ */
+struct mci_slot_pdata {
+ unsigned int bus_width;
+ struct gpio_desc *detect_pin;
+ struct gpio_desc *wp_pin;
+ bool non_removable;
+};
+
+/**
+ * struct mci_platform_data - board-specific MMC/SDcard configuration
+ * @dma_slave: DMA slave interface to use in data transfers.
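+ * @dma_filter: Filter function used when requesting the DMA channel.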
+ * @slot: Per-slot configuration data.
+ */
+struct mci_platform_data {
+ void *dma_slave;
+ dma_filter_fn dma_filter;
+ struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
+};
+
struct atmel_mci_caps {
bool has_dma_conf_reg;
bool has_pdc;
@@ -369,7 +403,6 @@ struct atmel_mci {
* available.
* @wp_pin: GPIO pin used for card write protect sending, or negative
* if not available.
- * @detect_is_active_high: The state of the detect pin when it is active.
* @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/
struct atmel_mci_slot {
@@ -388,9 +421,8 @@ struct atmel_mci_slot {
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
+ struct gpio_desc *detect_pin;
+ struct gpio_desc *wp_pin;
struct timer_list detect_timer;
};
@@ -608,6 +640,7 @@ atmci_of_init(struct platform_device *pdev)
struct device_node *cnp;
struct mci_platform_data *pdata;
u32 slot_id;
+ int err;
if (!np) {
dev_err(&pdev->dev, "device node not found\n");
@@ -637,16 +670,27 @@ atmci_of_init(struct platform_device *pdev)
pdata->slot[slot_id].bus_width = 1;
pdata->slot[slot_id].detect_pin =
- of_get_named_gpio(cnp, "cd-gpios", 0);
-
- pdata->slot[slot_id].detect_is_active_high =
- of_property_read_bool(cnp, "cd-inverted");
+ devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(cnp),
+ "cd", GPIOD_IN, "cd-gpios");
+ err = PTR_ERR_OR_ZERO(pdata->slot[slot_id].detect_pin);
+ if (err) {
+ if (err != -ENOENT)
+ return ERR_PTR(err);
+ pdata->slot[slot_id].detect_pin = NULL;
+ }
pdata->slot[slot_id].non_removable =
of_property_read_bool(cnp, "non-removable");
pdata->slot[slot_id].wp_pin =
- of_get_named_gpio(cnp, "wp-gpios", 0);
+ devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(cnp),
+ "wp", GPIOD_IN, "wp-gpios");
+ err = PTR_ERR_OR_ZERO(pdata->slot[slot_id].wp_pin);
+ if (err) {
+ if (err != -ENOENT)
+ return ERR_PTR(err);
+ pdata->slot[slot_id].wp_pin = NULL;
+ }
}
return pdata;
@@ -1509,8 +1553,8 @@ static int atmci_get_ro(struct mmc_host *mmc)
int read_only = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
- if (gpio_is_valid(slot->wp_pin)) {
- read_only = gpio_get_value(slot->wp_pin);
+ if (slot->wp_pin) {
+ read_only = gpiod_get_value(slot->wp_pin);
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
}
@@ -1523,9 +1567,8 @@ static int atmci_get_cd(struct mmc_host *mmc)
int present = -ENOSYS;
struct atmel_mci_slot *slot = mmc_priv(mmc);
- if (gpio_is_valid(slot->detect_pin)) {
- present = !(gpio_get_value(slot->detect_pin) ^
- slot->detect_is_active_high);
+ if (slot->detect_pin) {
+ present = gpiod_get_value_cansleep(slot->detect_pin);
dev_dbg(&mmc->class_dev, "card is %spresent\n",
present ? "" : "not ");
}
@@ -1637,9 +1680,8 @@ static void atmci_detect_change(struct timer_list *t)
if (test_bit(ATMCI_SHUTDOWN, &slot->flags))
return;
- enable_irq(gpio_to_irq(slot->detect_pin));
- present = !(gpio_get_value(slot->detect_pin) ^
- slot->detect_is_active_high);
+ enable_irq(gpiod_to_irq(slot->detect_pin));
+ present = gpiod_get_value_cansleep(slot->detect_pin);
present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags);
dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n",
@@ -2230,16 +2272,15 @@ static int atmci_init_slot(struct atmel_mci *host,
slot->host = host;
slot->detect_pin = slot_data->detect_pin;
slot->wp_pin = slot_data->wp_pin;
- slot->detect_is_active_high = slot_data->detect_is_active_high;
slot->sdc_reg = sdc_reg;
slot->sdio_irq = sdio_irq;
dev_dbg(&mmc->class_dev,
"slot[%u]: bus_width=%u, detect_pin=%d, "
"detect_is_active_high=%s, wp_pin=%d\n",
- id, slot_data->bus_width, slot_data->detect_pin,
- slot_data->detect_is_active_high ? "true" : "false",
- slot_data->wp_pin);
+ id, slot_data->bus_width, desc_to_gpio(slot_data->detect_pin),
+ !gpiod_is_active_low(slot_data->detect_pin) ? "true" : "false",
+ desc_to_gpio(slot_data->wp_pin));
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
@@ -2275,31 +2316,22 @@ static int atmci_init_slot(struct atmel_mci *host,
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
- if (gpio_is_valid(slot->detect_pin)) {
- if (devm_gpio_request(&host->pdev->dev, slot->detect_pin,
- "mmc_detect")) {
- dev_dbg(&mmc->class_dev, "no detect pin available\n");
- slot->detect_pin = -EBUSY;
- } else if (gpio_get_value(slot->detect_pin) ^
- slot->detect_is_active_high) {
+ if (slot->detect_pin) {
+ if (!gpiod_get_value_cansleep(slot->detect_pin))
clear_bit(ATMCI_CARD_PRESENT, &slot->flags);
- }
+ } else {
+ dev_dbg(&mmc->class_dev, "no detect pin available\n");
}
- if (!gpio_is_valid(slot->detect_pin)) {
+ if (!slot->detect_pin) {
if (slot_data->non_removable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
else
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
- if (gpio_is_valid(slot->wp_pin)) {
- if (devm_gpio_request(&host->pdev->dev, slot->wp_pin,
- "mmc_wp")) {
- dev_dbg(&mmc->class_dev, "no WP pin available\n");
- slot->wp_pin = -EBUSY;
- }
- }
+ if (!slot->wp_pin)
+ dev_dbg(&mmc->class_dev, "no WP pin available\n");
host->slot[id] = slot;
mmc_regulator_get_supply(mmc);
@@ -2309,18 +2341,18 @@ static int atmci_init_slot(struct atmel_mci *host,
return ret;
}
- if (gpio_is_valid(slot->detect_pin)) {
+ if (slot->detect_pin) {
timer_setup(&slot->detect_timer, atmci_detect_change, 0);
- ret = request_irq(gpio_to_irq(slot->detect_pin),
- atmci_detect_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "mmc-detect", slot);
+ ret = request_irq(gpiod_to_irq(slot->detect_pin),
+ atmci_detect_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "mmc-detect", slot);
if (ret) {
dev_dbg(&mmc->class_dev,
"could not request IRQ %d for detect pin\n",
- gpio_to_irq(slot->detect_pin));
- slot->detect_pin = -EBUSY;
+ gpiod_to_irq(slot->detect_pin));
+ slot->detect_pin = NULL;
}
}
@@ -2339,10 +2371,8 @@ static void atmci_cleanup_slot(struct atmel_mci_slot *slot,
mmc_remove_host(slot->mmc);
- if (gpio_is_valid(slot->detect_pin)) {
- int pin = slot->detect_pin;
-
- free_irq(gpio_to_irq(pin), slot);
+ if (slot->detect_pin) {
+ free_irq(gpiod_to_irq(slot->detect_pin), slot);
del_timer_sync(&slot->detect_timer);
}
@@ -2600,7 +2630,7 @@ err_dma_probe_defer:
return ret;
}
-static int atmci_remove(struct platform_device *pdev)
+static void atmci_remove(struct platform_device *pdev)
{
struct atmel_mci *host = platform_get_drvdata(pdev);
unsigned int i;
@@ -2630,8 +2660,6 @@ static int atmci_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -2664,7 +2692,7 @@ static const struct dev_pm_ops atmci_dev_pm_ops = {
static struct platform_driver atmci_driver = {
.probe = atmci_probe,
- .remove = atmci_remove,
+ .remove_new = atmci_remove,
.driver = {
.name = "atmel_mci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 82dd0ae40305..b5a5c6a2fe8b 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -1114,7 +1114,7 @@ out0:
return ret;
}
-static int au1xmmc_remove(struct platform_device *pdev)
+static void au1xmmc_remove(struct platform_device *pdev)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
@@ -1153,7 +1153,6 @@ static int au1xmmc_remove(struct platform_device *pdev)
mmc_free_host(host->mmc);
}
- return 0;
}
#ifdef CONFIG_PM
@@ -1185,7 +1184,7 @@ static int au1xmmc_resume(struct platform_device *pdev)
static struct platform_driver au1xmmc_driver = {
.probe = au1xmmc_probe,
- .remove = au1xmmc_remove,
+ .remove_new = au1xmmc_remove,
.suspend = au1xmmc_suspend,
.resume = au1xmmc_resume,
.driver = {
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index eea208856ce0..35d8fdea668b 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1431,7 +1431,7 @@ err:
return ret;
}
-static int bcm2835_remove(struct platform_device *pdev)
+static void bcm2835_remove(struct platform_device *pdev)
{
struct bcm2835_host *host = platform_get_drvdata(pdev);
struct mmc_host *mmc = mmc_from_priv(host);
@@ -1449,8 +1449,6 @@ static int bcm2835_remove(struct platform_device *pdev)
dma_release_channel(host->dma_chan_rxtx);
mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id bcm2835_match[] = {
@@ -1461,7 +1459,7 @@ MODULE_DEVICE_TABLE(of, bcm2835_match);
static struct platform_driver bcm2835_driver = {
.probe = bcm2835_probe,
- .remove = bcm2835_remove,
+ .remove_new = bcm2835_remove,
.driver = {
.name = "sdhost-bcm2835",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/cavium-octeon.c b/drivers/mmc/host/cavium-octeon.c
index 12dca91a8ef6..060ec4f4800f 100644
--- a/drivers/mmc/host/cavium-octeon.c
+++ b/drivers/mmc/host/cavium-octeon.c
@@ -13,7 +13,9 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/octeon/octeon.h>
#include "cavium.h"
@@ -294,7 +296,7 @@ error:
return ret;
}
-static int octeon_mmc_remove(struct platform_device *pdev)
+static void octeon_mmc_remove(struct platform_device *pdev)
{
struct cvm_mmc_host *host = platform_get_drvdata(pdev);
u64 dma_cfg;
@@ -309,7 +311,6 @@ static int octeon_mmc_remove(struct platform_device *pdev)
writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
octeon_mmc_set_shared_power(host, 0);
- return 0;
}
static const struct of_device_id octeon_mmc_match[] = {
@@ -325,7 +326,7 @@ MODULE_DEVICE_TABLE(of, octeon_mmc_match);
static struct platform_driver octeon_mmc_driver = {
.probe = octeon_mmc_probe,
- .remove = octeon_mmc_remove,
+ .remove_new = octeon_mmc_remove,
.driver = {
.name = KBUILD_MODNAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index 202b1d6da678..2e2ff984f0b3 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pci.h>
#include "cavium.h"
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index 6d623b2681c3..0aec33b88bef 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -745,7 +745,7 @@ err_free_mmc:
return err;
}
-static int cb710_mmc_exit(struct platform_device *pdev)
+static void cb710_mmc_exit(struct platform_device *pdev)
{
struct cb710_slot *slot = cb710_pdev_to_slot(pdev);
struct mmc_host *mmc = cb710_slot_to_mmc(slot);
@@ -766,13 +766,12 @@ static int cb710_mmc_exit(struct platform_device *pdev)
tasklet_kill(&reader->finish_req_tasklet);
mmc_free_host(mmc);
- return 0;
}
static struct platform_driver cb710_mmc_driver = {
.driver.name = "cb710-mmc",
.probe = cb710_mmc_init,
- .remove = cb710_mmc_exit,
+ .remove_new = cb710_mmc_exit,
#ifdef CONFIG_PM
.suspend = cb710_mmc_suspend,
.resume = cb710_mmc_resume,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 7138dfa065bf..ee3b1a4e0848 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -21,7 +21,6 @@
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/interrupt.h>
@@ -1257,7 +1256,7 @@ static int davinci_mmcsd_probe(struct platform_device *pdev)
host->use_dma = use_dma;
host->mmc_irq = irq;
- host->sdio_irq = platform_get_irq(pdev, 1);
+ host->sdio_irq = platform_get_irq_optional(pdev, 1);
if (host->use_dma) {
ret = davinci_acquire_dma_channels(host);
@@ -1345,7 +1344,7 @@ ioremap_fail:
return ret;
}
-static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
+static void __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
@@ -1354,8 +1353,6 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
davinci_release_dma_channels(host);
clk_disable_unprepare(host->clk);
mmc_free_host(host->mmc);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1402,7 +1399,7 @@ static struct platform_driver davinci_mmcsd_driver = {
.of_match_table = davinci_mmc_dt_ids,
},
.probe = davinci_mmcsd_probe,
- .remove = __exit_p(davinci_mmcsd_remove),
+ .remove_new = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 9f20ac524c8b..698408e8bad0 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -664,15 +664,13 @@ static int dw_mci_exynos_probe(struct platform_device *pdev)
return 0;
}
-static int dw_mci_exynos_remove(struct platform_device *pdev)
+static void dw_mci_exynos_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
dw_mci_pltfm_remove(pdev);
-
- return 0;
}
static const struct dev_pm_ops dw_mci_exynos_pmops = {
@@ -685,7 +683,7 @@ static const struct dev_pm_ops dw_mci_exynos_pmops = {
static struct platform_driver dw_mci_exynos_pltfm_driver = {
.probe = dw_mci_exynos_probe,
- .remove = dw_mci_exynos_remove,
+ .remove_new = dw_mci_exynos_remove,
.driver = {
.name = "dwmmc_exynos",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index 6f22fe054087..e9470c50a348 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -171,7 +171,7 @@ static int dw_mci_hi3798cv200_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, &hi3798cv200_data);
}
-static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
+static void dw_mci_hi3798cv200_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
struct hi3798cv200_priv *priv = host->priv;
@@ -180,8 +180,6 @@ static int dw_mci_hi3798cv200_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->sample_clk);
dw_mci_pltfm_remove(pdev);
-
- return 0;
}
static const struct of_device_id dw_mci_hi3798cv200_match[] = {
@@ -192,7 +190,7 @@ static const struct of_device_id dw_mci_hi3798cv200_match[] = {
MODULE_DEVICE_TABLE(of, dw_mci_hi3798cv200_match);
static struct platform_driver dw_mci_hi3798cv200_driver = {
.probe = dw_mci_hi3798cv200_probe,
- .remove = dw_mci_hi3798cv200_remove,
+ .remove_new = dw_mci_hi3798cv200_remove,
.driver = {
.name = "dwmmc_hi3798cv200",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 2a99f15f527f..b07190ba4b7a 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -371,15 +371,13 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
return 0;
}
-static int dw_mci_rockchip_remove(struct platform_device *pdev)
+static void dw_mci_rockchip_remove(struct platform_device *pdev)
{
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
dw_mci_pltfm_remove(pdev);
-
- return 0;
}
static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
@@ -392,7 +390,7 @@ static const struct dev_pm_ops dw_mci_rockchip_dev_pm_ops = {
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.probe = dw_mci_rockchip_probe,
- .remove = dw_mci_rockchip_remove,
+ .remove_new = dw_mci_rockchip_remove,
.driver = {
.name = "dwmmc_rockchip",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 1846a05210e3..f379ce5b582d 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1163,7 +1163,7 @@ err_free_host:
return ret;
}
-static int jz4740_mmc_remove(struct platform_device *pdev)
+static void jz4740_mmc_remove(struct platform_device *pdev)
{
struct jz4740_mmc_host *host = platform_get_drvdata(pdev);
@@ -1179,8 +1179,6 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
jz4740_mmc_release_dma_channels(host);
mmc_free_host(host->mmc);
-
- return 0;
}
static int jz4740_mmc_suspend(struct device *dev)
@@ -1198,7 +1196,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
static struct platform_driver jz4740_mmc_driver = {
.probe = jz4740_mmc_probe,
- .remove = jz4740_mmc_remove,
+ .remove_new = jz4740_mmc_remove,
.driver = {
.name = "jz4740-mmc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/litex_mmc.c b/drivers/mmc/host/litex_mmc.c
index 9af6b0902efe..4ec8072dc60b 100644
--- a/drivers/mmc/host/litex_mmc.c
+++ b/drivers/mmc/host/litex_mmc.c
@@ -629,12 +629,11 @@ static int litex_mmc_probe(struct platform_device *pdev)
return 0;
}
-static int litex_mmc_remove(struct platform_device *pdev)
+static void litex_mmc_remove(struct platform_device *pdev)
{
struct litex_mmc_host *host = platform_get_drvdata(pdev);
mmc_remove_host(host->mmc);
- return 0;
}
static const struct of_device_id litex_match[] = {
@@ -645,7 +644,7 @@ MODULE_DEVICE_TABLE(of, litex_match);
static struct platform_driver litex_mmc_driver = {
.probe = litex_mmc_probe,
- .remove = litex_mmc_remove,
+ .remove_new = litex_mmc_remove,
.driver = {
.name = "litex-mmc",
.of_match_table = litex_match,
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index ee9a25b900ae..9837dab096e6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
@@ -948,9 +948,6 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
return IRQ_NONE;
}
- if (WARN_ON(!host))
- return IRQ_NONE;
-
/* ack all raised interrupts */
writel(status, host->regs + SD_EMMC_STATUS);
@@ -1297,7 +1294,7 @@ err_init_clk:
return ret;
}
-static int meson_mmc_remove(struct platform_device *pdev)
+static void meson_mmc_remove(struct platform_device *pdev)
{
struct meson_host *host = dev_get_drvdata(&pdev->dev);
@@ -1308,8 +1305,6 @@ static int meson_mmc_remove(struct platform_device *pdev)
free_irq(host->irq, host);
clk_disable_unprepare(host->mmc_clk);
-
- return 0;
}
static const struct meson_mmc_data meson_gx_data = {
@@ -1340,7 +1335,7 @@ MODULE_DEVICE_TABLE(of, meson_mmc_of_match);
static struct platform_driver meson_mmc_driver = {
.probe = meson_mmc_probe,
- .remove = meson_mmc_remove,
+ .remove_new = meson_mmc_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 97168cdfa8e9..528ec8166e7c 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -880,7 +880,7 @@ err_disable_pclk:
return ret;
}
-static int meson_mx_sdhc_remove(struct platform_device *pdev)
+static void meson_mx_sdhc_remove(struct platform_device *pdev)
{
struct meson_mx_sdhc_host *host = platform_get_drvdata(pdev);
@@ -889,8 +889,6 @@ static int meson_mx_sdhc_remove(struct platform_device *pdev)
meson_mx_sdhc_disable_clks(host->mmc);
clk_disable_unprepare(host->pclk);
-
- return 0;
}
static const struct meson_mx_sdhc_data meson_mx_sdhc_data_meson8 = {
@@ -925,7 +923,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_sdhc_of_match);
static struct platform_driver meson_mx_sdhc_driver = {
.probe = meson_mx_sdhc_probe,
- .remove = meson_mx_sdhc_remove,
+ .remove_new = meson_mx_sdhc_remove,
.driver = {
.name = "meson-mx-sdhc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 3a19a05ef55a..a11577f2ee69 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -728,7 +728,7 @@ error_unregister_slot_pdev:
return ret;
}
-static int meson_mx_mmc_remove(struct platform_device *pdev)
+static void meson_mx_mmc_remove(struct platform_device *pdev)
{
struct meson_mx_mmc_host *host = platform_get_drvdata(pdev);
struct device *slot_dev = mmc_dev(host->mmc);
@@ -743,8 +743,6 @@ static int meson_mx_mmc_remove(struct platform_device *pdev)
clk_disable_unprepare(host->core_clk);
mmc_free_host(host->mmc);
-
- return 0;
}
static const struct of_device_id meson_mx_mmc_of_match[] = {
@@ -756,7 +754,7 @@ MODULE_DEVICE_TABLE(of, meson_mx_mmc_of_match);
static struct platform_driver meson_mx_mmc_driver = {
.probe = meson_mx_mmc_probe,
- .remove = meson_mx_mmc_remove,
+ .remove_new = meson_mx_mmc_remove,
.driver = {
.name = "meson-mx-sdio",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 769b34afa835..dda756a56379 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -764,7 +764,8 @@ static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
}
retries--;
}
- dev_dbg(mmc_dev(host->mmc), "no busy signalling in time\n");
+ dev_dbg(mmc_dev(host->mmc),
+ "no busy signalling in time CMD%02x\n", cmd->opcode);
ux500_busy_clear_mask_done(host);
break;
@@ -786,7 +787,8 @@ static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
} else {
dev_dbg(mmc_dev(host->mmc),
- "lost busy status when waiting for busy start IRQ\n");
+ "lost busy status when waiting for busy start IRQ CMD%02x\n",
+ cmd->opcode);
cancel_delayed_work(&host->ux500_busy_timeout_work);
ux500_busy_clear_mask_done(host);
}
@@ -800,13 +802,14 @@ static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
ux500_busy_clear_mask_done(host);
} else {
dev_dbg(mmc_dev(host->mmc),
- "busy status still asserted when handling busy end IRQ - will keep waiting\n");
+ "busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
+ cmd->opcode);
}
break;
default:
- dev_dbg(mmc_dev(host->mmc), "fell through on state %d\n",
- host->busy_state);
+ dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
+ host->busy_state, cmd->opcode);
break;
}
@@ -1533,6 +1536,20 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
}
}
+static char *ux500_state_str(struct mmci_host *host)
+{
+ switch (host->busy_state) {
+ case MMCI_BUSY_WAITING_FOR_START_IRQ:
+ return "waiting for start IRQ";
+ case MMCI_BUSY_WAITING_FOR_END_IRQ:
+ return "waiting for end IRQ";
+ case MMCI_BUSY_DONE:
+ return "not waiting for IRQs";
+ default:
+ return "unknown";
+ }
+}
+
/*
* This busy timeout worker is used to "kick" the command IRQ if a
* busy detect IRQ fails to appear in reasonable time. Only used on
@@ -1548,12 +1565,18 @@ static void ux500_busy_timeout_work(struct work_struct *work)
spin_lock_irqsave(&host->lock, flags);
if (host->cmd) {
- dev_dbg(mmc_dev(host->mmc), "timeout waiting for busy IRQ\n");
-
/* If we are still busy let's tag on a cmd-timeout error. */
status = readl(host->base + MMCISTATUS);
- if (status & host->variant->busy_detect_flag)
+ if (status & host->variant->busy_detect_flag) {
status |= MCI_CMDTIMEOUT;
+ dev_err(mmc_dev(host->mmc),
+ "timeout in state %s still busy with CMD%02x\n",
+ ux500_state_str(host), host->cmd->opcode);
+ } else {
+ dev_err(mmc_dev(host->mmc),
+ "timeout in state %s waiting for busy CMD%02x\n",
+ ux500_state_str(host), host->cmd->opcode);
+ }
mmci_cmd_irq(host, host->cmd, status);
}
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index d0d6ffcf78d4..5cfdd3a86e54 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -687,13 +687,11 @@ out_mmc:
return ret;
}
-static int moxart_remove(struct platform_device *pdev)
+static void moxart_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
struct moxart_host *host = mmc_priv(mmc);
- dev_set_drvdata(&pdev->dev, NULL);
-
if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
@@ -705,8 +703,6 @@ static int moxart_remove(struct platform_device *pdev)
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id moxart_mmc_match[] = {
@@ -718,7 +714,7 @@ MODULE_DEVICE_TABLE(of, moxart_mmc_match);
static struct platform_driver moxart_mmc_driver = {
.probe = moxart_probe,
- .remove = moxart_remove,
+ .remove_new = moxart_remove,
.driver = {
.name = "mmc-moxart",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
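
This moxart hunk is the first of many identical conversions in this series: platform drivers move from the int-returning .remove callback to the void-returning .remove_new, because a non-zero return from .remove was only logged by the driver core and the device was torn down regardless, so the trailing "return 0" carried no information. A minimal, self-contained sketch of the pattern with a hypothetical driver (all foo_* names are placeholders, not from the tree):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* acquire resources, preferably with devm_* helpers */
	return 0;
}

/* New style: a void remove callback, registered via .remove_new; nothing
 * can be silently dropped, unlike the old int-returning .remove. */
static void foo_remove(struct platform_device *pdev)
{
	/* release anything not covered by devm */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver = {
		.name = "foo-example",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

The same transformation repeats below for mtk-sd, mvsdio, mxcmmc, mxs-mmc, omap, omap_hsmmc, owl, pxamci, the Renesas SDHI glue and the SDHCI platform drivers.
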
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 02403ff99e0d..5392200cfdf7 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -12,9 +12,7 @@
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -2887,7 +2885,7 @@ host_free:
return ret;
}
-static int msdc_drv_remove(struct platform_device *pdev)
+static void msdc_drv_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct msdc_host *host;
@@ -2911,8 +2909,6 @@ static int msdc_drv_remove(struct platform_device *pdev)
host->dma.bd, host->dma.bd_addr);
mmc_free_host(mmc);
-
- return 0;
}
static void msdc_save_reg(struct msdc_host *host)
@@ -3054,7 +3050,7 @@ static const struct dev_pm_ops msdc_dev_pm_ops = {
static struct platform_driver mt_msdc_driver = {
.probe = msdc_drv_probe,
- .remove = msdc_drv_remove,
+ .remove_new = msdc_drv_remove,
.driver = {
.name = "mtk-msdc",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index b4f6a0a2fcb5..ca01b7d204ba 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -796,7 +796,7 @@ out:
return ret;
}
-static int mvsd_remove(struct platform_device *pdev)
+static void mvsd_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -809,8 +809,6 @@ static int mvsd_remove(struct platform_device *pdev)
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id mvsdio_dt_ids[] = {
@@ -821,7 +819,7 @@ MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
static struct platform_driver mvsd_driver = {
.probe = mvsd_probe,
- .remove = mvsd_remove,
+ .remove_new = mvsd_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 668f865f3efb..5b3ab0e20505 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,7 +31,6 @@
#include <linux/dmaengine.h>
#include <linux/types.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/mmc/slot-gpio.h>
@@ -989,7 +988,6 @@ static int mxcmci_probe(struct platform_device *pdev)
pr_info("i.MX/MPC512x SDHC driver\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -1000,7 +998,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
- host->base = devm_ioremap_resource(&pdev->dev, res);
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out_free;
@@ -1164,7 +1162,7 @@ out_free:
return ret;
}
-static int mxcmci_remove(struct platform_device *pdev)
+static void mxcmci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxcmci_host *host = mmc_priv(mmc);
@@ -1181,8 +1179,6 @@ static int mxcmci_remove(struct platform_device *pdev)
clk_disable_unprepare(host->clk_ipg);
mmc_free_host(mmc);
-
- return 0;
}
static int mxcmci_suspend(struct device *dev)
@@ -1216,7 +1212,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
.probe = mxcmci_probe,
- .remove = mxcmci_remove,
+ .remove_new = mxcmci_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
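
Besides the .remove_new conversion, mxcmmc (and later omap_hsmmc and pxamci) folds platform_get_resource() plus devm_ioremap_resource() into a single devm_platform_get_and_ioremap_resource() call. A minimal hypothetical probe showing the helper; foo_probe and the register usage are placeholders:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * Looks up IORESOURCE_MEM index 0 and ioremaps it in one step;
	 * res is also handed back for callers that need start/size.
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program registers via base, use resource_size(res) ... */
	return 0;
}

Note how the pxamci hunk also moves the host->res assignment after the call, so the pointer is only stored once the mapping is known to be valid.
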
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 8c3655d3be96..6751da9b60f9 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -674,7 +673,7 @@ out_mmc_free:
return ret;
}
-static int mxs_mmc_remove(struct platform_device *pdev)
+static void mxs_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxs_mmc_host *host = mmc_priv(mmc);
@@ -688,8 +687,6 @@ static int mxs_mmc_remove(struct platform_device *pdev)
clk_disable_unprepare(ssp->clk);
mmc_free_host(mmc);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -717,7 +714,7 @@ static SIMPLE_DEV_PM_OPS(mxs_mmc_pm_ops, mxs_mmc_suspend, mxs_mmc_resume);
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
- .remove = mxs_mmc_remove,
+ .remove_new = mxs_mmc_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 6a259563690d..9fb8995b43a1 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1506,7 +1506,7 @@ err_free_iclk:
return ret;
}
-static int mmc_omap_remove(struct platform_device *pdev)
+static void mmc_omap_remove(struct platform_device *pdev)
{
struct mmc_omap_host *host = platform_get_drvdata(pdev);
int i;
@@ -1532,8 +1532,6 @@ static int mmc_omap_remove(struct platform_device *pdev)
dma_release_channel(host->dma_rx);
destroy_workqueue(host->mmc_omap_wq);
-
- return 0;
}
#if IS_BUILTIN(CONFIG_OF)
@@ -1546,7 +1544,7 @@ MODULE_DEVICE_TABLE(of, mmc_omap_match);
static struct platform_driver mmc_omap_driver = {
.probe = mmc_omap_probe,
- .remove = mmc_omap_remove,
+ .remove_new = mmc_omap_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 1e0f2d7774bd..e120aeb869b8 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1790,14 +1790,11 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
return -ENXIO;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1982,7 +1979,7 @@ err:
return ret;
}
-static int omap_hsmmc_remove(struct platform_device *pdev)
+static void omap_hsmmc_remove(struct platform_device *pdev)
{
struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
@@ -2000,8 +1997,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
clk_disable_unprepare(host->dbclk);
mmc_free_host(host->mmc);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2126,7 +2121,7 @@ static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
static struct platform_driver omap_hsmmc_driver = {
.probe = omap_hsmmc_probe,
- .remove = omap_hsmmc_remove,
+ .remove_new = omap_hsmmc_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 1bf22b08b373..fc08f25c34eb 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -16,8 +16,9 @@
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
@@ -667,7 +668,7 @@ err_free_host:
return ret;
}
-static int owl_mmc_remove(struct platform_device *pdev)
+static void owl_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct owl_mmc_host *owl_host = mmc_priv(mmc);
@@ -676,8 +677,6 @@ static int owl_mmc_remove(struct platform_device *pdev)
disable_irq(owl_host->irq);
dma_release_channel(owl_host->dma);
mmc_free_host(mmc);
-
- return 0;
}
static const struct of_device_id owl_mmc_of_match[] = {
@@ -693,7 +692,7 @@ static struct platform_driver owl_mmc_driver = {
.of_match_table = owl_mmc_of_match,
},
.probe = owl_mmc_probe,
- .remove = owl_mmc_remove,
+ .remove_new = owl_mmc_remove,
};
module_platform_driver(owl_mmc_driver);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 2a988f942b6c..fae3192c3a14 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -30,7 +30,6 @@
#include <linux/gpio/consumer.h>
#include <linux/gfp.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/soc/pxa/cpu.h>
#include <linux/sizes.h>
@@ -612,7 +611,6 @@ static int pxamci_probe(struct platform_device *pdev)
struct resource *r;
int ret, irq;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -685,14 +683,14 @@ static int pxamci_probe(struct platform_device *pdev)
}
spin_lock_init(&host->lock);
- host->res = r;
host->imask = MMC_I_MASK_ALL;
- host->base = devm_ioremap_resource(dev, r);
+ host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
}
+ host->res = r;
/*
* Ensure that the host controller is shut down, and setup
@@ -784,7 +782,7 @@ out:
return ret;
}
-static int pxamci_remove(struct platform_device *pdev)
+static void pxamci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
@@ -808,13 +806,11 @@ static int pxamci_remove(struct platform_device *pdev)
mmc_free_host(mmc);
}
-
- return 0;
}
static struct platform_driver pxamci_driver = {
.probe = pxamci_probe,
- .remove = pxamci_remove,
+ .remove_new = pxamci_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 68da3da9e2e5..c1fb9740eab0 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -101,5 +101,5 @@ int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops,
const struct renesas_sdhi_of_data *of_data,
const struct renesas_sdhi_quirks *quirks);
-int renesas_sdhi_remove(struct platform_device *pdev);
+void renesas_sdhi_remove(struct platform_device *pdev);
#endif
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 345934e4f59e..c675dec587ef 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -983,12 +983,12 @@ int renesas_sdhi_probe(struct platform_device *pdev,
}
- host->write16_hook = renesas_sdhi_write16_hook;
- host->clk_enable = renesas_sdhi_clk_enable;
- host->clk_disable = renesas_sdhi_clk_disable;
- host->set_clock = renesas_sdhi_set_clock;
- host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
- host->dma_ops = dma_ops;
+ host->write16_hook = renesas_sdhi_write16_hook;
+ host->clk_enable = renesas_sdhi_clk_enable;
+ host->clk_disable = renesas_sdhi_clk_disable;
+ host->set_clock = renesas_sdhi_set_clock;
+ host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
if (sdhi_has_quirk(priv, hs400_disabled))
host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
@@ -1006,6 +1006,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2;
host->reset = renesas_sdhi_reset;
+ } else {
+ host->sdcard_irq_mask_all = TMIO_MASK_ALL;
}
/* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
@@ -1100,9 +1102,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->ops.hs400_complete = renesas_sdhi_hs400_complete;
}
- ret = tmio_mmc_host_probe(host);
- if (ret < 0)
- goto edisclk;
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
num_irqs = platform_irq_count(pdev);
if (num_irqs < 0) {
@@ -1129,6 +1129,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
goto eirq;
}
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
dev_info(&pdev->dev, "%s base at %pa, max clock rate %u MHz\n",
mmc_hostname(host->mmc), &res->start, host->mmc->f_max / 1000000);
@@ -1145,15 +1149,13 @@ efree:
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
-int renesas_sdhi_remove(struct platform_device *pdev)
+void renesas_sdhi_remove(struct platform_device *pdev)
{
struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
renesas_sdhi_clk_disable(host);
tmio_mmc_host_free(host);
-
- return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
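
Two related changes sit in the renesas_sdhi_core.c hunks: every variant now gets a defined host->sdcard_irq_mask_all (falling back to TMIO_MASK_ALL where the R-Car Gen2+ value does not apply), and tmio_mmc_host_probe() moves after the IRQ request loop, with all card interrupts masked first, so the IRQ lines are claimed and the controller quiesced before the host is registered and can start raising interrupts. Roughly, the tail of renesas_sdhi_probe() now reads as below; the elided IRQ loop is untouched by this diff and only summarised here:

	/* 1. Mask every card interrupt while the IRQ lines are still unclaimed. */
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);

	/* 2. platform_irq_count() / request each IRQ line (unchanged, elided). */

	/* 3. Only now register the host with the core. */
	ret = tmio_mmc_host_probe(host);
	if (ret < 0)
		goto edisclk;
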
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 9ab813903b2c..53d34c3eddce 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -15,7 +15,8 @@
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@@ -609,7 +610,7 @@ static struct platform_driver renesas_internal_dmac_sdhi_driver = {
.of_match_table = renesas_sdhi_internal_dmac_of_match,
},
.probe = renesas_sdhi_internal_dmac_probe,
- .remove = renesas_sdhi_remove,
+ .remove_new = renesas_sdhi_remove,
};
module_platform_driver(renesas_internal_dmac_sdhi_driver);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index b559ad38b667..9cf7f9feab72 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -15,7 +15,8 @@
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>
@@ -470,7 +471,7 @@ static struct platform_driver renesas_sys_dmac_sdhi_driver = {
.of_match_table = renesas_sdhi_sys_dmac_of_match,
},
.probe = renesas_sdhi_sys_dmac_probe,
- .remove = renesas_sdhi_remove,
+ .remove_new = renesas_sdhi_remove,
};
module_platform_driver(renesas_sys_dmac_sdhi_driver);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 8098726dcc0b..87d78432a1e0 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -1523,15 +1523,12 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
return 0;
}
-static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
+static void rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
{
struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
- if (!host)
- return 0;
-
pcr = host->pcr;
pcr->slots[RTSX_SD_CARD].p_dev = NULL;
pcr->slots[RTSX_SD_CARD].card_event = NULL;
@@ -1566,8 +1563,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
dev_dbg(&(pdev->dev),
": Realtek PCI-E SDMMC controller has been removed\n");
-
- return 0;
}
static const struct platform_device_id rtsx_pci_sdmmc_ids[] = {
@@ -1581,7 +1576,7 @@ MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids);
static struct platform_driver rtsx_pci_sdmmc_driver = {
.probe = rtsx_pci_sdmmc_drv_probe,
- .remove = rtsx_pci_sdmmc_drv_remove,
+ .remove_new = rtsx_pci_sdmmc_drv_remove,
.id_table = rtsx_pci_sdmmc_ids,
.driver = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 2c650cd58693..ded9b6849e35 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1379,13 +1379,13 @@ static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev)
return 0;
}
-static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
+static void rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
{
struct rtsx_usb_sdmmc *host = platform_get_drvdata(pdev);
struct mmc_host *mmc;
if (!host)
- return 0;
+ return;
mmc = host->mmc;
host->host_removal = true;
@@ -1415,8 +1415,6 @@ static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev)
dev_dbg(&(pdev->dev),
": Realtek USB SD/MMC module has been removed\n");
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1455,7 +1453,7 @@ MODULE_DEVICE_TABLE(platform, rtsx_usb_sdmmc_ids);
static struct platform_driver rtsx_usb_sdmmc_driver = {
.probe = rtsx_usb_sdmmc_drv_probe,
- .remove = rtsx_usb_sdmmc_drv_remove,
+ .remove_new = rtsx_usb_sdmmc_drv_remove,
.id_table = rtsx_usb_sdmmc_ids,
.driver = {
.name = "rtsx_usb_sdmmc",
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index edf2e6c14dc6..acf5fc3ad7e4 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -917,7 +917,7 @@ err_free:
return err;
}
-static int sdhci_acpi_remove(struct platform_device *pdev)
+static void sdhci_acpi_remove(struct platform_device *pdev)
{
struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
@@ -939,8 +939,6 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
c->slot->free_slot(pdev);
sdhci_free_host(c->host);
-
- return 0;
}
static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
@@ -1033,7 +1031,7 @@ static struct platform_driver sdhci_acpi_driver = {
.pm = &sdhci_acpi_pm_ops,
},
.probe = sdhci_acpi_probe,
- .remove = sdhci_acpi_remove,
+ .remove_new = sdhci_acpi_remove,
};
module_platform_driver(sdhci_acpi_driver);
diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c
index 6a93a54fe067..cb9152c6a65d 100644
--- a/drivers/mmc/host/sdhci-bcm-kona.c
+++ b/drivers/mmc/host/sdhci-bcm-kona.c
@@ -11,7 +11,6 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
#include "sdhci-pltfm.h"
@@ -311,6 +310,16 @@ err_pltfm_free:
return ret;
}
+static void sdhci_bcm_kona_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct clk *clk = pltfm_host->clk;
+
+ sdhci_pltfm_remove(pdev);
+ clk_disable_unprepare(clk);
+}
+
static struct platform_driver sdhci_bcm_kona_driver = {
.driver = {
.name = "sdhci-kona",
@@ -319,7 +328,7 @@ static struct platform_driver sdhci_bcm_kona_driver = {
.of_match_table = sdhci_bcm_kona_of_match,
},
.probe = sdhci_bcm_kona_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_bcm_kona_remove,
};
module_platform_driver(sdhci_bcm_kona_driver);
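
sdhci-bcm-kona previously pointed .remove at the generic sdhci_pltfm_unregister(). Its replacement, sdhci_pltfm_remove() (see the sdhci-pltfm.c hunk further down), no longer disables pltfm_host->clk, so drivers that enabled a clock by hand now need a small remove of their own. The shape, sketched here for a hypothetical foo driver, mirrors sdhci_bcm_kona_remove() above; the one subtlety worth a comment is the ordering:

static void foo_sdhci_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	/*
	 * Grab the clk pointer first: sdhci_pltfm_remove() ends in
	 * sdhci_pltfm_free(), which frees the host (and pltfm_host with it).
	 */
	struct clk *clk = pltfm_host->clk;

	sdhci_pltfm_remove(pdev);
	clk_disable_unprepare(clk);
}

Drivers that instead switch their probe to devm_clk_get_enabled() (sdhci-dove and sdhci-iproc below) avoid the custom remove entirely.
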
diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
index 4c22337199cf..c23251bb95f3 100644
--- a/drivers/mmc/host/sdhci-brcmstb.c
+++ b/drivers/mmc/host/sdhci-brcmstb.c
@@ -264,23 +264,17 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible);
- clk = devm_clk_get_optional(&pdev->dev, NULL);
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&pdev->dev, PTR_ERR(clk),
- "Failed to get clock from Device Tree\n");
-
- res = clk_prepare_enable(clk);
- if (res)
- return res;
+ "Failed to get and enable clock from Device Tree\n");
memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
brcmstb_pdata.ops = match_priv->ops;
host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
sizeof(struct sdhci_brcmstb_priv));
- if (IS_ERR(host)) {
- res = PTR_ERR(host);
- goto err_clk;
- }
+ if (IS_ERR(host))
+ return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
@@ -369,9 +363,7 @@ add_host:
err:
sdhci_pltfm_free(pdev);
-err_clk:
clk_disable_unprepare(base_clk);
- clk_disable_unprepare(clk);
return res;
}
@@ -430,7 +422,7 @@ static struct platform_driver sdhci_brcmstb_driver = {
.of_match_table = of_match_ptr(sdhci_brcm_of_match),
},
.probe = sdhci_brcmstb_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
.shutdown = sdhci_brcmstb_shutdown,
};
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index d2f625054689..be1505e8c536 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -11,7 +11,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include "sdhci-pltfm.h"
@@ -487,14 +487,10 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT;
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
data = of_device_get_match_data(dev);
if (!data)
data = &sdhci_cdns_drv_data;
@@ -502,10 +498,8 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
host = sdhci_pltfm_init(pdev, &data->pltfm_data,
struct_size(priv, phy_params, nr_phy_params));
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto disable_clk;
- }
+ if (IS_ERR(host))
+ return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
pltfm_host->clk = clk;
@@ -556,9 +550,6 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
return 0;
free:
sdhci_pltfm_free(pdev);
-disable_clk:
- clk_disable_unprepare(clk);
-
return ret;
}
@@ -617,7 +608,7 @@ static struct platform_driver sdhci_cdns_driver = {
.of_match_table = sdhci_cdns_match,
},
.probe = sdhci_cdns_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_cdns_driver);
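
sdhci-brcmstb and sdhci-cadence (and sdhci-dove, sdhci-iproc, sdhci-of-sparx5 and sdhci-pxav2 below) switch to devm_clk_get_enabled()/devm_clk_get_optional_enabled(). These helpers combine clk_get() with clk_prepare_enable() and register a devres action that disables and puts the clock on probe failure or driver detach, which is what lets the error-path labels and clk_disable_unprepare() calls disappear from these hunks. A minimal hypothetical probe; foo_probe and the unnamed clock are placeholders:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* Get + prepare + enable; devres undoes all of it automatically. */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get and enable clock\n");

	/* ... rest of probe; no error label needed for the clock ... */
	return 0;
}

The _optional variant returns NULL rather than an error when the clock is simply absent, which is why sdhci-pxav2 can test !clk and fall back to an unnamed clock.
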
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 5e5bf82e5976..88ec23417808 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -75,10 +75,7 @@ static int sdhci_dove_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
- pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
-
- if (!IS_ERR(pltfm_host->clk))
- clk_prepare_enable(pltfm_host->clk);
+ pltfm_host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
ret = mmc_of_parse(host->mmc);
if (ret)
@@ -91,7 +88,6 @@ static int sdhci_dove_probe(struct platform_device *pdev)
return 0;
err_sdhci_add:
- clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
return ret;
}
@@ -110,7 +106,7 @@ static struct platform_driver sdhci_dove_driver = {
.of_match_table = sdhci_dove_of_match_table,
},
.probe = sdhci_dove_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_dove_driver);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index eebf94604a7f..3b8030f3552a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -22,7 +22,7 @@
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include "sdhci-cqhci.h"
@@ -171,8 +171,8 @@
#define ESDHC_FLAG_HS400 BIT(9)
/*
* The IP has errata ERR010450
- * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't
- * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
+ * uSDHC: At 1.8V due to the I/O timing limit, for SDR mode, SD card
+ * clock can't exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
*/
#define ESDHC_FLAG_ERR010450 BIT(10)
/* The IP supports HS400ES mode */
@@ -961,7 +961,8 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
+ if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) &&
+ (!(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V))) {
unsigned int max_clock;
max_clock = imx_data->is_ddr ? 45000000 : 150000000;
@@ -1802,7 +1803,7 @@ free_sdhci:
return err;
}
-static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
+static void sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1824,8 +1825,6 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
sdhci_pltfm_free(pdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1986,7 +1985,7 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.pm = &sdhci_esdhc_pmops,
},
.probe = sdhci_esdhc_imx_probe,
- .remove = sdhci_esdhc_imx_remove,
+ .remove_new = sdhci_esdhc_imx_remove,
};
module_platform_driver(sdhci_esdhc_imx_driver);
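
The functional change in sdhci-esdhc-imx narrows erratum ERR010450: the I/O-timing clock limit only applies at 1.8 V signalling, so hosts flagged SDHCI_QUIRK2_NO_1_8_V are no longer clamped. After the hunk the clamp reads roughly as follows; the clamping line itself comes from the unchanged context the diff does not show and should be treated as an assumption:

	if ((imx_data->socdata->flags & ESDHC_FLAG_ERR010450) &&
	    !(host->quirks2 & SDHCI_QUIRK2_NO_1_8_V)) {
		unsigned int max_clock;

		/* At 1.8 V: 45 MHz max in DDR mode, 150 MHz max in SDR modes. */
		max_clock = imx_data->is_ddr ? 45000000 : 150000000;

		clock = min(clock, max_clock);	/* assumed surrounding context */
	}
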
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
index 05926bf5ecf9..a07f8333cd6b 100644
--- a/drivers/mmc/host/sdhci-esdhc-mcf.c
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -489,7 +489,7 @@ err_exit:
return err;
}
-static int sdhci_esdhc_mcf_remove(struct platform_device *pdev)
+static void sdhci_esdhc_mcf_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -502,8 +502,6 @@ static int sdhci_esdhc_mcf_remove(struct platform_device *pdev)
clk_disable_unprepare(mcf_data->clk_per);
sdhci_pltfm_free(pdev);
-
- return 0;
}
static struct platform_driver sdhci_esdhc_mcf_driver = {
@@ -512,7 +510,7 @@ static struct platform_driver sdhci_esdhc_mcf_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = sdhci_esdhc_mcf_probe,
- .remove = sdhci_esdhc_mcf_remove,
+ .remove_new = sdhci_esdhc_mcf_remove,
};
module_platform_driver(sdhci_esdhc_mcf_driver);
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 86eb0045515e..10235fdff246 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/mmc/host.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "sdhci-pltfm.h"
struct sdhci_iproc_data {
@@ -386,16 +386,11 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
host->mmc->caps |= iproc_host->data->mmc_caps;
if (dev->of_node) {
- pltfm_host->clk = devm_clk_get(dev, NULL);
+ pltfm_host->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
goto err;
}
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret) {
- dev_err(dev, "failed to enable host clk\n");
- goto err;
- }
}
if (iproc_host->data->missing_caps) {
@@ -406,13 +401,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
ret = sdhci_add_host(host);
if (ret)
- goto err_clk;
+ goto err;
return 0;
-err_clk:
- if (dev->of_node)
- clk_disable_unprepare(pltfm_host->clk);
err:
sdhci_pltfm_free(pdev);
return ret;
@@ -432,7 +424,7 @@ static struct platform_driver sdhci_iproc_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_iproc_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
.shutdown = sdhci_iproc_shutdown,
};
module_platform_driver(sdhci_iproc_driver);
diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c
index 148b37ac6564..83706edc9796 100644
--- a/drivers/mmc/host/sdhci-milbeaut.c
+++ b/drivers/mmc/host/sdhci-milbeaut.c
@@ -313,7 +313,7 @@ err:
return ret;
}
-static int sdhci_milbeaut_remove(struct platform_device *pdev)
+static void sdhci_milbeaut_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct f_sdhost_priv *priv = sdhci_priv(host);
@@ -326,18 +326,16 @@ static int sdhci_milbeaut_remove(struct platform_device *pdev)
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver sdhci_milbeaut_driver = {
.driver = {
.name = "sdhci-milbeaut",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(mlb_dt_ids),
+ .of_match_table = mlb_dt_ids,
},
.probe = sdhci_milbeaut_probe,
- .remove = sdhci_milbeaut_remove,
+ .remove_new = sdhci_milbeaut_remove,
};
module_platform_driver(sdhci_milbeaut_driver);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 1c935b5bafe1..668e0aceeeba 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -6,7 +6,6 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
@@ -15,6 +14,7 @@
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
@@ -2668,7 +2668,7 @@ pltfm_free:
return ret;
}
-static int sdhci_msm_remove(struct platform_device *pdev)
+static void sdhci_msm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -2687,7 +2687,6 @@ static int sdhci_msm_remove(struct platform_device *pdev)
if (!IS_ERR(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
sdhci_pltfm_free(pdev);
- return 0;
}
static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
@@ -2740,7 +2739,7 @@ static const struct dev_pm_ops sdhci_msm_pm_ops = {
static struct platform_driver sdhci_msm_driver = {
.probe = sdhci_msm_probe,
- .remove = sdhci_msm_remove,
+ .remove_new = sdhci_msm_remove,
.driver = {
.name = "sdhci_msm",
.of_match_table = sdhci_msm_dt_match,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 294dd605fd2b..5edd024347bd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -18,11 +18,11 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <linux/of.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include "cqhci.h"
@@ -2016,12 +2016,13 @@ err_pltfm_free:
return ret;
}
-static int sdhci_arasan_remove(struct platform_device *pdev)
+static void sdhci_arasan_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
struct clk *clk_ahb = sdhci_arasan->clk_ahb;
+ struct clk *clk_xin = pltfm_host->clk;
if (!IS_ERR(sdhci_arasan->phy)) {
if (sdhci_arasan->is_phy_on)
@@ -2031,11 +2032,10 @@ static int sdhci_arasan_remove(struct platform_device *pdev)
sdhci_arasan_unregister_sdclk(&pdev->dev);
- sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
+ clk_disable_unprepare(clk_xin);
clk_disable_unprepare(clk_ahb);
-
- return 0;
}
static struct platform_driver sdhci_arasan_driver = {
@@ -2046,7 +2046,7 @@ static struct platform_driver sdhci_arasan_driver = {
.pm = &sdhci_arasan_dev_pm_ops,
},
.probe = sdhci_arasan_probe,
- .remove = sdhci_arasan_remove,
+ .remove_new = sdhci_arasan_remove,
};
module_platform_driver(sdhci_arasan_driver);
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 25b4073f698b..42d54532cabe 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -450,22 +450,19 @@ err_pltfm_free:
return ret;
}
-static int aspeed_sdhci_remove(struct platform_device *pdev)
+static void aspeed_sdhci_remove(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
- int dead = 0;
host = platform_get_drvdata(pdev);
pltfm_host = sdhci_priv(host);
- sdhci_remove_host(host, dead);
+ sdhci_remove_host(host, 0);
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
-
- return 0;
}
static const struct aspeed_sdhci_pdata ast2400_sdhci_pdata = {
@@ -521,7 +518,7 @@ static struct platform_driver aspeed_sdhci_driver = {
.of_match_table = aspeed_sdhci_of_match,
},
.probe = aspeed_sdhci_probe,
- .remove = aspeed_sdhci_remove,
+ .remove_new = aspeed_sdhci_remove,
};
static int aspeed_sdc_probe(struct platform_device *pdev)
@@ -574,13 +571,11 @@ err_clk:
return ret;
}
-static int aspeed_sdc_remove(struct platform_device *pdev)
+static void aspeed_sdc_remove(struct platform_device *pdev)
{
struct aspeed_sdc *sdc = dev_get_drvdata(&pdev->dev);
clk_disable_unprepare(sdc->clk);
-
- return 0;
}
static const struct of_device_id aspeed_sdc_of_match[] = {
@@ -600,7 +595,7 @@ static struct platform_driver aspeed_sdc_driver = {
.of_match_table = aspeed_sdc_of_match,
},
.probe = aspeed_sdc_probe,
- .remove = aspeed_sdc_remove,
+ .remove_new = aspeed_sdc_remove,
};
#if defined(CONFIG_MMC_SDHCI_OF_ASPEED_TEST)
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index cd0134580a90..23a9faad2ff8 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -17,7 +17,7 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -443,7 +443,7 @@ sdhci_pltfm_free:
return ret;
}
-static int sdhci_at91_remove(struct platform_device *pdev)
+static void sdhci_at91_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -456,13 +456,11 @@ static int sdhci_at91_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
clk_disable_unprepare(gck);
clk_disable_unprepare(hclock);
clk_disable_unprepare(mainck);
-
- return 0;
}
static struct platform_driver sdhci_at91_driver = {
@@ -473,7 +471,7 @@ static struct platform_driver sdhci_at91_driver = {
.pm = &sdhci_at91_dev_pm_ops,
},
.probe = sdhci_at91_probe,
- .remove = sdhci_at91_remove,
+ .remove_new = sdhci_at91_remove,
};
module_platform_driver(sdhci_at91_driver);
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index e68cd87998c8..3a3bae6948a8 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -14,7 +14,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
@@ -548,9 +549,13 @@ static int dwcmshc_probe(struct platform_device *pdev)
host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
err = sdhci_setup_host(host);
if (err)
- goto err_clk;
+ goto err_rpm;
if (rk_priv)
dwcmshc_rk35xx_postinit(host, priv);
@@ -559,10 +564,15 @@ static int dwcmshc_probe(struct platform_device *pdev)
if (err)
goto err_setup_host;
+ pm_runtime_put(dev);
+
return 0;
err_setup_host:
sdhci_cleanup_host(host);
+err_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
@@ -574,7 +584,7 @@ free_pltfm:
return err;
}
-static int dwcmshc_remove(struct platform_device *pdev)
+static void dwcmshc_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -589,8 +599,6 @@ static int dwcmshc_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
sdhci_pltfm_free(pdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -602,6 +610,8 @@ static int dwcmshc_suspend(struct device *dev)
struct rk35xx_priv *rk_priv = priv->priv;
int ret;
+ pm_runtime_resume(dev);
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -632,21 +642,84 @@ static int dwcmshc_resume(struct device *dev)
if (!IS_ERR(priv->bus_clk)) {
ret = clk_prepare_enable(priv->bus_clk);
if (ret)
- return ret;
+ goto disable_clk;
}
if (rk_priv) {
ret = clk_bulk_prepare_enable(RK35xx_MAX_CLKS,
rk_priv->rockchip_clks);
if (ret)
- return ret;
+ goto disable_bus_clk;
}
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret)
+ goto disable_rockchip_clks;
+
+ return 0;
+
+disable_rockchip_clks:
+ if (rk_priv)
+ clk_bulk_disable_unprepare(RK35xx_MAX_CLKS,
+ rk_priv->rockchip_clks);
+disable_bus_clk:
+ if (!IS_ERR(priv->bus_clk))
+ clk_disable_unprepare(priv->bus_clk);
+disable_clk:
+ clk_disable_unprepare(pltfm_host->clk);
+ return ret;
}
#endif
-static SIMPLE_DEV_PM_OPS(dwcmshc_pmops, dwcmshc_suspend, dwcmshc_resume);
+#ifdef CONFIG_PM
+
+static void dwcmshc_enable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if ((ctrl & SDHCI_CLOCK_INT_EN) && !(ctrl & SDHCI_CLOCK_CARD_EN)) {
+ ctrl |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
+static int dwcmshc_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ dwcmshc_disable_card_clk(host);
+
+ return 0;
+}
+
+static int dwcmshc_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+
+ dwcmshc_enable_card_clk(host);
+
+ return 0;
+}
+
+#endif
+
+static const struct dev_pm_ops dwcmshc_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
+ SET_RUNTIME_PM_OPS(dwcmshc_runtime_suspend,
+ dwcmshc_runtime_resume, NULL)
+};
static struct platform_driver sdhci_dwcmshc_driver = {
.driver = {
@@ -657,7 +730,7 @@ static struct platform_driver sdhci_dwcmshc_driver = {
.pm = &dwcmshc_pmops,
},
.probe = dwcmshc_probe,
- .remove = dwcmshc_remove,
+ .remove_new = dwcmshc_remove,
};
module_platform_driver(sdhci_dwcmshc_driver);
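
sdhci-of-dwcmshc gains runtime PM that only gates the card clock: the runtime suspend/resume callbacks clear and set SDHCI_CLOCK_CARD_EN, system suspend first runtime-resumes the device, the resume error path now unwinds the clocks it enabled, and a single dev_pm_ops carries both the system-sleep and the runtime callbacks. The probe-side bring-up is the reusable part of the pattern; a hypothetical sketch, with foo_hw_init() standing in for the host setup done in the hunk and all foo_* names as placeholders:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_hw_init(struct platform_device *pdev)
{
	return 0;	/* placeholder for the real controller setup */
}

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int err;

	/*
	 * The device is powered while probing: take a usage reference,
	 * mark it active, then enable runtime PM.
	 */
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	err = foo_hw_init(pdev);
	if (err)
		goto err_rpm;

	/* Drop the probe reference; the device may now runtime-suspend. */
	pm_runtime_put(dev);
	return 0;

err_rpm:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
	return err;
}
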
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 48ca1cf15b19..3ae9aa25745a 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -1521,7 +1521,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.pm = &esdhc_of_dev_pm_ops,
},
.probe = sdhci_esdhc_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_esdhc_driver);
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 12675797b296..9c1c0ce610ef 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -68,7 +68,7 @@ static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {
static int sdhci_hlwd_probe(struct platform_device *pdev)
{
- return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0);
+ return sdhci_pltfm_init_and_add_host(pdev, &sdhci_hlwd_pdata, 0);
}
static const struct of_device_id sdhci_hlwd_of_match[] = {
@@ -85,7 +85,7 @@ static struct platform_driver sdhci_hlwd_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_hlwd_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_hlwd_driver);
diff --git a/drivers/mmc/host/sdhci-of-sparx5.c b/drivers/mmc/host/sdhci-of-sparx5.c
index 28e4ee69e100..64b77e7d14cd 100644
--- a/drivers/mmc/host/sdhci-of-sparx5.c
+++ b/drivers/mmc/host/sdhci-of-sparx5.c
@@ -13,9 +13,9 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/dma-mapping.h>
+#include <linux/of.h>
#include "sdhci-pltfm.h"
@@ -184,15 +184,12 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
sdhci_sparx5 = sdhci_pltfm_priv(pltfm_host);
sdhci_sparx5->host = host;
- pltfm_host->clk = devm_clk_get(&pdev->dev, "core");
+ pltfm_host->clk = devm_clk_get_enabled(&pdev->dev, "core");
if (IS_ERR(pltfm_host->clk)) {
ret = PTR_ERR(pltfm_host->clk);
- dev_err(&pdev->dev, "failed to get core clk: %d\n", ret);
+ dev_err(&pdev->dev, "failed to get and enable core clk: %d\n", ret);
goto free_pltfm;
}
- ret = clk_prepare_enable(pltfm_host->clk);
- if (ret)
- goto free_pltfm;
if (!of_property_read_u32(np, "microchip,clock-delay", &value) &&
(value > 0 && value <= MSHC_DLY_CC_MAX))
@@ -202,13 +199,13 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret)
- goto err_clk;
+ goto free_pltfm;
sdhci_sparx5->cpu_ctrl = syscon_regmap_lookup_by_compatible(syscon);
if (IS_ERR(sdhci_sparx5->cpu_ctrl)) {
dev_err(&pdev->dev, "No CPU syscon regmap !\n");
ret = PTR_ERR(sdhci_sparx5->cpu_ctrl);
- goto err_clk;
+ goto free_pltfm;
}
if (sdhci_sparx5->delay_clock >= 0)
@@ -225,7 +222,7 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
ret = sdhci_add_host(host);
if (ret)
- goto err_clk;
+ goto free_pltfm;
/* Set AXI bus master to use un-cached access (for DMA) */
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA) &&
@@ -239,8 +236,6 @@ static int sdhci_sparx5_probe(struct platform_device *pdev)
return ret;
-err_clk:
- clk_disable_unprepare(pltfm_host->clk);
free_pltfm:
sdhci_pltfm_free(pdev);
return ret;
@@ -260,7 +255,7 @@ static struct platform_driver sdhci_sparx5_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_sparx5_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_sparx5_driver);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 8ed9256b83da..1e0bc7bace1b 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -11,7 +11,6 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -1394,7 +1393,7 @@ err_pltfm_free:
return ret;
}
-static int sdhci_omap_remove(struct platform_device *pdev)
+static void sdhci_omap_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
@@ -1408,8 +1407,6 @@ static int sdhci_omap_remove(struct platform_device *pdev)
/* Ensure device gets disabled despite userspace sysfs config */
pm_runtime_force_suspend(dev);
sdhci_pltfm_free(pdev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1478,7 +1475,7 @@ static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
- .remove = sdhci_omap_remove,
+ .remove_new = sdhci_omap_remove,
.driver = {
.name = "sdhci-omap",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 1c2572c0f012..7c14feb5db77 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -1898,6 +1898,10 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9860, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9861, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9862, o2),
+ SDHCI_PCI_DEVICE(O2, GG8_9863, o2),
SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE(SYNOPSYS, DWC_MSHC, snps),
SDHCI_PCI_DEVICE(GLI, 9750, gl9750),
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 620f52ad9667..7bfee28116af 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -21,6 +21,7 @@
* O2Micro device registers
*/
+#define O2_SD_PCIE_SWITCH 0x54
#define O2_SD_MISC_REG5 0x64
#define O2_SD_LD0_CTRL 0x68
#define O2_SD_DEV_CTRL 0x88
@@ -36,6 +37,7 @@
#define O2_SD_INF_MOD 0xF1
#define O2_SD_MISC_CTRL4 0xFC
#define O2_SD_MISC_CTRL 0x1C0
+#define O2_SD_EXP_INT_REG 0x1E0
#define O2_SD_PWR_FORCE_L0 0x0002
#define O2_SD_TUNING_CTRL 0x300
#define O2_SD_PLL_SETTING 0x304
@@ -49,6 +51,9 @@
#define O2_SD_UHS2_L1_CTRL 0x35C
#define O2_SD_FUNC_REG3 0x3E0
#define O2_SD_FUNC_REG4 0x3E4
+#define O2_SD_PARA_SET_REG1 0x444
+#define O2_SD_VDDX_CTRL_REG 0x508
+#define O2_SD_GPIO_CTRL_REG1 0x510
#define O2_SD_LED_ENABLE BIT(6)
#define O2_SD_FREG0_LEDOFF BIT(13)
#define O2_SD_SEL_DLL BIT(16)
@@ -334,33 +339,45 @@ static int sdhci_o2_execute_tuning(struct mmc_host *mmc, u32 opcode)
scratch |= O2_SD_PWR_FORCE_L0;
sdhci_writew(host, scratch, O2_SD_MISC_CTRL);
- /* Stop clk */
- reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- reg_val &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
-
- if ((host->timing == MMC_TIMING_MMC_HS200) ||
- (host->timing == MMC_TIMING_UHS_SDR104)) {
- /* UnLock WP */
- pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
- scratch_8 &= 0x7f;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
-
- /* Set pcr 0x354[16] to choose dll clock, and set the default phase */
- pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &reg_val);
- reg_val &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
- reg_val |= (O2_SD_SEL_DLL | O2_SD_FIX_PHASE);
- pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, reg_val);
+ /* Update output phase */
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+ case PCI_DEVICE_ID_O2_SEABIRD1:
+ case PCI_DEVICE_ID_O2_SDS1:
+ case PCI_DEVICE_ID_O2_FUJIN2:
+ /* Stop clk */
+ reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ reg_val &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
+
+ if (host->timing == MMC_TIMING_MMC_HS200 ||
+ host->timing == MMC_TIMING_UHS_SDR104) {
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Set pcr 0x354[16] to choose dll clock, and set the default phase */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &reg_val);
+ reg_val &= ~(O2_SD_SEL_DLL | O2_SD_PHASE_MASK);
+ reg_val |= (O2_SD_SEL_DLL | O2_SD_FIX_PHASE);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, reg_val);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ }
- /* Lock WP */
- pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
- scratch_8 |= 0x80;
- pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ /* Start clk */
+ reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ reg_val |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
+ break;
+ default:
+ break;
}
- /* Start clk */
- reg_val = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- reg_val |= SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, reg_val, SDHCI_CLOCK_CONTROL);
/* wait DLL lock, timeout value 5ms */
if (readx_poll_timeout(sdhci_o2_pll_dll_wdt_control, host,
@@ -563,6 +580,7 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
u16 clk;
u8 scratch;
u32 scratch_32;
+ u32 dmdn_208m, dmdn_200m;
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct sdhci_pci_chip *chip = slot->chip;
@@ -578,16 +596,27 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ if (chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9860 ||
+ chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9861 ||
+ chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9862 ||
+ chip->pdev->device == PCI_DEVICE_ID_O2_GG8_9863) {
+ dmdn_208m = 0x2c500000;
+ dmdn_200m = 0x25200000;
+ } else {
+ dmdn_208m = 0x2c280000;
+ dmdn_200m = 0x25100000;
+ }
+
if ((host->timing == MMC_TIMING_UHS_SDR104) && (clock == 200000000)) {
pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
- if ((scratch_32 & 0xFFFF0000) != 0x2c280000)
- o2_pci_set_baseclk(chip, 0x2c280000);
+ if ((scratch_32 & 0xFFFF0000) != dmdn_208m)
+ o2_pci_set_baseclk(chip, dmdn_208m);
} else {
pci_read_config_dword(chip->pdev, O2_SD_PLL_SETTING, &scratch_32);
- if ((scratch_32 & 0xFFFF0000) != 0x25100000)
- o2_pci_set_baseclk(chip, 0x25100000);
+ if ((scratch_32 & 0xFFFF0000) != dmdn_200m)
+ o2_pci_set_baseclk(chip, dmdn_200m);
}
pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
@@ -603,6 +632,67 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock)
sdhci_o2_enable_clk(host, clk);
}
+static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct sdhci_pci_chip *chip = slot->chip;
+ u8 scratch8;
+ u16 scratch16;
+ int ret;
+
+ /* Disable clock */
+ sdhci_writeb(host, 0, SDHCI_CLOCK_CONTROL);
+
+ /* Set VDD2 voltage*/
+ scratch8 = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ scratch8 &= 0x0F;
+ if (host->mmc->ios.timing == MMC_TIMING_SD_EXP_1_2V &&
+ host->mmc->caps2 & MMC_CAP2_SD_EXP_1_2V) {
+ scratch8 |= SDHCI_VDD2_POWER_ON | SDHCI_VDD2_POWER_120;
+ } else {
+ scratch8 |= SDHCI_VDD2_POWER_ON | SDHCI_VDD2_POWER_180;
+ }
+
+ sdhci_writeb(host, scratch8, SDHCI_POWER_CONTROL);
+
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch8);
+ scratch8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch8);
+
+ /* Wait for express card clkreqn assert */
+ ret = read_poll_timeout(sdhci_readb, scratch8, !(scratch8 & BIT(0)),
+ 1, 30000, false, host, O2_SD_EXP_INT_REG);
+
+ if (!ret) {
+ /* Switch to PCIe mode */
+ scratch16 = sdhci_readw(host, O2_SD_PCIE_SWITCH);
+ scratch16 |= BIT(8);
+ sdhci_writew(host, scratch16, O2_SD_PCIE_SWITCH);
+ } else {
+ /* Power off VDD2 voltage*/
+ scratch8 = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ scratch8 &= 0x0F;
+ sdhci_writeb(host, scratch8, SDHCI_POWER_CONTROL);
+
+ /* Keep mode as UHSI */
+ pci_read_config_word(chip->pdev, O2_SD_PARA_SET_REG1, &scratch16);
+ scratch16 &= ~BIT(11);
+ pci_write_config_word(chip->pdev, O2_SD_PARA_SET_REG1, scratch16);
+
+ host->mmc->ios.timing = MMC_TIMING_LEGACY;
+ pr_info("%s: Express card initialization failed, falling back to Legacy\n",
+ mmc_hostname(host->mmc));
+ }
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch8);
+ scratch8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch8);
+
+ return 0;
+}
+
static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -624,6 +714,11 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (caps & SDHCI_CAN_DO_8BIT)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_DDR50;
+
+ sdhci_pci_o2_enable_msi(chip, host);
+
+ host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_SDS0:
case PCI_DEVICE_ID_O2_SEABIRD0:
@@ -634,10 +729,6 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
if (reg & 0x1)
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
- host->quirks2 |= SDHCI_QUIRK2_BROKEN_DDR50;
-
- sdhci_pci_o2_enable_msi(chip, host);
-
if (chip->pdev->device == PCI_DEVICE_ID_O2_SEABIRD0) {
ret = pci_read_config_dword(chip->pdev,
O2_SD_MISC_SETTING, &reg);
@@ -663,15 +754,22 @@ static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
}
- host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning;
-
if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2)
break;
/* set dll watch dog timer */
reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2);
reg |= (1 << 12);
sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2);
-
+ break;
+ case PCI_DEVICE_ID_O2_GG8_9860:
+ case PCI_DEVICE_ID_O2_GG8_9861:
+ case PCI_DEVICE_ID_O2_GG8_9862:
+ case PCI_DEVICE_ID_O2_GG8_9863:
+ host->mmc->caps2 |= MMC_CAP2_NO_SDIO | MMC_CAP2_SD_EXP | MMC_CAP2_SD_EXP_1_2V;
+ host->mmc->caps |= MMC_CAP_HW_RESET;
+ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
+ slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd;
+ host->mmc_host_ops.init_sd_express = sdhci_pci_o2_init_sd_express;
break;
default:
break;
@@ -684,6 +782,7 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
{
int ret;
u8 scratch;
+ u16 scratch16;
u32 scratch_32;
switch (chip->pdev->device) {
@@ -893,6 +992,46 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
+ case PCI_DEVICE_ID_O2_GG8_9860:
+ case PCI_DEVICE_ID_O2_GG8_9861:
+ case PCI_DEVICE_ID_O2_GG8_9862:
+ case PCI_DEVICE_ID_O2_GG8_9863:
+ /* UnLock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+ /* Select mode switch source as software control */
+ pci_read_config_word(chip->pdev, O2_SD_PARA_SET_REG1, &scratch16);
+ scratch16 &= 0xF8FF;
+ scratch16 |= BIT(9);
+ pci_write_config_word(chip->pdev, O2_SD_PARA_SET_REG1, scratch16);
+
+ /* set VDD1 supply source */
+ pci_read_config_word(chip->pdev, O2_SD_VDDX_CTRL_REG, &scratch16);
+ scratch16 &= 0xFFE3;
+ scratch16 |= BIT(3);
+ pci_write_config_word(chip->pdev, O2_SD_VDDX_CTRL_REG, scratch16);
+
+ /* Set host drive strength*/
+ scratch16 = 0x0025;
+ pci_write_config_word(chip->pdev, O2_SD_PLL_SETTING, scratch16);
+
+ /* Set output delay*/
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= 0xFF0FFF00;
+ scratch_32 |= 0x00B0003B;
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+ if (ret)
+ return ret;
+ scratch |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+ break;
}
return 0;
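
The larger O2Micro addition is SD Express support for the GG8 parts: init_sd_express() programs the VDD2 supply, polls O2_SD_EXP_INT_REG for up to 30 ms waiting for the card's CLKREQ# indication, switches the slot to PCIe mode if it arrives, and otherwise powers VDD2 back off and drops to UHS-I. The polling uses the generic read_poll_timeout() helper; a small hypothetical example of the same call, with foo_wait_sd_express_ready() and the bit-0 register layout being placeholders:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/*
 * read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read,
 * args...) re-evaluates "val = op(args...)" until "cond" is true (returning 0)
 * or timeout_us expires (returning -ETIMEDOUT).
 */
static int foo_wait_sd_express_ready(void __iomem *int_reg)
{
	u8 val;

	/* Poll every 1 us, give up after 30 ms, as the hunk above does. */
	return read_poll_timeout(readb, val, !(val & BIT(0)),
				 1, 30000, false, int_reg);
}
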
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 9c8863956381..153704f812ed 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -11,6 +11,10 @@
#define PCI_DEVICE_ID_O2_FUJIN2 0x8520
#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620
#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621
+#define PCI_DEVICE_ID_O2_GG8_9860 0x9860
+#define PCI_DEVICE_ID_O2_GG8_9861 0x9861
+#define PCI_DEVICE_ID_O2_GG8_9862 0x9862
+#define PCI_DEVICE_ID_O2_GG8_9863 0x9863
#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809
#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
index 6696b6bdd88e..7a0351a9c74e 100644
--- a/drivers/mmc/host/sdhci-pic32.c
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -210,7 +210,7 @@ err:
return ret;
}
-static int pic32_sdhci_remove(struct platform_device *pdev)
+static void pic32_sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
@@ -221,8 +221,6 @@ static int pic32_sdhci_remove(struct platform_device *pdev)
clk_disable_unprepare(sdhci_pdata->base_clk);
clk_disable_unprepare(sdhci_pdata->sys_clk);
sdhci_pltfm_free(pdev);
-
- return 0;
}
static const struct of_device_id pic32_sdhci_id_table[] = {
@@ -238,7 +236,7 @@ static struct platform_driver pic32_sdhci_driver = {
.of_match_table = of_match_ptr(pic32_sdhci_id_table),
},
.probe = pic32_sdhci_probe,
- .remove = pic32_sdhci_remove,
+ .remove_new = pic32_sdhci_remove,
};
module_platform_driver(pic32_sdhci_driver);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 673e750a8490..a72e123a585d 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -166,9 +166,9 @@ void sdhci_pltfm_free(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
-int sdhci_pltfm_register(struct platform_device *pdev,
- const struct sdhci_pltfm_data *pdata,
- size_t priv_size)
+int sdhci_pltfm_init_and_add_host(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size)
{
struct sdhci_host *host;
int ret = 0;
@@ -185,21 +185,17 @@ int sdhci_pltfm_register(struct platform_device *pdev,
return ret;
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_register);
+EXPORT_SYMBOL_GPL(sdhci_pltfm_init_and_add_host);
-int sdhci_pltfm_unregister(struct platform_device *pdev)
+void sdhci_pltfm_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
sdhci_remove_host(host, dead);
- clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
-
- return 0;
}
-EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
+EXPORT_SYMBOL_GPL(sdhci_pltfm_remove);
#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 9bd717ff784b..b81d5b0fd616 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -99,10 +99,10 @@ extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
size_t priv_size);
extern void sdhci_pltfm_free(struct platform_device *pdev);
-extern int sdhci_pltfm_register(struct platform_device *pdev,
- const struct sdhci_pltfm_data *pdata,
- size_t priv_size);
-extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+extern int sdhci_pltfm_init_and_add_host(struct platform_device *pdev,
+ const struct sdhci_pltfm_data *pdata,
+ size_t priv_size);
+extern void sdhci_pltfm_remove(struct platform_device *pdev);
extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host);
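
Besides the rename, the new sdhci_pltfm_remove() above no longer disables pltfm_host->clk the way sdhci_pltfm_unregister() did, so a driver that enabled a clock itself now has to drop it in its own remove path (the sdhci-st hunk further down does exactly this). A minimal sketch with a hypothetical driver, assuming the clock was stored in pltfm_host->clk during probe:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include "sdhci-pltfm.h"

static void foo_sdhci_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct clk *clk = pltfm_host->clk;	/* save before the host is freed */

	sdhci_pltfm_remove(pdev);		/* sdhci_remove_host() + sdhci_pltfm_free() */
	clk_disable_unprepare(clk);		/* now the driver's job, not the helper's */
}
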
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 91aca8f8d6ef..b75cbea88b40 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -19,7 +19,6 @@
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/mmc.h>
#include <linux/pinctrl/consumer.h>
@@ -269,26 +268,21 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
pxav2_host = sdhci_pltfm_priv(pltfm_host);
- clk = devm_clk_get(dev, "io");
- if (IS_ERR(clk) && PTR_ERR(clk) != -EPROBE_DEFER)
- clk = devm_clk_get(dev, NULL);
+ clk = devm_clk_get_optional_enabled(dev, "io");
+ if (!clk)
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err_probe(dev, ret, "failed to get io clock\n");
goto free;
}
pltfm_host->clk = clk;
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable io clock\n");
- goto free;
- }
clk_core = devm_clk_get_optional_enabled(dev, "core");
if (IS_ERR(clk_core)) {
ret = PTR_ERR(clk_core);
dev_err_probe(dev, ret, "failed to enable core clock\n");
- goto disable_clk;
+ goto free;
}
host->quirks = SDHCI_QUIRK_BROKEN_ADMA
@@ -340,12 +334,10 @@ static int sdhci_pxav2_probe(struct platform_device *pdev)
ret = sdhci_add_host(host);
if (ret)
- goto disable_clk;
+ goto free;
return 0;
-disable_clk:
- clk_disable_unprepare(clk);
free:
sdhci_pltfm_free(pdev);
return ret;
@@ -359,7 +351,7 @@ static struct platform_driver sdhci_pxav2_driver = {
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_pxav2_probe,
- .remove = sdhci_pltfm_unregister,
+ .remove_new = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_pxav2_driver);
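
The pxav2 probe above also replaces the manual clk_get/clk_prepare_enable/clk_disable_unprepare sequence with the devm-managed *_enabled helpers, which is what lets the disable_clk error label disappear. A minimal sketch of that pattern, using a hypothetical helper name:

#include <linux/clk.h>
#include <linux/device.h>

static int foo_get_io_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;

	/* optional "io" clock; NULL means the firmware simply does not describe one */
	clk = devm_clk_get_optional_enabled(dev, "io");
	if (!clk)
		clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get io clock\n");

	*out = clk;	/* already prepared and enabled; devm disables it on unbind */
	return 0;
}
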
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index e39dcc998772..3af43ac05825 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -470,7 +470,7 @@ err_clk_get:
return ret;
}
-static int sdhci_pxav3_remove(struct platform_device *pdev)
+static void sdhci_pxav3_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -486,8 +486,6 @@ static int sdhci_pxav3_remove(struct platform_device *pdev)
clk_disable_unprepare(pxa->clk_core);
sdhci_pltfm_free(pdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -570,7 +568,7 @@ static struct platform_driver sdhci_pxav3_driver = {
.pm = &sdhci_pxav3_pmops,
},
.probe = sdhci_pxav3_probe,
- .remove = sdhci_pxav3_remove,
+ .remove_new = sdhci_pxav3_remove,
};
module_platform_driver(sdhci_pxav3_driver);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 504015e84308..0e8a8ac14e56 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -20,7 +20,6 @@
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -668,7 +667,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
return ret;
}
-static int sdhci_s3c_remove(struct platform_device *pdev)
+static void sdhci_s3c_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_s3c *sc = sdhci_priv(host);
@@ -688,8 +687,6 @@ static int sdhci_s3c_remove(struct platform_device *pdev)
clk_disable_unprepare(sc->clk_io);
sdhci_free_host(host);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -776,7 +773,7 @@ MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
- .remove = sdhci_s3c_remove,
+ .remove_new = sdhci_s3c_remove,
.id_table = sdhci_s3c_driver_ids,
.driver = {
.name = "s3c-sdhci",
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index c79035727b20..c81bdfa97b89 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -117,7 +117,7 @@ err:
return ret;
}
-static int sdhci_remove(struct platform_device *pdev)
+static void sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct spear_sdhci *sdhci = sdhci_priv(host);
@@ -131,8 +131,6 @@ static int sdhci_remove(struct platform_device *pdev)
sdhci_remove_host(host, dead);
clk_disable_unprepare(sdhci->clk);
sdhci_free_host(host);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -170,23 +168,21 @@ static int sdhci_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume);
-#ifdef CONFIG_OF
static const struct of_device_id sdhci_spear_id_table[] = {
{ .compatible = "st,spear300-sdhci" },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_spear_id_table);
-#endif
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.pm = &sdhci_pm_ops,
- .of_match_table = of_match_ptr(sdhci_spear_id_table),
+ .of_match_table = sdhci_spear_id_table,
},
.probe = sdhci_probe,
- .remove = sdhci_remove,
+ .remove_new = sdhci_remove,
};
module_platform_driver(sdhci_driver);
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 7f4ee2e12735..649ae075e229 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -9,9 +9,10 @@
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/iopoll.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -73,6 +74,11 @@
#define SDHCI_SPRD_CLK_DEF_RATE 26000000
#define SDHCI_SPRD_PHY_DLL_CLK 52000000
+#define SDHCI_SPRD_MAX_RANGE 0xff
+#define SDHCI_SPRD_CMD_DLY_MASK GENMASK(15, 8)
+#define SDHCI_SPRD_POSRD_DLY_MASK GENMASK(23, 16)
+#define SDHCI_SPRD_CPST_EN GENMASK(27, 24)
+
struct sdhci_sprd_host {
u32 version;
struct clk *clk_sdio;
@@ -86,6 +92,11 @@ struct sdhci_sprd_host {
u32 phy_delay[MMC_TIMING_MMC_HS400 + 2];
};
+enum sdhci_sprd_tuning_type {
+ SDHCI_SPRD_TUNING_SD_HS_CMD,
+ SDHCI_SPRD_TUNING_SD_HS_DATA,
+};
+
struct sdhci_sprd_phy_cfg {
const char *property;
u8 timing;
@@ -533,6 +544,138 @@ static void sdhci_sprd_hs400_enhanced_strobe(struct mmc_host *mmc,
SDHCI_SPRD_REG_32_DLL_DLY);
}
+static int mmc_send_tuning_cmd(struct mmc_card *card)
+{
+ return mmc_send_status(card, NULL);
+}
+
+static int mmc_send_tuning_data(struct mmc_card *card)
+{
+ u8 *status;
+ int ret;
+
+ status = kmalloc(64, GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ ret = mmc_sd_switch(card, 0, 0, 0, status);
+
+ kfree(status);
+
+ return ret;
+}
+
+static int sdhci_sprd_get_best_clk_sample(struct mmc_host *mmc, u8 *value)
+{
+ int range_end = SDHCI_SPRD_MAX_RANGE;
+ int range_length = 0;
+ int middle_range = 0;
+ int count = 0;
+ int i;
+
+ for (i = 0; i <= SDHCI_SPRD_MAX_RANGE; i++) {
+ if (value[i]) {
+ pr_debug("%s: tuning ok: %d\n", mmc_hostname(mmc), i);
+ count++;
+ } else {
+ pr_debug("%s: tuning fail: %d\n", mmc_hostname(mmc), i);
+ if (range_length < count) {
+ range_length = count;
+ range_end = i - 1;
+ count = 0;
+ }
+ }
+ }
+
+ if (!count)
+ return -EIO;
+
+ if (count > range_length) {
+ range_length = count;
+ range_end = i - 1;
+ }
+
+ middle_range = range_end - (range_length - 1) / 2;
+
+ return middle_range;
+}
+
+static int sdhci_sprd_tuning(struct mmc_host *mmc, struct mmc_card *card,
+ enum sdhci_sprd_tuning_type type)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ u32 *p = sprd_host->phy_delay;
+ u32 dll_cfg, dll_dly;
+ int best_clk_sample;
+ int err = 0;
+ u8 *value;
+ int i;
+
+ value = kmalloc(SDHCI_SPRD_MAX_RANGE + 1, GFP_KERNEL);
+ if (!value)
+ return -ENOMEM;
+
+ sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+ dll_cfg = sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG);
+ dll_cfg &= ~SDHCI_SPRD_CPST_EN;
+ sdhci_writel(host, dll_cfg, SDHCI_SPRD_REG_32_DLL_CFG);
+
+ dll_dly = p[mmc->ios.timing];
+
+ for (i = 0; i <= SDHCI_SPRD_MAX_RANGE; i++) {
+ if (type == SDHCI_SPRD_TUNING_SD_HS_CMD) {
+ dll_dly &= ~SDHCI_SPRD_CMD_DLY_MASK;
+ dll_dly |= ((i << 8) & SDHCI_SPRD_CMD_DLY_MASK);
+ } else {
+ dll_dly &= ~SDHCI_SPRD_POSRD_DLY_MASK;
+ dll_dly |= ((i << 16) & SDHCI_SPRD_POSRD_DLY_MASK);
+ }
+
+ sdhci_writel(host, dll_dly, SDHCI_SPRD_REG_32_DLL_DLY);
+
+ if (type == SDHCI_SPRD_TUNING_SD_HS_CMD)
+ value[i] = !mmc_send_tuning_cmd(card);
+ else
+ value[i] = !mmc_send_tuning_data(card);
+ }
+
+ best_clk_sample = sdhci_sprd_get_best_clk_sample(mmc, value);
+ if (best_clk_sample < 0) {
+ dev_err(mmc_dev(host->mmc), "all tuning phase fail!\n");
+ goto out;
+ }
+
+ if (type == SDHCI_SPRD_TUNING_SD_HS_CMD) {
+ p[mmc->ios.timing] &= ~SDHCI_SPRD_CMD_DLY_MASK;
+ p[mmc->ios.timing] |= ((best_clk_sample << 8) & SDHCI_SPRD_CMD_DLY_MASK);
+ } else {
+ p[mmc->ios.timing] &= ~(SDHCI_SPRD_POSRD_DLY_MASK);
+ p[mmc->ios.timing] |= ((best_clk_sample << 16) & SDHCI_SPRD_POSRD_DLY_MASK);
+ }
+
+ pr_debug("%s: the best clk sample %d, delay value 0x%08x\n",
+ mmc_hostname(host->mmc), best_clk_sample, p[mmc->ios.timing]);
+
+out:
+ sdhci_writel(host, p[mmc->ios.timing], SDHCI_SPRD_REG_32_DLL_DLY);
+
+ kfree(value);
+
+ return err;
+}
+
+static int sdhci_sprd_prepare_sd_hs_cmd_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ return sdhci_sprd_tuning(mmc, card, SDHCI_SPRD_TUNING_SD_HS_CMD);
+}
+
+static int sdhci_sprd_execute_sd_hs_data_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ return sdhci_sprd_tuning(mmc, card, SDHCI_SPRD_TUNING_SD_HS_DATA);
+}
+
static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
struct device_node *np)
{
@@ -577,6 +720,11 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->mmc_host_ops.request = sdhci_sprd_request;
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_sprd_hs400_enhanced_strobe;
+ host->mmc_host_ops.prepare_sd_hs_tuning =
+ sdhci_sprd_prepare_sd_hs_cmd_tuning;
+ host->mmc_host_ops.execute_sd_hs_tuning =
+ sdhci_sprd_execute_sd_hs_data_tuning;
+
/*
* We can not use the standard ops to change and detect the voltage
* signal for Spreadtrum SD host controller, since our voltage regulator
@@ -720,7 +868,7 @@ pltfm_free:
return ret;
}
-static int sdhci_sprd_remove(struct platform_device *pdev)
+static void sdhci_sprd_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
@@ -732,8 +880,6 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
clk_disable_unprepare(sprd_host->clk_2x_enable);
sdhci_pltfm_free(pdev);
-
- return 0;
}
static const struct of_device_id sdhci_sprd_of_match[] = {
@@ -800,7 +946,7 @@ static const struct dev_pm_ops sdhci_sprd_pm_ops = {
static struct platform_driver sdhci_sprd_driver = {
.probe = sdhci_sprd_probe,
- .remove = sdhci_sprd_remove,
+ .remove_new = sdhci_sprd_remove,
.driver = {
.name = "sdhci_sprd_r11",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
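
The new Spreadtrum SD high-speed tuning above sweeps every delay value, records a pass/fail result per sample, and then picks the middle of the longest passing window. A standalone sketch of that selection idea (not a line-for-line copy of sdhci_sprd_get_best_clk_sample(), which folds the bookkeeping into the sweep loop):

#include <stdio.h>

#define MAX_RANGE 0xff

/* Return the midpoint of the longest run of passing samples, or -1 if none passed. */
static int best_clk_sample(const unsigned char value[MAX_RANGE + 1])
{
	int range_end = MAX_RANGE, range_length = 0, count = 0, i;

	for (i = 0; i <= MAX_RANGE; i++) {
		if (value[i]) {
			count++;			/* extend the current passing run */
		} else {
			if (range_length < count) {	/* close the run, keep the best one */
				range_length = count;
				range_end = i - 1;
			}
			count = 0;
		}
	}

	if (count > range_length) {			/* run extends to the end of the range */
		range_length = count;
		range_end = i - 1;
	}

	if (!range_length)
		return -1;				/* every delay value failed */

	return range_end - (range_length - 1) / 2;	/* midpoint of the longest run */
}

int main(void)
{
	unsigned char v[MAX_RANGE + 1] = { 0 };
	int i;

	for (i = 40; i <= 80; i++)			/* pretend delays 40..80 passed tuning */
		v[i] = 1;

	printf("best sample: %d\n", best_clk_sample(v));	/* prints 60 */
	return 0;
}
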
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index 6415916fbd91..d12532b96b51 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -348,7 +348,6 @@ static int sdhci_st_probe(struct platform_device *pdev)
struct clk *clk, *icnclk;
int ret = 0;
u16 host_version;
- struct resource *res;
struct reset_control *rstc;
clk = devm_clk_get(&pdev->dev, "mmc");
@@ -397,9 +396,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
/* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "top-mmc-delay");
- pdata->top_ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ pdata->top_ioaddr = devm_platform_ioremap_resource_byname(pdev, "top-mmc-delay");
if (IS_ERR(pdata->top_ioaddr))
pdata->top_ioaddr = NULL;
@@ -434,20 +431,20 @@ err_pltfm_init:
return ret;
}
-static int sdhci_st_remove(struct platform_device *pdev)
+static void sdhci_st_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct st_mmc_platform_data *pdata = sdhci_pltfm_priv(pltfm_host);
struct reset_control *rstc = pdata->rstc;
+ struct clk *clk = pltfm_host->clk;
- sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
clk_disable_unprepare(pdata->icnclk);
+ clk_disable_unprepare(clk);
reset_control_assert(rstc);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -510,7 +507,7 @@ MODULE_DEVICE_TABLE(of, st_sdhci_match);
static struct platform_driver sdhci_st_driver = {
.probe = sdhci_st_probe,
- .remove = sdhci_st_remove,
+ .remove_new = sdhci_st_remove,
.driver = {
.name = "sdhci-st",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index bff084f178c9..1ad0a6b3a2eb 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -19,7 +19,6 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -1818,7 +1817,7 @@ err_parse_dt:
return rc;
}
-static int sdhci_tegra_remove(struct platform_device *pdev)
+static void sdhci_tegra_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1834,8 +1833,6 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev);
-
- return 0;
}
static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
@@ -1933,7 +1930,7 @@ static struct platform_driver sdhci_tegra_driver = {
.pm = &sdhci_tegra_dev_pm_ops,
},
.probe = sdhci_tegra_probe,
- .remove = sdhci_tegra_remove,
+ .remove_new = sdhci_tegra_remove,
};
module_platform_driver(sdhci_tegra_driver);
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 08e838400b52..25ba7aecc3be 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -578,7 +578,7 @@ free_pltfm:
return err;
}
-static int xenon_remove(struct platform_device *pdev)
+static void xenon_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -595,8 +595,6 @@ static int xenon_remove(struct platform_device *pdev)
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -705,7 +703,7 @@ static struct platform_driver sdhci_xenon_driver = {
.pm = &sdhci_xenon_dev_pm_ops,
},
.probe = xenon_probe,
- .remove = xenon_remove,
+ .remove_new = xenon_remove,
};
module_platform_driver(sdhci_xenon_driver);
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 7cdf0f54e3a5..c125485ba80e 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -866,22 +866,22 @@ err_pltfm_free:
return ret;
}
-static int sdhci_am654_remove(struct platform_device *pdev)
+static void sdhci_am654_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct device *dev = &pdev->dev;
int ret;
- ret = pm_runtime_resume_and_get(&pdev->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
- return ret;
+ dev_err(dev, "pm_runtime_get_sync() Failed\n");
sdhci_remove_host(host, true);
clk_disable_unprepare(pltfm_host->clk);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
sdhci_pltfm_free(pdev);
- return 0;
}
#ifdef CONFIG_PM
@@ -993,7 +993,7 @@ static struct platform_driver sdhci_am654_driver = {
.of_match_table = sdhci_am654_of_match,
},
.probe = sdhci_am654_probe,
- .remove = sdhci_am654_remove,
+ .remove_new = sdhci_am654_remove,
};
module_platform_driver(sdhci_am654_driver);
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 3215063bcf86..c58e7cb1e2a7 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -206,7 +206,7 @@ err:
return ret;
}
-static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+static void sdhci_f_sdh30_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
@@ -214,13 +214,11 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
struct reset_control *rst = priv->rst;
struct clk *clk = priv->clk;
- sdhci_pltfm_unregister(pdev);
+ sdhci_pltfm_remove(pdev);
reset_control_assert(rst);
clk_disable_unprepare(clk);
clk_disable_unprepare(clk_iface);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -248,8 +246,8 @@ static struct platform_driver sdhci_f_sdh30_driver = {
.acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
- .probe = sdhci_f_sdh30_probe,
- .remove = sdhci_f_sdh30_remove,
+ .probe = sdhci_f_sdh30_probe,
+ .remove_new = sdhci_f_sdh30_remove,
};
module_platform_driver(sdhci_f_sdh30_driver);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 5cf53348372a..077d711e964e 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -46,7 +46,6 @@
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_data/sh_mmcif.h>
#include <linux/platform_device.h>
@@ -1509,7 +1508,7 @@ err_host:
return ret;
}
-static int sh_mmcif_remove(struct platform_device *pdev)
+static void sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
@@ -1533,8 +1532,6 @@ static int sh_mmcif_remove(struct platform_device *pdev)
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1561,7 +1558,7 @@ static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
static struct platform_driver sh_mmcif_driver = {
.probe = sh_mmcif_probe,
- .remove = sh_mmcif_remove,
+ .remove_new = sh_mmcif_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/sunplus-mmc.c b/drivers/mmc/host/sunplus-mmc.c
index 2bdebeb1f8e4..13c7cc0b6180 100644
--- a/drivers/mmc/host/sunplus-mmc.c
+++ b/drivers/mmc/host/sunplus-mmc.c
@@ -885,7 +885,7 @@ static int spmmc_drv_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(host->rstc), "rst get fail\n");
host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0)
+ if (host->irq < 0)
return host->irq;
ret = devm_request_threaded_irq(&pdev->dev, host->irq,
@@ -939,7 +939,7 @@ clk_disable:
return ret;
}
-static int spmmc_drv_remove(struct platform_device *dev)
+static void spmmc_drv_remove(struct platform_device *dev)
{
struct spmmc_host *host = platform_get_drvdata(dev);
@@ -948,9 +948,6 @@ static int spmmc_drv_remove(struct platform_device *dev)
clk_disable_unprepare(host->clk);
pm_runtime_put_noidle(&dev->dev);
pm_runtime_disable(&dev->dev);
- platform_set_drvdata(dev, NULL);
-
- return 0;
}
static int spmmc_pm_runtime_suspend(struct device *dev)
@@ -985,7 +982,7 @@ MODULE_DEVICE_TABLE(of, spmmc_of_table);
static struct platform_driver spmmc_driver = {
.probe = spmmc_drv_probe,
- .remove = spmmc_drv_remove,
+ .remove_new = spmmc_drv_remove,
.driver = {
.name = "spmmc",
.pm = pm_ptr(&spmmc_pm_ops),
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 69dcb8805e05..d3bd0ac99ec4 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1486,7 +1486,7 @@ error_free_host:
return ret;
}
-static int sunxi_mmc_remove(struct platform_device *pdev)
+static void sunxi_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct sunxi_mmc_host *host = mmc_priv(mmc);
@@ -1499,8 +1499,6 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
}
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
mmc_free_host(mmc);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1556,7 +1554,7 @@ static struct platform_driver sunxi_mmc_driver = {
.pm = &sunxi_mmc_pm_ops,
},
.probe = sunxi_mmc_probe,
- .remove = sunxi_mmc_remove,
+ .remove_new = sunxi_mmc_remove,
};
module_platform_driver(sunxi_mmc_driver);
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 61acd69fac0e..1404989e6151 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -13,7 +13,6 @@
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -706,19 +705,19 @@ static int uniphier_sd_probe(struct platform_device *pdev)
tmio_data->max_segs = 1;
tmio_data->max_blk_count = U16_MAX;
- ret = tmio_mmc_host_probe(host);
- if (ret)
- goto disable_clk;
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, TMIO_MASK_ALL);
ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
dev_name(dev), host);
if (ret)
- goto remove_host;
+ goto disable_clk;
+
+ ret = tmio_mmc_host_probe(host);
+ if (ret)
+ goto disable_clk;
return 0;
-remove_host:
- tmio_mmc_host_remove(host);
disable_clk:
uniphier_sd_clk_disable(host);
free_host:
@@ -727,15 +726,13 @@ free_host:
return ret;
}
-static int uniphier_sd_remove(struct platform_device *pdev)
+static void uniphier_sd_remove(struct platform_device *pdev)
{
struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
uniphier_sd_clk_disable(host);
tmio_mmc_host_free(host);
-
- return 0;
}
static const struct of_device_id uniphier_sd_match[] = {
@@ -757,7 +754,7 @@ MODULE_DEVICE_TABLE(of, uniphier_sd_match);
static struct platform_driver uniphier_sd_driver = {
.probe = uniphier_sd_probe,
- .remove = uniphier_sd_remove,
+ .remove_new = uniphier_sd_remove,
.driver = {
.name = "uniphier-sd",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 2e17903658fc..6e421445d56c 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -1884,7 +1884,7 @@ e_free_mmc:
return ret;
}
-static int usdhi6_remove(struct platform_device *pdev)
+static void usdhi6_remove(struct platform_device *pdev)
{
struct usdhi6_host *host = platform_get_drvdata(pdev);
@@ -1895,13 +1895,11 @@ static int usdhi6_remove(struct platform_device *pdev)
usdhi6_dma_release(host);
clk_disable_unprepare(host->clk);
mmc_free_host(host->mmc);
-
- return 0;
}
static struct platform_driver usdhi6_driver = {
.probe = usdhi6_probe,
- .remove = usdhi6_remove,
+ .remove_new = usdhi6_remove,
.driver = {
.name = "usdhi6rol0",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c
index a2b0d9461665..ba6044b16e07 100644
--- a/drivers/mmc/host/via-sdmmc.c
+++ b/drivers/mmc/host/via-sdmmc.c
@@ -1160,7 +1160,6 @@ static int via_sd_probe(struct pci_dev *pcidev,
unmap:
iounmap(sdhost->mmiobase);
free_mmc_host:
- dev_set_drvdata(&pcidev->dev, NULL);
mmc_free_host(mmc);
release:
pci_release_regions(pcidev);
@@ -1212,7 +1211,6 @@ static void via_sd_remove(struct pci_dev *pcidev)
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
iounmap(sdhost->mmiobase);
- dev_set_drvdata(&pcidev->dev, NULL);
mmc_free_host(sdhost->mmc);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index bf2a92fba0ed..001a468bc149 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1264,8 +1264,6 @@ static void wbsd_free_mmc(struct device *dev)
del_timer_sync(&host->ignore_timer);
mmc_free_host(mmc);
-
- dev_set_drvdata(dev, NULL);
}
/*
@@ -1756,11 +1754,9 @@ static int wbsd_probe(struct platform_device *dev)
return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);
}
-static int wbsd_remove(struct platform_device *dev)
+static void wbsd_remove(struct platform_device *dev)
{
wbsd_shutdown(&dev->dev, 0);
-
- return 0;
}
/*
@@ -1902,8 +1898,7 @@ static struct platform_device *wbsd_device;
static struct platform_driver wbsd_driver = {
.probe = wbsd_probe,
- .remove = wbsd_remove,
-
+ .remove_new = wbsd_remove,
.suspend = wbsd_platform_suspend,
.resume = wbsd_platform_resume,
.driver = {
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 68525d900046..77d5f1d24489 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -21,7 +21,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_device.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -880,7 +879,7 @@ fail1:
return ret;
}
-static int wmt_mci_remove(struct platform_device *pdev)
+static void wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
@@ -918,8 +917,6 @@ static int wmt_mci_remove(struct platform_device *pdev)
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
-
- return 0;
}
#ifdef CONFIG_PM
@@ -989,7 +986,7 @@ static const struct dev_pm_ops wmt_mci_pm = {
static struct platform_driver wmt_mci_driver = {
.probe = wmt_mci_probe,
- .remove = wmt_mci_remove,
+ .remove_new = wmt_mci_remove,
.driver = {
.name = DRIVER_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 99265667538c..d9e052c49ba1 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -464,7 +464,7 @@ static void arcnet_reply_tasklet(struct tasklet_struct *t)
ret = sock_queue_err_skb(sk, ackskb);
if (ret)
- kfree_skb(ackskb);
+ dev_kfree_skb_irq(ackskb);
local_irq_enable();
};
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index f2c79456d745..36f9b932b9e2 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -464,7 +464,8 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
int i;
- pdsc_devcmd_reset(pdsc);
+ if (!pdsc->pdev->is_virtfn)
+ pdsc_devcmd_reset(pdsc);
pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
pdsc_qcq_free(pdsc, &pdsc->adminqcq);
@@ -524,7 +525,8 @@ static void pdsc_fw_down(struct pdsc *pdsc)
}
/* Notify clients of fw_down */
- devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+ if (pdsc->fw_reporter)
+ devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
pdsc_notify(PDS_EVENT_RESET, &reset_event);
pdsc_stop(pdsc);
@@ -554,8 +556,9 @@ static void pdsc_fw_up(struct pdsc *pdsc)
/* Notify clients of fw_up */
pdsc->fw_recoveries++;
- devlink_health_reporter_state_update(pdsc->fw_reporter,
- DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ if (pdsc->fw_reporter)
+ devlink_health_reporter_state_update(pdsc->fw_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
pdsc_notify(PDS_EVENT_RESET, &reset_event);
return;
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index debe5216fe29..f77cd9f5a2fd 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -121,7 +121,7 @@ static const char *pdsc_devcmd_str(int opcode)
}
}
-static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
+static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
{
struct device *dev = pdsc->dev;
unsigned long start_time;
@@ -131,9 +131,6 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
int done = 0;
int err = 0;
int status;
- int opcode;
-
- opcode = ioread8(&pdsc->cmd_regs->cmd.opcode);
start_time = jiffies;
max_wait = start_time + (max_seconds * HZ);
@@ -180,10 +177,10 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
pdsc_devcmd_dbell(pdsc);
- err = pdsc_devcmd_wait(pdsc, max_seconds);
+ err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds);
memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
- if (err == -ENXIO || err == -ETIMEDOUT)
+ if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq)
queue_work(pdsc->wq, &pdsc->health_work);
return err;
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 9c6b3653c1c7..d9607033bbf2 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -10,6 +10,9 @@ pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
{
int vt;
+ if (!pdsc->viftype_status)
+ return NULL;
+
for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
if (pdsc->viftype_status[vt].dl_id == dl_id)
return &pdsc->viftype_status[vt];
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e388bffda2dd..14b311196b8f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17794,10 +17794,7 @@ static int tg3_init_one(struct pci_dev *pdev,
tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
tnapi->int_mbox = intmbx;
- if (i <= 4)
- intmbx += 0x8;
- else
- intmbx += 0x4;
+ intmbx += 0x8;
tnapi->consmbox = rcvmbx;
tnapi->prodmbox = sndmbx;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 5a6ffa738396..f818dd215c05 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -131,6 +131,8 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case READ_TIME:
cmd_val |= GLTSYN_CMD_READ_TIME;
break;
+ case ICE_PTP_NOP:
+ break;
}
wr32(hw, GLTSYN_CMD, cmd_val);
@@ -1226,18 +1228,18 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Note there is no equivalent of this operation on E810, as that device
- * always handles all external PHYs internally.
+ * Do not use this function directly. If you want to configure exactly one
+ * port, use ice_ptp_one_port_cmd() instead.
*/
static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1261,6 +1263,8 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
case ADJ_TIME_AT_TIME:
cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
break;
+ case ICE_PTP_NOP:
+ break;
}
/* Tx case */
@@ -1307,6 +1311,39 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
}
/**
+ * ice_ptp_one_port_cmd - Prepare one port for a timer command
+ * @hw: pointer to the HW struct
+ * @configured_port: the port to configure with configured_cmd
+ * @configured_cmd: timer command to prepare on the configured_port
+ *
+ * Prepare the configured_port for the configured_cmd, and prepare all other
+ * ports for ICE_PTP_NOP. This causes the configured_port to execute the
+ * desired command while all other ports perform no operation.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ enum ice_ptp_tmr_cmd cmd;
+ int err;
+
+ if (port == configured_port)
+ cmd = configured_cmd;
+ else
+ cmd = ICE_PTP_NOP;
+
+ err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
@@ -1322,7 +1359,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_one_port_cmd(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
if (err)
return err;
}
@@ -2252,6 +2289,9 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
goto err_unlock;
+ /* Do not perform any action on the main timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
/* Issue the sync to activate the time adjustment */
ice_ptp_exec_tmr_cmd(hw);
@@ -2372,6 +2412,9 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
+ /* Do not perform any action on the main timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
ice_ptp_exec_tmr_cmd(hw);
err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
@@ -2847,6 +2890,8 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
case ADJ_TIME_AT_TIME:
cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
break;
+ case ICE_PTP_NOP:
+ return 0;
}
/* Read, modify, write */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 13547707b8e3..9aa10b0426fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -9,7 +9,8 @@ enum ice_ptp_tmr_cmd {
INIT_INCVAL,
ADJ_TIME,
ADJ_TIME_AT_TIME,
- READ_TIME
+ READ_TIME,
+ ICE_PTP_NOP,
};
enum ice_ptp_serdes {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9f63a10c6f80..1ab787ed254d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4814,6 +4814,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
struct igb_ring *rx_ring)
{
+#if (PAGE_SIZE < 8192)
+ struct e1000_hw *hw = &adapter->hw;
+#endif
+
/* set build_skb and buffer size flags */
clear_ring_build_skb_enabled(rx_ring);
clear_ring_uses_large_buffer(rx_ring);
@@ -4824,10 +4828,9 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
set_ring_build_skb_enabled(rx_ring);
#if (PAGE_SIZE < 8192)
- if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
- return;
-
- set_ring_uses_large_buffer(rx_ring);
+ if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+ rd32(E1000_RCTL) & E1000_RCTL_SBP)
+ set_ring_uses_large_buffer(rx_ring);
#endif
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 988383e20bb8..e06f77ad6106 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -55,6 +55,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
[LMAC_MODE_50G_R] = "50G_R",
[LMAC_MODE_100G_R] = "100G_R",
[LMAC_MODE_USXGMII] = "USXGMII",
+ [LMAC_MODE_USGMII] = "USGMII",
};
/* CGX PHY management internal APIs */
@@ -231,6 +232,9 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
int index, id;
u64 cfg;
+ if (!lmac)
+ return -ENODEV;
+
/* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
@@ -549,15 +553,16 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
struct lmac *lmac = lmac_pdata(lmac_id, cgx);
- u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ u16 max_dmac;
int index, i;
u64 cfg = 0;
int id;
- if (!cgx)
+ if (!cgx || !lmac)
return;
+ max_dmac = lmac->mac_to_index_bmap.max;
id = get_sequence_id_of_lmac(cgx, lmac_id);
mac_ops = cgx->mac_ops;
@@ -730,7 +735,7 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
int corr_reg, uncorr_reg;
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 574114179688..6f7d1dee5830 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -110,6 +110,7 @@ enum LMAC_TYPE {
LMAC_MODE_50G_R = 8,
LMAC_MODE_100G_R = 9,
LMAC_MODE_USXGMII = 10,
+ LMAC_MODE_USGMII = 11,
LMAC_MODE_MAX,
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index b4fcb20c3f4f..af21e2030cff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -355,8 +355,8 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
{
+ u64 cfg, pfc_class_mask_cfg;
rpm_t *rpm = rpmd;
- u64 cfg;
/* ALL pause frames received are completely ignored */
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
@@ -380,9 +380,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
/* Disable all PFC classes */
- cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+ cfg = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
- rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
+ rpm_write(rpm, lmac_id, pfc_class_mask_cfg, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -605,8 +607,11 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ class_en = rpm_read(rpm, lmac_id, pfc_class_mask_cfg);
pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
if (rx_pause) {
@@ -635,10 +640,6 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p
cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
- RPMX_CMRX_PRT_CBFC_CTL;
-
rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index b3f766b970ca..f2b1edf1bb43 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -236,6 +236,11 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
+ if (!pfmap) {
+ dev_err(rvu->dev, "CGX port%d:%d not mapped with PF\n",
+ event->cgx_id, event->lmac_id);
+ return;
+ }
do {
pfid = find_first_bit(&pfmap,
@@ -345,7 +350,7 @@ int rvu_cgx_init(struct rvu *rvu)
rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
- return -ENODEV;
+ return 0;
}
rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index dce3cea00032..8511906cb4e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -806,6 +806,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
mutex_unlock(&pfvf->mbox.lock);
}
+EXPORT_SYMBOL(otx2_txschq_free_one);
void otx2_txschq_stop(struct otx2_nic *pfvf)
{
@@ -1434,7 +1435,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
}
pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
- pp_params.pool_size = numptrs;
+ pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
pp_params.nid = NUMA_NO_NODE;
pp_params.dev = pfvf->dev;
pp_params.dma_dir = DMA_FROM_DEVICE;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index ccaf97bb1ce0..bfddbff7bcdf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -70,7 +70,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
* link config level. These rest of the scheduler can be
* same as hw.txschq_list.
*/
- for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
req->schq[lvl] = 1;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -83,7 +83,7 @@ static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
return PTR_ERR(rsp);
/* Setup transmit scheduler list */
- for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++) {
if (!rsp->schq[lvl])
return -ENOSPC;
@@ -125,19 +125,12 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
- struct nix_txsch_free_req *free_req;
+ int lvl;
- mutex_lock(&pfvf->mbox.lock);
/* free PFC TLx nodes */
- free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
- if (!free_req) {
- mutex_unlock(&pfvf->mbox.lock);
- return -ENOMEM;
- }
-
- free_req->flags = TXSCHQ_FREE_ALL;
- otx2_sync_mbox_msg(&pfvf->mbox);
- mutex_unlock(&pfvf->mbox.lock);
+ for (lvl = 0; lvl <= pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ otx2_txschq_free_one(pfvf, lvl,
+ pfvf->pfc_schq_list[lvl][prio]);
pfvf->pfc_alloc_status[prio] = false;
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index b5d689eeff80..9e3bfbe5c480 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -23,6 +23,8 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 60
+#define OTX2_PAGE_POOL_SZ 2048
+
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index b244c02c5b51..e24afeaea0da 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -84,7 +84,6 @@ dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
}
}
-
static int
wed_txinfo_show(struct seq_file *s, void *data)
{
@@ -127,16 +126,23 @@ wed_txinfo_show(struct seq_file *s, void *data)
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
- DUMP_STR("TX FREE"),
+ DUMP_STR("WED TX FREE"),
DUMP_WED(WED_RX_MIB(0)),
+ DUMP_WED_RING(WED_RING_RX(0)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
+ DUMP_WED(WED_RX_MIB(1)),
+ DUMP_WED_RING(WED_RING_RX(1)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
+
+ DUMP_STR("WED WPDMA TX FREE"),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
- if (!dev)
- return 0;
-
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
@@ -208,10 +214,8 @@ wed_rxinfo_show(struct seq_file *s, void *data)
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
- if (!dev)
- return 0;
-
- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 0a50bb98c5ea..47ea69feb3b2 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -266,6 +266,8 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index b56b187a9097..7e94caca4888 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
ecpf.o rdma.o esw/legacy.o \
- esw/devlink_port.o esw/vporttbl.o esw/qos.o
+ esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 3b88a8bb7082..7d4ceb9b9c16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -38,6 +38,7 @@
#include <net/netevent.h>
#include "en.h"
+#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"
@@ -670,6 +671,11 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
if (err)
goto err_xfrm;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto err_xfrm;
+ }
+
/* check esn */
if (x->props.flags & XFRM_STATE_ESN)
mlx5e_ipsec_update_esn_state(sa_entry);
@@ -678,7 +684,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
err = mlx5_ipsec_create_work(sa_entry);
if (err)
- goto err_xfrm;
+ goto unblock_ipsec;
err = mlx5e_ipsec_create_dwork(sa_entry);
if (err)
@@ -735,6 +741,8 @@ release_work:
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+unblock_ipsec:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
kfree(sa_entry);
NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
@@ -764,6 +772,7 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto sa_entry_free;
@@ -780,6 +789,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
if (sa_entry->work)
kfree(sa_entry->work->data);
kfree(sa_entry->work);
+ mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
kfree(sa_entry);
}
@@ -1055,6 +1065,11 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
pol_entry->x = x;
pol_entry->ipsec = priv->ipsec;
+ if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
+ err = -EBUSY;
+ goto ipsec_busy;
+ }
+
mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
if (err)
@@ -1064,6 +1079,8 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
return 0;
err_fs:
+ mlx5_eswitch_unblock_ipsec(priv->mdev);
+ipsec_busy:
kfree(pol_entry);
NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
@@ -1074,6 +1091,7 @@ static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+ mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}
static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index a1cfddd05bc4..7dba4221993f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -254,6 +254,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_del_flow_rules(rx->sa.rule);
mlx5_destroy_flow_group(rx->sa.group);
mlx5_destroy_flow_table(rx->ft.sa);
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
if (rx == ipsec->rx_esw) {
mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
} else {
@@ -357,6 +359,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
goto err_add;
/* Create FT */
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (rx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
@@ -411,6 +415,8 @@ err_pol_ft:
err_fs:
mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
+ if (rx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
@@ -428,26 +434,19 @@ static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (rx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = rx_create(mdev, ipsec, rx, family);
- mlx5_eswitch_block_mode_unlock(mdev, err);
- if (err)
- goto err_out;
+ if (err) {
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
+ }
skip:
rx->ft.refcnt++;
return 0;
-
-err_out:
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
@@ -456,12 +455,8 @@ static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
if (--rx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
rx_destroy(ipsec->mdev, ipsec, rx, family);
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (rx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
@@ -581,6 +576,8 @@ static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
mlx5_destroy_flow_group(tx->sa.group);
}
mlx5_destroy_flow_table(tx->ft.sa);
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(ipsec->mdev);
mlx5_del_flow_rules(tx->status.rule);
mlx5_destroy_flow_table(tx->ft.status);
}
@@ -621,6 +618,8 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
if (err)
goto err_status_rule;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
+ tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
if (tx->allow_tunnel_mode)
flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
@@ -687,6 +686,8 @@ err_pol_ft:
err_sa_miss:
mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
+ if (tx->allow_tunnel_mode)
+ mlx5_eswitch_unblock_encap(mdev);
mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
mlx5_destroy_flow_table(tx->ft.status);
@@ -720,32 +721,22 @@ static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
if (tx->ft.refcnt)
goto skip;
- if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
- tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
-
- err = mlx5_eswitch_block_mode_trylock(mdev);
+ err = mlx5_eswitch_block_mode(mdev);
if (err)
- goto err_out;
+ return err;
err = tx_create(ipsec, tx, ipsec->roce);
if (err) {
- mlx5_eswitch_block_mode_unlock(mdev, err);
- goto err_out;
+ mlx5_eswitch_unblock_mode(mdev);
+ return err;
}
if (tx == ipsec->tx_esw)
ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);
- mlx5_eswitch_block_mode_unlock(mdev, err);
-
skip:
tx->ft.refcnt++;
return 0;
-
-err_out:
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(mdev);
- return err;
}
static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
@@ -753,19 +744,13 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
if (--tx->ft.refcnt)
return;
- mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
-
if (tx == ipsec->tx_esw) {
mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
}
tx_destroy(ipsec, tx, ipsec->roce);
-
- mlx5_eswitch_unblock_mode_unlock(ipsec->mdev);
-
- if (tx->allow_tunnel_mode)
- mlx5_eswitch_unblock_encap(ipsec->mdev);
+ mlx5_eswitch_unblock_mode(ipsec->mdev);
}
static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 3c254a710006..d8e739cbcbce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -92,6 +92,12 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+#ifdef CONFIG_XFRM_OFFLOAD
+ .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
+ .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+ .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+ .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
+#endif /* CONFIG_XFRM_OFFLOAD */
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
new file mode 100644
index 000000000000..da10e04777cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
+ *result = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+enum esw_vport_ipsec_offload {
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
+};
+
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ bool ipsec_enabled;
+ int err;
+
+ /* Querying IPsec caps only makes sense when generic ipsec_offload
+ * HCA cap is enabled
+ */
+ err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
+ if (err)
+ return err;
+
+ if (!ipsec_enabled) {
+ vport->info.ipsec_crypto_enabled = false;
+ vport->info.ipsec_packet_enabled = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ vport->info.ipsec_crypto_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+ vport->info.ipsec_packet_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
+ ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int err;
+
+ if (vport->vport == MLX5_VPORT_PF)
+ return -EOPNOTSUPP;
+
+ if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
+ err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+
+ if (enable) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ } else {
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
+ if (err)
+ return err;
+
+ /* The generic ipsec_offload cap can be disabled only if both
+ * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+ */
+ if (!vport->info.ipsec_crypto_enabled &&
+ !vport->info.ipsec_packet_enabled) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+ }
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ vport->info.ipsec_crypto_enabled = enable;
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ vport->info.ipsec_packet_enabled = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
+ if (ret)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
+ ret = -EOPNOTSUPP;
+free:
+ kvfree(query_cap);
+ return ret;
+}
+
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
+{
+ /* Old firmware doesn't support ipsec_offload capability for VFs. This
+	 * can be detected by checking the reformat_add_esp_trasport capability -
+	 * when this cap isn't supported, the firmware cannot be trusted about
+	 * what it reports for the ipsec_offload cap.
+ */
+ return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ err = esw_ipsec_offload_supported(dev, vport_num);
+ if (err)
+ return err;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
+ goto free;
+
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ ret = esw_ipsec_offload_supported(dev, vport_num);
+ if (ret)
+ return ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+ if (ret)
+ goto out;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ kvfree(query_cap);
+ return ret;
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index db1c2a076364..6cd7d6497e10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -48,6 +48,7 @@
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"
+#include "en_accel/ipsec.h"
enum {
MLX5_ACTION_NONE = 0,
@@ -831,6 +832,8 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
+
+ err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
out_free:
kfree(query_ctx);
return err;
@@ -913,6 +916,9 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
/* Sync with current vport context */
vport->enabled_events = enabled_events;
vport->enabled = true;
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count++;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
* in smartNIC as it's a vport group manager.
@@ -969,6 +975,10 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+ if (vport->vport != MLX5_VPORT_PF &&
+ (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
+ esw->enabled_ipsec_vf_count--;
+
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -2336,3 +2346,34 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
return mlx5_esw_allowed(esw) ? esw->dev : NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_core_dev);
+
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ return true;
+
+ mutex_lock(&esw->state_lock);
+ if (esw->enabled_ipsec_vf_count) {
+ mutex_unlock(&esw->state_lock);
+ return false;
+ }
+
+ dev->num_ipsec_offloads++;
+ mutex_unlock(&esw->state_lock);
+ return true;
+}
+
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+ if (!mlx5_esw_allowed(esw))
+ /* Failure means no eswitch => core dev is not a PF */
+ return;
+
+ mutex_lock(&esw->state_lock);
+ dev->num_ipsec_offloads--;
+ mutex_unlock(&esw->state_lock);
+}
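
mlx5_eswitch_block_ipsec() and mlx5_eswitch_unblock_ipsec() make VF IPsec offload and other IPsec offload users on the same device mutually exclusive: blocking fails while any VF has IPsec enabled (enabled_ipsec_vf_count), and a successful block bumps num_ipsec_offloads, which in turn makes the devlink ipsec_crypto/ipsec_packet setters below return -EBUSY. A minimal kernel-style sketch of how a caller might pair the two helpers; example_ipsec_create_tables() and example_ipsec_init() are made-up placeholders, not part of this patch:

/* Illustrative fragment only, assuming the helpers added above. */
static int example_ipsec_create_tables(struct mlx5_core_dev *dev)
{
	return 0;	/* hypothetical setup step, stubbed for the sketch */
}

static int example_ipsec_init(struct mlx5_core_dev *dev)
{
	int err;

	/* Refuse while any VF already has IPsec offload enabled via devlink. */
	if (!mlx5_eswitch_block_ipsec(dev))
		return -EBUSY;

	err = example_ipsec_create_tables(dev);
	if (err)
		mlx5_eswitch_unblock_ipsec(dev);	/* drop num_ipsec_offloads again */
	return err;
}
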
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 6fcece69d3be..37ab66e7b403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -163,6 +163,8 @@ struct mlx5_vport_info {
u8 trusted: 1;
u8 roce_enabled: 1;
u8 mig_enabled: 1;
+ u8 ipsec_crypto_enabled: 1;
+ u8 ipsec_packet_enabled: 1;
};
/* Vport context events */
@@ -380,6 +382,7 @@ struct mlx5_eswitch {
struct blocking_notifier_head n_head;
struct xarray paired;
struct mlx5_devcom_comp_dev *devcom;
+ u16 enabled_ipsec_vf_count;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -558,6 +561,16 @@ int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enab
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack);
+#endif /* CONFIG_XFRM_OFFLOAD */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -829,10 +842,8 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err);
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev);
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev);
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);
static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
@@ -857,6 +868,22 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
+bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
+void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
+ struct mlx5_vport *vport);
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num);
+void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -916,13 +943,14 @@ static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}
-static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; }
-
-static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {}
-
-static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {}
+static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
+static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
+static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
+{
+ return false;
+}
-static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {}
+static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 67eab99f95b1..752fb0dfb111 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3641,65 +3641,32 @@ static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
return net_eq(devl_net, netdev_net);
}
-int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev)
+int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- /* Failure means no eswitch => not possible to change eswitch mode */
- devl_unlock(devlink);
+ if (!mlx5_esw_allowed(esw))
return 0;
- }
+ /* Take TC into account */
err = mlx5_esw_try_lock(esw);
- if (err < 0) {
- devl_unlock(devlink);
+ if (err < 0)
return err;
- }
-
- return 0;
-}
-void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
- if (!err)
- esw->offloads.num_block_mode++;
+ esw->offloads.num_block_mode++;
mlx5_esw_unlock(esw);
- devl_unlock(devlink);
+ return 0;
}
-void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev)
+void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
-}
-
-void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
-
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
- return;
-
esw->offloads.num_block_mode--;
up_write(&esw->mode_lock);
}
@@ -3903,38 +3870,28 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- devl_lock(devlink);
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw)) {
- devl_unlock(devlink);
- /* Failure means no eswitch => not possible to change encap */
+ if (!mlx5_esw_allowed(esw))
return true;
- }
down_write(&esw->mode_lock);
if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return false;
}
esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
- devl_unlock(devlink);
return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch *esw;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
- esw = mlx5_devlink_eswitch_get(devlink);
- if (IS_ERR(esw))
+ if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
@@ -4410,3 +4367,172 @@ mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handl
return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
}
+
+#ifdef CONFIG_XFRM_OFFLOAD
+int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec crypto");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_crypto_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec crypto");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_crypto_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
+ goto unlock;
+ }
+
+ vport->info.ipsec_crypto_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ int err = 0;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+ return -EOPNOTSUPP;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+ bool enable,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+ int err;
+
+ esw = mlx5_devlink_eswitch_get(port->devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+ err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support IPsec packet mode");
+ return err;
+ }
+
+ vport = mlx5_devlink_port_vport_get(port);
+ mutex_lock(&esw->state_lock);
+ if (!vport->enabled) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+ goto unlock;
+ }
+
+ if (vport->info.ipsec_packet_enabled == enable)
+ goto unlock;
+
+ if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set IPsec packet mode");
+ goto unlock;
+ }
+
+ vport->info.ipsec_packet_enabled = enable;
+ if (enable)
+ esw->enabled_ipsec_vf_count++;
+ else
+ esw->enabled_ipsec_vf_count--;
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+#endif /* CONFIG_XFRM_OFFLOAD */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 377372f0578a..aa29f09e8356 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -32,6 +32,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
+#include <linux/log2.h>
#include <linux/ptp_clock_kernel.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
@@ -39,10 +40,6 @@
#include "clock.h"
enum {
- MLX5_CYCLES_SHIFT = 31
-};
-
-enum {
MLX5_PIN_MODE_IN = 0x0,
MLX5_PIN_MODE_OUT = 0x1,
};
@@ -93,6 +90,31 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz)
+{
+	/* The optimal shift constant leads to corrections just above 1 scaled ppm.
+ *
+ * Two sets of equations are needed to derive the optimal shift
+ * constant for the cyclecounter.
+ *
+ * dev_freq_khz * 1000 / 2^shift_constant = 1 scaled_ppm
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * Using the two equations together
+ *
+ * dev_freq_khz * 1000 / 1 scaled_ppm = 2^shift_constant
+ * dev_freq_khz * 2^16 / 1 ppb = 2^shift_constant
+ * dev_freq_khz = 2^(shift_constant - 16)
+ *
+ * then yields
+ *
+ * shift_constant = ilog2(dev_freq_khz) + 16
+ */
+
+ return min(ilog2(dev_freq_khz) + 16,
+ ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz));
+}
+
static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
@@ -909,7 +931,7 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
timer->cycles.read = read_internal_timer;
- timer->cycles.shift = MLX5_CYCLES_SHIFT;
+ timer->cycles.shift = mlx5_ptp_shift_constant(dev_freq);
timer->cycles.mult = clocksource_khz2mult(dev_freq,
timer->cycles.shift);
timer->nominal_c_mult = timer->cycles.mult;
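
To put numbers on mlx5_ptp_shift_constant(): for an assumed device_frequency_khz of 156250 (illustrative only, not taken from the patch), ilog2(156250) + 16 = 33, while the overflow bound ilog2((U32_MAX / NSEC_PER_MSEC) * 156250) evaluates to 29, so the smaller value 29 is used. A standalone userspace check of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000U

/* userspace stand-in for the kernel's ilog2() on a non-zero value */
static unsigned int ilog2_u32(uint32_t x)
{
	return 31 - __builtin_clz(x);
}

static uint32_t ptp_shift_constant(uint32_t dev_freq_khz)
{
	uint32_t a = ilog2_u32(dev_freq_khz) + 16;
	uint32_t b = ilog2_u32((UINT32_MAX / NSEC_PER_MSEC) * dev_freq_khz);

	return a < b ? a : b;	/* mirrors the min() in the patch */
}

int main(void)
{
	/* 156250 kHz is an assumed example frequency */
	printf("shift = %u\n", ptp_shift_constant(156250));	/* prints 29 */
	return 0;
}
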
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 70735068cf29..0fd290d776ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -405,7 +405,8 @@ mlxsw_hwmon_module_temp_label_show(struct device *dev,
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
return sprintf(buf, "front panel %03u\n",
- mlxsw_hwmon_attr->type_index);
+ mlxsw_hwmon_attr->type_index + 1 -
+ mlxsw_hwmon_attr->mlxsw_hwmon_dev->sensor_count);
}
static ssize_t
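
The mlxsw hwmon change renumbers the module temperature label relative to the ASIC's base sensors. Assuming sensor_count ASIC ambient sensors come first and the first front-panel module therefore has type_index == sensor_count, the fix makes its label read "front panel 001" instead of reusing the raw attribute index. A trivial standalone check of the arithmetic (the values are assumptions, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int sensor_count = 2;	/* assumed number of ASIC sensors */
	unsigned int type_index = 2;	/* assumed index of the first module sensor */

	printf("old: front panel %03u\n", type_index);				/* 002 */
	printf("new: front panel %03u\n", type_index + 1 - sensor_count);	/* 001 */
	return 0;
}
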
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 41298835a11e..d23f293e285c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -48,6 +48,7 @@
#define MLXSW_I2C_MBOX_SIZE_BITS 12
#define MLXSW_I2C_ADDR_BUF_SIZE 4
#define MLXSW_I2C_BLK_DEF 32
+#define MLXSW_I2C_BLK_MAX 100
#define MLXSW_I2C_RETRY 5
#define MLXSW_I2C_TIMEOUT_MSECS 5000
#define MLXSW_I2C_MAX_DATA_SIZE 256
@@ -444,7 +445,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
} else {
/* No input mailbox is case of initialization query command. */
reg_size = MLXSW_I2C_MAX_DATA_SIZE;
- num = reg_size / mlxsw_i2c->block_size;
+ num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
@@ -653,7 +654,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client)
return -EOPNOTSUPP;
}
- mlxsw_i2c->block_size = max_t(u16, MLXSW_I2C_BLK_DEF,
+ mlxsw_i2c->block_size = min_t(u16, MLXSW_I2C_BLK_MAX,
min_t(u16, quirks->max_read_len,
quirks->max_write_len));
} else {
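
Two arithmetic effects of the mlxsw i2c hunks, checked standalone below: DIV_ROUND_UP() keeps the trailing partial block that plain integer division dropped when reading MLXSW_I2C_MAX_DATA_SIZE bytes, and clamping the probed block size with min_t() against the new MLXSW_I2C_BLK_MAX respects a small adapter quirk limit, where the old max_t() against MLXSW_I2C_BLK_DEF could exceed it (the quirk value here is an assumed example):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MLXSW_I2C_MAX_DATA_SIZE	256
#define MLXSW_I2C_BLK_DEF	32
#define MLXSW_I2C_BLK_MAX	100

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int quirk_limit = 16;	/* assumed adapter max read/write length */

	/* probe: old max_t() could exceed the adapter quirk, new min_t() cannot */
	printf("old block_size = %u\n", max_u(MLXSW_I2C_BLK_DEF, quirk_limit));	/* 32 */
	printf("new block_size = %u\n", min_u(MLXSW_I2C_BLK_MAX, quirk_limit));	/* 16 */

	/* command path: plain division lost the last partial block of a transfer */
	printf("old num = %u\n", MLXSW_I2C_MAX_DATA_SIZE / MLXSW_I2C_BLK_MAX);			/* 2 */
	printf("new num = %u\n", DIV_ROUND_UP(MLXSW_I2C_MAX_DATA_SIZE, MLXSW_I2C_BLK_MAX));	/* 3 */
	return 0;
}
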
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5eb50b265c0b..6351a2dc13bc 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5239,13 +5239,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disable ASPM L1 as that cause random device stop working
* problems as well as full system hangs for some PCIe devices users.
- * Chips from RTL8168h partially have issues with L1.2, but seem
- * to work fine with L1 and L1.1.
*/
if (rtl_aspm_is_safe(tp))
rc = 0;
- else if (tp->mac_version >= RTL_GIGA_MAC_VER_46)
- rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
else
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
tp->aspm_manageable = !rc;
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 3b8780c76b6e..c3e2b4a21d10 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1219,6 +1219,71 @@ fail:
return rc;
}
+/**
+ * efx_mae_allocate_pedit_mac() - allocate pedit MAC address in HW.
+ * @efx: NIC we're installing a pedit MAC address on
+ * @ped: pedit MAC action to be installed
+ *
+ * Attempts to install @ped in HW and populates its id with an index of this
+ * entry in the firmware MAC address table on success.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_ALLOC_IN_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR_LEN !=
+ sizeof(ped->h_addr));
+ memcpy(MCDI_PTR(inbuf, MAE_MAC_ADDR_ALLOC_IN_MAC_ADDR), ped->h_addr,
+ sizeof(ped->h_addr));
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ ped->fw_id = MCDI_DWORD(outbuf, MAE_MAC_ADDR_ALLOC_OUT_MAC_ID);
+ return 0;
+}
+
+/**
+ * efx_mae_free_pedit_mac() - free pedit MAC address in HW.
+ * @efx: NIC we're removing a pedit MAC address from
+ * @ped: pedit MAC action that needs to be freed
+ *
+ * Frees @ped in HW, checks that the firmware did not free a different entry,
+ * and clears the id (which denotes the index of the entry in the MAC address table).
+ */
+void efx_mae_free_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MAC_ADDR_FREE_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MAC_ADDR_FREE_IN_LEN(1));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, MAE_MAC_ADDR_FREE_IN_MAC_ID, ped->fw_id);
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MAC_ADDR_FREE, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (rc || outlen < sizeof(outbuf))
+ return;
+ /* FW freed a different ID than we asked for, should also never happen.
+ * Warn because it means we've now got a different idea to the FW of
+ * what MAC addresses exist, which could cause mayhem later.
+ */
+ if (WARN_ON(MCDI_DWORD(outbuf, MAE_MAC_ADDR_FREE_OUT_FREED_MAC_ID) != ped->fw_id))
+ return;
+ /* We're probably about to free @ped, but let's just make sure its
+ * fw_id is blatted so that it won't look valid if it leaks out.
+ */
+ ped->fw_id = MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL;
+}
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
@@ -1226,15 +1291,27 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
size_t outlen;
int rc;
- MCDI_POPULATE_DWORD_3(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+ MCDI_POPULATE_DWORD_4(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push,
MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop,
- MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap);
+ MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap,
+ MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL,
+ act->do_ttl_dec);
+
+ if (act->src_mac)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ act->src_mac->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
+
+ if (act->dst_mac)
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ act->dst_mac->fw_id);
+ else
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
+ MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_SRC_MAC_ID,
- MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
- MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_DST_MAC_ID,
- MC_CMD_MAE_MAC_ADDR_ALLOC_OUT_MAC_ID_NULL);
if (act->count && !WARN_ON(!act->count->cnt))
MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
act->count->cnt->fw_id);
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index e88e80574f15..8df30bc4f3ba 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -103,6 +103,10 @@ int efx_mae_update_encap_md(struct efx_nic *efx,
int efx_mae_free_encap_md(struct efx_nic *efx,
struct efx_tc_encap_action *encap);
+int efx_mae_allocate_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped);
+void efx_mae_free_pedit_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped);
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act);
int efx_mae_free_action_set(struct efx_nic *efx, u32 fw_id);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3eab1802f6b0..f54200f03e15 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1359,7 +1359,9 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
goto fail;
rc = efx_ptp_insert_eth_multicast_filter(efx);
- if (rc < 0)
+
+ /* Not all firmware variants support this filter */
+ if (rc < 0 && rc != -EPROTONOSUPPORT)
goto fail;
}
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 039180c61c83..047322b04d4f 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -31,6 +31,9 @@ enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev)
return EFX_ENCAP_TYPE_NONE;
}
+#define EFX_TC_HDR_TYPE_TTL_MASK ((u32)0xff)
+/* Hoplimit is stored in the most significant byte in the pedit ipv6 header action */
+#define EFX_TC_HDR_TYPE_HLIMIT_MASK ~((u32)0xff000000)
#define EFX_EFV_PF NULL
/* Look up the representor information (efv) for a device.
* May return NULL for the PF (us), or an error pointer for a device that
@@ -86,6 +89,12 @@ s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
return mport;
}
+static const struct rhashtable_params efx_tc_mac_ht_params = {
+ .key_len = offsetofend(struct efx_tc_mac_pedit_action, h_addr),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_mac_pedit_action, linkage),
+};
+
static const struct rhashtable_params efx_tc_encap_match_ht_params = {
.key_len = offsetof(struct efx_tc_encap_match, linkage),
.key_offset = 0,
@@ -110,6 +119,56 @@ static const struct rhashtable_params efx_tc_recirc_ht_params = {
.head_offset = offsetof(struct efx_tc_recirc_id, linkage),
};
+static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx,
+ unsigned char h_addr[ETH_ALEN],
+ struct netlink_ext_ack *extack)
+{
+ struct efx_tc_mac_pedit_action *ped, *old;
+ int rc;
+
+ ped = kzalloc(sizeof(*ped), GFP_USER);
+ if (!ped)
+ return ERR_PTR(-ENOMEM);
+ memcpy(ped->h_addr, h_addr, ETH_ALEN);
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->mac_ht,
+ &ped->linkage,
+ efx_tc_mac_ht_params);
+ if (old) {
+ /* don't need our new entry */
+ kfree(ped);
+ if (!refcount_inc_not_zero(&old->ref))
+ return ERR_PTR(-EAGAIN);
+ /* existing entry found, ref taken */
+ return old;
+ }
+
+ rc = efx_mae_allocate_pedit_mac(efx, ped);
+ if (rc < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to store pedit MAC address in hw");
+ goto out_remove;
+ }
+
+ /* ref and return */
+ refcount_set(&ped->ref, 1);
+ return ped;
+out_remove:
+ rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage,
+ efx_tc_mac_ht_params);
+ kfree(ped);
+ return ERR_PTR(rc);
+}
+
+static void efx_tc_flower_put_mac(struct efx_nic *efx,
+ struct efx_tc_mac_pedit_action *ped)
+{
+ if (!refcount_dec_and_test(&ped->ref))
+ return; /* still in use */
+ rhashtable_remove_fast(&efx->tc->mac_ht, &ped->linkage,
+ efx_tc_mac_ht_params);
+ efx_mae_free_pedit_mac(efx, ped);
+ kfree(ped);
+}
+
static void efx_tc_free_action_set(struct efx_nic *efx,
struct efx_tc_action_set *act, bool in_hw)
{
@@ -135,6 +194,10 @@ static void efx_tc_free_action_set(struct efx_nic *efx,
list_del(&act->encap_user);
efx_tc_flower_release_encap_md(efx, act->encap_md);
}
+ if (act->src_mac)
+ efx_tc_flower_put_mac(efx, act->src_mac);
+ if (act->dst_mac)
+ efx_tc_flower_put_mac(efx, act->dst_mac);
kfree(act);
}
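
efx_tc_flower_get_mac() and efx_tc_flower_put_mac() deduplicate pedit MAC addresses: every rule that rewrites to the same address shares one entry in mac_ht (and one firmware MAC-table slot), pinned by a refcount and released, together with its fw_id, only when the last rule drops it, as seen in efx_tc_free_action_set() above. A standalone toy model of the same get/put lifecycle (the list-based table and helper names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

/* toy stand-in for one tc mac_ht entry */
struct mac_entry {
	unsigned char h_addr[ETH_ALEN];
	unsigned int ref;
	struct mac_entry *next;
};

static struct mac_entry *mac_table;	/* toy stand-in for efx->tc->mac_ht */

static struct mac_entry *get_mac(const unsigned char *addr)
{
	struct mac_entry *e;

	for (e = mac_table; e; e = e->next)
		if (!memcmp(e->h_addr, addr, ETH_ALEN)) {
			e->ref++;		/* existing entry found, ref taken */
			return e;
		}
	e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;
	memcpy(e->h_addr, addr, ETH_ALEN);
	e->ref = 1;				/* the driver would allocate the HW entry here */
	e->next = mac_table;
	mac_table = e;
	return e;
}

static void put_mac(struct mac_entry *ped)
{
	struct mac_entry **p;

	if (--ped->ref)
		return;				/* still in use by another rule */
	for (p = &mac_table; *p; p = &(*p)->next)
		if (*p == ped) {
			*p = ped->next;		/* the driver would free the HW entry here */
			free(ped);
			return;
		}
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	struct mac_entry *a = get_mac(mac);	/* first rule: allocates */
	struct mac_entry *b = get_mac(mac);	/* second rule: shares, ref == 2 */

	printf("shared entry: %s, ref %u\n", a == b ? "yes" : "no", a->ref);
	put_mac(b);				/* entry survives, ref back to 1 */
	put_mac(a);				/* last user: entry freed */
	return 0;
}
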
@@ -697,6 +760,8 @@ static const char *efx_tc_encap_type_name(enum efx_encap_type typ)
/* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */
enum efx_tc_action_order {
EFX_TC_AO_DECAP,
+ EFX_TC_AO_DEC_TTL,
+ EFX_TC_AO_PEDIT_MAC_ADDRS,
EFX_TC_AO_VLAN_POP,
EFX_TC_AO_VLAN_PUSH,
EFX_TC_AO_COUNT,
@@ -711,6 +776,15 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
case EFX_TC_AO_DECAP:
if (act->decap)
return false;
+ /* PEDIT_MAC_ADDRS must not happen before DECAP, though it
+ * can wait until much later
+ */
+ if (act->dst_mac || act->src_mac)
+ return false;
+
+ /* Decrementing ttl must not happen before DECAP */
+ if (act->do_ttl_dec)
+ return false;
fallthrough;
case EFX_TC_AO_VLAN_POP:
if (act->vlan_pop >= 2)
@@ -730,12 +804,17 @@ static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act,
if (act->count)
return false;
fallthrough;
+ case EFX_TC_AO_PEDIT_MAC_ADDRS:
case EFX_TC_AO_ENCAP:
if (act->encap_md)
return false;
fallthrough;
case EFX_TC_AO_DELIVER:
return !act->deliver;
+ case EFX_TC_AO_DEC_TTL:
+ if (act->encap_md)
+ return false;
+ return !act->do_ttl_dec;
default:
/* Bad caller. Whatever they wanted to do, say they can't. */
WARN_ON_ONCE(1);
@@ -900,6 +979,375 @@ static void efx_tc_flower_release_lhs_actions(struct efx_nic *efx,
efx_tc_flower_put_counter_index(efx, act->count);
}
+/**
+ * struct efx_tc_mangler_state - accumulates 32-bit pedits into fields
+ *
+ * @dst_mac_32: dst_mac[0:3] has been populated
+ * @dst_mac_16: dst_mac[4:5] has been populated
+ * @src_mac_16: src_mac[0:1] has been populated
+ * @src_mac_32: src_mac[2:5] has been populated
+ * @dst_mac: h_dest field of ethhdr
+ * @src_mac: h_source field of ethhdr
+ *
+ * Since FLOW_ACTION_MANGLE comes in 32-bit chunks that do not
+ * necessarily equate to whole fields of the packet header, this
+ * structure is used to hold the cumulative effect of the partial
+ * field pedits that have been processed so far.
+ */
+struct efx_tc_mangler_state {
+ u8 dst_mac_32:1; /* eth->h_dest[0:3] */
+ u8 dst_mac_16:1; /* eth->h_dest[4:5] */
+ u8 src_mac_16:1; /* eth->h_source[0:1] */
+ u8 src_mac_32:1; /* eth->h_source[2:5] */
+ unsigned char dst_mac[ETH_ALEN];
+ unsigned char src_mac[ETH_ALEN];
+};
+
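
As the struct comment says, pedits arrive in 32-bit chunks, so a full destination-MAC rewrite shows up as a word written at ethhdr offset 0 plus the low half of the word at offset 4 (mangle mask 0xffff0000, i.e. the upper half is preserved). The standalone sketch below reassembles aa:bb:cc:dd:ee:ff from such a pair the same way efx_tc_mangle() does with its cpu_to_le32()/cpu_to_le16() copies; the concrete mangle values are an assumed example consistent with that interpretation:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

struct mangler_state {
	unsigned int dst_mac_32:1;	/* dst_mac[0:3] populated */
	unsigned int dst_mac_16:1;	/* dst_mac[4:5] populated */
	unsigned char dst_mac[ETH_ALEN];
};

/* store a host-order value into the MAC buffer in little-endian byte order,
 * mirroring the cpu_to_le32()/cpu_to_le16() + memcpy() in efx_tc_mangle()
 */
static void put_le(unsigned char *dst, uint32_t val, int bytes)
{
	for (int i = 0; i < bytes; i++)
		dst[i] = (val >> (8 * i)) & 0xff;
}

int main(void)
{
	struct mangler_state mung = { 0 };

	/* assumed pedit chunks for "set eth dst aa:bb:cc:dd:ee:ff" */
	put_le(mung.dst_mac, 0xddccbbaa, 4);		/* offset 0, mask 0x0 */
	mung.dst_mac_32 = 1;
	put_le(mung.dst_mac + 4, 0xffee, 2);		/* offset 4, mask 0xffff0000 */
	mung.dst_mac_16 = 1;

	if (mung.dst_mac_32 && mung.dst_mac_16)		/* complete: ready for the HW table */
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mung.dst_mac[0], mung.dst_mac[1], mung.dst_mac[2],
		       mung.dst_mac[3], mung.dst_mac[4], mung.dst_mac[5]);
	return 0;
}
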
+/** efx_tc_complete_mac_mangle() - pull complete field pedits out of @mung
+ * @efx: NIC we're installing a flow rule on
+ * @act: action set (cursor) to update
+ * @mung: accumulated partial mangles
+ * @extack: netlink extended ack for reporting errors
+ *
+ * Check @mung to find any combinations of partial mangles that can be
+ * combined into a complete packet field edit, add that edit to @act,
+ * and consume the partial mangles from @mung.
+ */
+
+static int efx_tc_complete_mac_mangle(struct efx_nic *efx,
+ struct efx_tc_action_set *act,
+ struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_tc_mac_pedit_action *ped;
+
+ if (mung->dst_mac_32 && mung->dst_mac_16) {
+ ped = efx_tc_flower_get_mac(efx, mung->dst_mac, extack);
+ if (IS_ERR(ped))
+ return PTR_ERR(ped);
+
+		/* If dst_mac was already populated, release the old entry */
+ if (act->dst_mac)
+ efx_tc_flower_put_mac(efx, act->dst_mac);
+
+ act->dst_mac = ped;
+
+ /* consume the incomplete state */
+ mung->dst_mac_32 = 0;
+ mung->dst_mac_16 = 0;
+ }
+ if (mung->src_mac_16 && mung->src_mac_32) {
+ ped = efx_tc_flower_get_mac(efx, mung->src_mac, extack);
+ if (IS_ERR(ped))
+ return PTR_ERR(ped);
+
+		/* If src_mac was already populated, release the old entry */
+ if (act->src_mac)
+ efx_tc_flower_put_mac(efx, act->src_mac);
+
+ act->src_mac = ped;
+
+ /* consume the incomplete state */
+ mung->src_mac_32 = 0;
+ mung->src_mac_16 = 0;
+ }
+ return 0;
+}
+
+static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
+ const struct flow_action_entry *fa,
+ struct netlink_ext_ack *extack)
+{
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, ttl):
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK)
+ break;
+
+ /* Adding 0xff is equivalent to decrementing the ttl.
+ * Other added values are not supported.
+ */
+ if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) != U8_MAX)
+ break;
+
+ /* check that we do not decrement ttl twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+ act->do_ttl_dec = 1;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (fa->mangle.offset) {
+ case round_down(offsetof(struct ipv6hdr, hop_limit), 4):
+ /* check that pedit applies to hoplimit only */
+ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK)
+ break;
+
+ /* Adding 0xff is equivalent to decrementing the hoplimit.
+ * Other added values are not supported.
+ */
+ if ((fa->mangle.val >> 24) != U8_MAX)
+ break;
+
+ /* check that we do not decrement hoplimit twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+ act->do_ttl_dec = 1;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: ttl add action type %x %x %x/%x",
+ fa->mangle.htype, fa->mangle.offset,
+ fa->mangle.val, fa->mangle.mask);
+ return -EOPNOTSUPP;
+}
+
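
efx_tc_pedit_add() accepts a FLOW_ACTION_ADD of 0xff on the TTL or hop limit because 8-bit unsigned arithmetic wraps around, so adding 0xff equals subtracting 1, which maps onto the MAE decrement-TTL action. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ttl = 64;

	ttl += 0xff;	/* 8-bit wraparound: (64 + 255) mod 256 == 63 */
	assert(ttl == 63);
	printf("ttl after add 0xff: %u\n", ttl);
	return 0;
}
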
+/**
+ * efx_tc_mangle() - handle a single 32-bit (or less) pedit
+ * @efx: NIC we're installing a flow rule on
+ * @act: action set (cursor) to update
+ * @fa: FLOW_ACTION_MANGLE action metadata
+ * @mung: accumulator for partial mangles
+ * @extack: netlink extended ack for reporting errors
+ * @match: original match used along with the mangle action
+ *
+ * Identify the fields written by a FLOW_ACTION_MANGLE, and record
+ * the partial mangle state in @mung. If this mangle completes an
+ * earlier partial mangle, consume and apply to @act by calling
+ * efx_tc_complete_mac_mangle().
+ */
+
+static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
+ const struct flow_action_entry *fa,
+ struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack,
+ struct efx_tc_match *match)
+{
+ __le32 mac32;
+ __le16 mac16;
+ u8 tr_ttl;
+
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ BUILD_BUG_ON(offsetof(struct ethhdr, h_dest) != 0);
+ BUILD_BUG_ON(offsetof(struct ethhdr, h_source) != 6);
+ if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_PEDIT_MAC_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Pedit mangle mac action violates action order");
+ return -EOPNOTSUPP;
+ }
+ switch (fa->mangle.offset) {
+ case 0:
+ if (fa->mangle.mask) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth.dst32 mangle",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ /* Ethernet address is little-endian */
+ mac32 = cpu_to_le32(fa->mangle.val);
+ memcpy(mung->dst_mac, &mac32, sizeof(mac32));
+ mung->dst_mac_32 = 1;
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ case 4:
+ if (fa->mangle.mask == 0xffff) {
+ mac16 = cpu_to_le16(fa->mangle.val >> 16);
+ memcpy(mung->src_mac, &mac16, sizeof(mac16));
+ mung->src_mac_16 = 1;
+ } else if (fa->mangle.mask == 0xffff0000) {
+ mac16 = cpu_to_le16((u16)fa->mangle.val);
+ memcpy(mung->dst_mac + 4, &mac16, sizeof(mac16));
+ mung->dst_mac_16 = 1;
+ } else {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ case 8:
+ if (fa->mangle.mask) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) of eth.src32 mangle",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ mac32 = cpu_to_le32(fa->mangle.val);
+ memcpy(mung->src_mac + 2, &mac32, sizeof(mac32));
+ mung->src_mac_32 = 1;
+ return efx_tc_complete_mac_mangle(efx, act, mung, extack);
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+ fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, ttl):
+ /* we currently only support pedit IP4 when it applies
+ * to TTL and then only when it can be achieved with a
+ * decrement ttl action
+ */
+
+ /* check that pedit applies to ttl only */
+ if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+ fa->mangle.mask);
+ return -EOPNOTSUPP;
+ }
+
+ /* we can only convert to a dec ttl when we have an
+ * exact match on the ttl field
+ */
+ if (match->mask.ip_ttl != U8_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+ match->mask.ip_ttl);
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we don't try to decrement 0, which equates
+ * to setting the ttl to 0xff
+ */
+ if (match->value.ip_ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: we cannot decrement ttl past 0");
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we do not decrement ttl twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+
+ /* check pedit can be achieved with decrement action */
+ tr_ttl = match->value.ip_ttl - 1;
+ if ((fa->mangle.val & EFX_TC_HDR_TYPE_TTL_MASK) == tr_ttl) {
+ act->do_ttl_dec = 1;
+ return 0;
+ }
+
+ fallthrough;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle on the ttl field (offset is %u)",
+ fa->mangle.offset);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ switch (fa->mangle.offset) {
+ case round_down(offsetof(struct ipv6hdr, hop_limit), 4):
+ /* we currently only support pedit IP6 when it applies
+ * to the hoplimit and then only when it can be achieved
+ * with a decrement hoplimit action
+ */
+
+			/* check that pedit applies to hop_limit only */
+ if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+ fa->mangle.mask);
+
+ return -EOPNOTSUPP;
+ }
+
+ /* we can only convert to a dec ttl when we have an
+ * exact match on the ttl field
+ */
+ if (match->mask.ip_ttl != U8_MAX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+ match->mask.ip_ttl);
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we don't try to decrement 0, which equates
+ * to setting the ttl to 0xff
+ */
+ if (match->value.ip_ttl == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: we cannot decrement hop_limit past 0");
+ return -EOPNOTSUPP;
+ }
+
+ /* check that we do not decrement hoplimit twice */
+ if (!efx_tc_flower_action_order_ok(act,
+ EFX_TC_AO_DEC_TTL)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported: multiple dec ttl");
+ return -EOPNOTSUPP;
+ }
+
+ /* check pedit can be achieved with decrement action */
+ tr_ttl = match->value.ip_ttl - 1;
+ if ((fa->mangle.val >> 24) == tr_ttl) {
+ act->do_ttl_dec = 1;
+ return 0;
+ }
+
+ fallthrough;
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported: only support mangle on the hop_limit field");
+ return -EOPNOTSUPP;
+ }
+ default:
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled mangle htype %u for action rule",
+ fa->mangle.htype);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/**
+ * efx_tc_incomplete_mangle() - check for leftover partial pedits
+ * @mung: accumulator for partial mangles
+ * @extack: netlink extended ack for reporting errors
+ *
+ * Since the MAE can only overwrite whole fields, any partial
+ * field mangle left over on reaching packet delivery (mirred or
+ * end of TC actions) cannot be offloaded. Check for any such
+ * and reject them with -%EOPNOTSUPP.
+ */
+
+static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung,
+ struct netlink_ext_ack *extack)
+{
+ if (mung->dst_mac_32 || mung->dst_mac_16) {
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of destination MAC address");
+ return -EOPNOTSUPP;
+ }
+ if (mung->src_mac_16 || mung->src_mac_32) {
+ NL_SET_ERR_MSG_MOD(extack, "Incomplete pedit of source MAC address");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
@@ -1295,6 +1743,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
struct netlink_ext_ack *extack = tc->common.extack;
const struct ip_tunnel_info *encap_info = NULL;
struct efx_tc_flow_rule *rule = NULL, *old;
+ struct efx_tc_mangler_state mung = {};
struct efx_tc_action_set *act = NULL;
const struct flow_action_entry *fa;
struct efx_rep *from_efv, *to_efv;
@@ -1631,6 +2080,16 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
act->vlan_proto[act->vlan_push] = fa->vlan.proto;
act->vlan_push++;
break;
+ case FLOW_ACTION_ADD:
+ rc = efx_tc_pedit_add(efx, act, fa, extack);
+ if (rc < 0)
+ goto release;
+ break;
+ case FLOW_ACTION_MANGLE:
+ rc = efx_tc_mangle(efx, act, fa, &mung, extack, &match);
+ if (rc < 0)
+ goto release;
+ break;
case FLOW_ACTION_TUNNEL_ENCAP:
if (encap_info) {
/* Can't specify encap multiple times.
@@ -1670,6 +2129,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
}
}
+ rc = efx_tc_incomplete_mangle(&mung, extack);
+ if (rc < 0)
+ goto release;
if (act) {
/* Not shot/redirected, so deliver to default dest */
if (from_efv == EFX_EFV_PF)
@@ -2156,6 +2618,14 @@ static void efx_tc_lhs_free(void *ptr, void *arg)
kfree(rule);
}
+static void efx_tc_mac_free(void *ptr, void *__unused)
+{
+ struct efx_tc_mac_pedit_action *ped = ptr;
+
+ WARN_ON(refcount_read(&ped->ref));
+ kfree(ped);
+}
+
static void efx_tc_flow_free(void *ptr, void *arg)
{
struct efx_tc_flow_rule *rule = ptr;
@@ -2196,6 +2666,9 @@ int efx_init_struct_tc(struct efx_nic *efx)
rc = efx_tc_init_counters(efx);
if (rc < 0)
goto fail_counters;
+ rc = rhashtable_init(&efx->tc->mac_ht, &efx_tc_mac_ht_params);
+ if (rc < 0)
+ goto fail_mac_ht;
rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params);
if (rc < 0)
goto fail_encap_match_ht;
@@ -2233,6 +2706,8 @@ fail_lhs_rule_ht:
fail_match_action_ht:
rhashtable_destroy(&efx->tc->encap_match_ht);
fail_encap_match_ht:
+ rhashtable_destroy(&efx->tc->mac_ht);
+fail_mac_ht:
efx_tc_destroy_counters(efx);
fail_counters:
efx_tc_destroy_encap_actions(efx);
@@ -2268,6 +2743,7 @@ void efx_fini_struct_tc(struct efx_nic *efx)
rhashtable_free_and_destroy(&efx->tc->recirc_ht, efx_tc_recirc_free, efx);
WARN_ON(!ida_is_empty(&efx->tc->recirc_ida));
ida_destroy(&efx->tc->recirc_ida);
+ rhashtable_free_and_destroy(&efx->tc->mac_ht, efx_tc_mac_free, NULL);
efx_tc_fini_counters(efx);
efx_tc_fini_encap_actions(efx);
mutex_unlock(&efx->tc->mutex);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 40d2c803fca8..4dd2c378fd9f 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -18,6 +18,23 @@
#define IS_ALL_ONES(v) (!(typeof (v))~(v))
+/**
+ * struct efx_tc_mac_pedit_action - mac pedit action fields
+ *
+ * @h_addr: mac address field of ethernet header
+ * @linkage: rhashtable reference
+ * @ref: reference count
+ * @fw_id: index of this entry in firmware MAC address table
+ *
+ * MAC address edits are indirected through a table in the hardware
+ */
+struct efx_tc_mac_pedit_action {
+ u8 h_addr[ETH_ALEN];
+ struct rhash_head linkage;
+ refcount_t ref;
+ u32 fw_id; /* index of this entry in firmware MAC address table */
+};
+
static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
{
return !memchr_inv(addr, 0xff, sizeof(*addr));
@@ -25,20 +42,45 @@ static inline bool efx_ipv6_addr_all_ones(struct in6_addr *addr)
struct efx_tc_encap_action; /* see tc_encap_actions.h */
+/**
+ * struct efx_tc_action_set - collection of tc action fields
+ *
+ * @vlan_push: the number of vlan headers to push
+ * @vlan_pop: the number of vlan headers to pop
+ * @decap: used to indicate a tunnel header decapsulation should take place
+ * @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented
+ * @deliver: used to indicate a deliver action should take place
+ * @vlan_tci: tci fields for vlan push actions
+ * @vlan_proto: ethernet types for vlan push actions
+ * @count: counter mapping
+ * @encap_md: encap entry in tc_encap_ht table
+ * @encap_user: linked list of encap users (encap_md->users)
+ * @user: owning action-set-list. Only populated if @encap_md is; used by efx_tc_update_encap() fallback handling
+ * @count_user: linked list of counter users (counter->users)
+ * @dest_mport: destination mport
+ * @src_mac: source mac entry in tc_mac_ht table
+ * @dst_mac: destination mac entry in tc_mac_ht table
+ * @fw_id: index of this entry in firmware actions table
+ * @list: linked list of tc actions
+ *
+ */
struct efx_tc_action_set {
u16 vlan_push:2;
u16 vlan_pop:2;
u16 decap:1;
+ u16 do_ttl_dec:1;
u16 deliver:1;
- __be16 vlan_tci[2]; /* TCIs for vlan_push */
- __be16 vlan_proto[2]; /* Ethertypes for vlan_push */
+ __be16 vlan_tci[2];
+ __be16 vlan_proto[2];
struct efx_tc_counter_index *count;
- struct efx_tc_encap_action *encap_md; /* entry in tc_encap_ht table */
- struct list_head encap_user; /* entry on encap_md->users list */
- struct efx_tc_action_set_list *user; /* Only populated if encap_md */
- struct list_head count_user; /* entry on counter->users list, if encap */
+ struct efx_tc_encap_action *encap_md;
+ struct list_head encap_user;
+ struct efx_tc_action_set_list *user;
+ struct list_head count_user;
u32 dest_mport;
- u32 fw_id; /* index of this entry in firmware actions table */
+ struct efx_tc_mac_pedit_action *src_mac;
+ struct efx_tc_mac_pedit_action *dst_mac;
+ u32 fw_id;
struct list_head list;
};
@@ -220,6 +262,7 @@ struct efx_tc_table_ct { /* TABLE_ID_CONNTRACK_TABLE */
* @counter_ht: Hashtable of TC counters (FW IDs and counter values)
* @counter_id_ht: Hashtable mapping TC counter cookies to counters
* @encap_ht: Hashtable of TC encap actions
+ * @mac_ht: Hashtable of MAC address entries (for pedits)
* @encap_match_ht: Hashtable of TC encap matches
* @match_action_ht: Hashtable of TC match-action rules
* @lhs_rule_ht: Hashtable of TC left-hand (act ct & goto chain) rules
@@ -257,6 +300,7 @@ struct efx_tc_state {
struct rhashtable counter_ht;
struct rhashtable counter_id_ht;
struct rhashtable encap_ht;
+ struct rhashtable mac_ht;
struct rhashtable encap_match_ht;
struct rhashtable match_action_ht;
struct rhashtable lhs_rule_ht;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 535856fffaea..df34e34cc14f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -70,7 +70,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -87,7 +87,7 @@ static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("imx dwmac doesn't support %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
@@ -110,7 +110,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
struct imx_priv_data *dwmac = plat_dat->bsp_priv;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
break;
@@ -125,7 +125,7 @@ static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
@@ -192,8 +192,8 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod
plat_dat = dwmac->plat_dat;
if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
- (plat_dat->interface == PHY_INTERFACE_MODE_RMII) ||
- (plat_dat->interface == PHY_INTERFACE_MODE_MII))
+ (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII))
return;
switch (speed) {
@@ -260,7 +260,7 @@ static int imx_dwmac_mx93_reset(void *priv, void __iomem *ioaddr)
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
- if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
usleep_range(100, 200);
writel(RMII_RESET_SPEED, ioaddr + MAC_CTRL_REG);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index e22ef0d6bc73..0a20c3d24722 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -89,7 +89,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
@@ -118,7 +118,7 @@ static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -130,13 +130,13 @@ static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
{
struct ingenic_mac *mac = plat_dat->bsp_priv;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -149,14 +149,14 @@ static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -169,7 +169,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
@@ -177,7 +177,7 @@ static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
@@ -190,7 +190,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
struct ingenic_mac *mac = plat_dat->bsp_priv;
unsigned int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
@@ -220,7 +220,7 @@ static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
- dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->mac_interface);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index a25c187d3185..2cd6fce5c993 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -117,7 +117,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
}
plat->phy_interface = phy_mode;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->mac_interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 18e84ba693a6..d0aa674ce705 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -50,9 +50,9 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
+ if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
- } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ } else if (plat_dat->mac_interface == PHY_INTERFACE_MODE_RMII) {
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index 7580077383c0..cd796ec04132 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -587,7 +587,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
{
int i;
- plat->interface = priv_plat->phy_mode;
+ plat->mac_interface = priv_plat->phy_mode;
if (priv_plat->mac_wol)
plat->flags |= STMMAC_FLAG_USE_PHY_WOL;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 7db176e8691f..9bf102bbc6a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -236,7 +236,7 @@ static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
struct net_device *ndev = dev_get_drvdata(dwmac->dev);
struct stmmac_priv *priv = netdev_priv(ndev);
- return priv->plat->interface;
+ return priv->plat->mac_interface;
}
static void socfpga_sgmii_config(struct socfpga_dwmac *dwmac, bool enable)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 892612564694..9289bb87c3e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -60,7 +60,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
unsigned int mode;
int err;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_RMII:
mode = STARFIVE_DWMAC_PHY_INFT_RMII;
break;
@@ -72,7 +72,7 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
default:
dev_err(dwmac->dev, "unsupported interface %d\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 3a09085819dc..26ea8c687881 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -171,7 +171,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
clk_rate = clk_get_rate(dwmac->clk_eth_ck);
dwmac->enable_eth_ck = false;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
dwmac->enable_eth_ck = true;
@@ -210,7 +210,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
/* Do not manage other interfaces */
return -EINVAL;
}
@@ -230,7 +230,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
u32 reg = dwmac->mode_reg;
int val;
- switch (plat_dat->interface) {
+ switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
@@ -241,7 +241,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
break;
default:
pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->interface);
+ plat_dat->mac_interface);
/* Do not manage other interfaces */
return -EINVAL;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index c23420863a8d..01e77368eef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1016,7 +1016,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (plat->interface) {
+ switch (plat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -1031,7 +1031,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev,
break;
default:
dev_err(dev, "Unsupported interface mode: %s",
- phy_modes(plat->interface));
+ phy_modes(plat->mac_interface));
return -EINVAL;
}
@@ -1231,7 +1231,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
- plat_dat->interface = interface;
+ plat_dat->mac_interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 33ca5c50bdcd..9a3182b9e767 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1119,7 +1119,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
*/
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
- int interface = priv->plat->interface;
+ int interface = priv->plat->mac_interface;
if (priv->dma_cap.pcs) {
if ((interface == PHY_INTERFACE_MODE_RGMII) ||
@@ -1214,7 +1214,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
- /* Set the platform/firmware specified interface mode */
+ /* Set the platform/firmware specified interface mode. Note, phylink
+ * deals with the PHY interface mode, not the MAC interface mode.
+ */
__set_bit(mode, priv->phylink_config.supported_interfaces);
/* If we have an xpcs, it defines which PHY interfaces are supported. */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ff330423ee66..35f4b1484029 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -419,9 +419,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
- plat->interface = stmmac_of_get_mac_mode(np);
- if (plat->interface < 0)
- plat->interface = plat->phy_interface;
+ plat->mac_interface = stmmac_of_get_mac_mode(np);
+ if (plat->mac_interface < 0)
+ plat->mac_interface = plat->phy_interface;
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 63e510b6860f..88b5b1b47779 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -186,6 +186,7 @@ config CPMAC
config TI_ICSSG_PRUETH
tristate "TI Gigabit PRU Ethernet driver"
select PHYLIB
+ select TI_ICSS_IEP
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
help
@@ -196,4 +197,15 @@ config TI_ICSSG_PRUETH
to support the Ethernet operation. Currently, it supports Ethernet
with 1G and 100M link speed.
+config TI_ICSS_IEP
+ tristate "TI PRU ICSS IEP driver"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ This driver enables support for the PRU-ICSS Industrial Ethernet
+ Peripheral within a PRU-ICSS subsystem present on various TI SoCs.
+
+ To compile this driver as a module, choose M here. The module
+ will be called icss_iep.
+
endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 9176d79c36e1..34fd7a716ba6 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -38,3 +38,4 @@ icssg-prueth-y := k3-cppi-desc-pool.o \
icssg/icssg_mii_cfg.o \
icssg/icssg_stats.o \
icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
new file mode 100644
index 000000000000..4cf2a52e4378
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/timekeeping.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include "icss_iep.h"
+
+#define IEP_MAX_DEF_INC 0xf
+#define IEP_MAX_COMPEN_INC 0xfff
+#define IEP_MAX_COMPEN_COUNT 0xffffff
+
+#define IEP_GLOBAL_CFG_CNT_ENABLE BIT(0)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_MASK GENMASK(7, 4)
+#define IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT 4
+#define IEP_GLOBAL_CFG_COMPEN_INC_MASK GENMASK(19, 8)
+#define IEP_GLOBAL_CFG_COMPEN_INC_SHIFT 8
+
+#define IEP_GLOBAL_STATUS_CNT_OVF BIT(0)
+
+#define IEP_CMP_CFG_SHADOW_EN BIT(17)
+#define IEP_CMP_CFG_CMP0_RST_CNT_EN BIT(0)
+#define IEP_CMP_CFG_CMP_EN(cmp) (GENMASK(16, 1) & (1 << ((cmp) + 1)))
+
+#define IEP_CMP_STATUS(cmp) (1 << (cmp))
+
+#define IEP_SYNC_CTRL_SYNC_EN BIT(0)
+#define IEP_SYNC_CTRL_SYNC_N_EN(n) (GENMASK(2, 1) & (BIT(1) << (n)))
+
+#define IEP_MIN_CMP 0
+#define IEP_MAX_CMP 15
+
+#define ICSS_IEP_64BIT_COUNTER_SUPPORT BIT(0)
+#define ICSS_IEP_SLOW_COMPEN_REG_SUPPORT BIT(1)
+#define ICSS_IEP_SHADOW_MODE_SUPPORT BIT(2)
+
+#define LATCH_INDEX(ts_index) ((ts_index) + 6)
+#define IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(n) BIT(LATCH_INDEX(n))
+#define IEP_CAP_CFG_CAP_ASYNC_EN(n) BIT(LATCH_INDEX(n) + 10)
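+
+/* The two external timestamp (latch) inputs enabled via extts appear to
+ * map onto IEP capture channels 6 and 7, hence the "+ 6" in LATCH_INDEX()
+ * and the CAP6/CAP7 rise registers in the register list below.
+ */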
+
+enum {
+ ICSS_IEP_GLOBAL_CFG_REG,
+ ICSS_IEP_GLOBAL_STATUS_REG,
+ ICSS_IEP_COMPEN_REG,
+ ICSS_IEP_SLOW_COMPEN_REG,
+ ICSS_IEP_COUNT_REG0,
+ ICSS_IEP_COUNT_REG1,
+ ICSS_IEP_CAPTURE_CFG_REG,
+ ICSS_IEP_CAPTURE_STAT_REG,
+
+ ICSS_IEP_CAP6_RISE_REG0,
+ ICSS_IEP_CAP6_RISE_REG1,
+
+ ICSS_IEP_CAP7_RISE_REG0,
+ ICSS_IEP_CAP7_RISE_REG1,
+
+ ICSS_IEP_CMP_CFG_REG,
+ ICSS_IEP_CMP_STAT_REG,
+ ICSS_IEP_CMP0_REG0,
+ ICSS_IEP_CMP0_REG1,
+ ICSS_IEP_CMP1_REG0,
+ ICSS_IEP_CMP1_REG1,
+
+ ICSS_IEP_CMP8_REG0,
+ ICSS_IEP_CMP8_REG1,
+ ICSS_IEP_SYNC_CTRL_REG,
+ ICSS_IEP_SYNC0_STAT_REG,
+ ICSS_IEP_SYNC1_STAT_REG,
+ ICSS_IEP_SYNC_PWIDTH_REG,
+ ICSS_IEP_SYNC0_PERIOD_REG,
+ ICSS_IEP_SYNC1_DELAY_REG,
+ ICSS_IEP_SYNC_START_REG,
+ ICSS_IEP_MAX_REGS,
+};
+
+/**
+ * struct icss_iep_plat_data - Plat data to handle SoC variants
+ * @config: Regmap configuration data
+ * @reg_offs: register offsets to capture offset differences across SoCs
+ * @flags: Flags to represent IEP properties
+ */
+struct icss_iep_plat_data {
+ struct regmap_config *config;
+ u32 reg_offs[ICSS_IEP_MAX_REGS];
+ u32 flags;
+};
+
+struct icss_iep {
+ struct device *dev;
+ void __iomem *base;
+ const struct icss_iep_plat_data *plat_data;
+ struct regmap *map;
+ struct device_node *client_np;
+ unsigned long refclk_freq;
+ int clk_tick_time; /* one refclk tick time in ns */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct mutex ptp_clk_mutex; /* PHC access serializer */
+ spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
+ u32 def_inc;
+ s16 slow_cmp_inc;
+ u32 slow_cmp_count;
+ const struct icss_iep_clockops *ops;
+ void *clockops_data;
+ u32 cycle_time_ns;
+ u32 perout_enabled;
+ bool pps_enabled;
+ int cap_cmp_irq;
+ u64 period;
+ u32 latch_enable;
+};
+
+/**
+ * icss_iep_get_count_hi() - Get the upper 32 bits of the IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: the upper 32 bits of the IEP counter
+ */
+int icss_iep_get_count_hi(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep && (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT))
+ val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_hi);
+
+/**
+ * icss_iep_get_count_low() - Get the lower 32 bits of the IEP counter
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: the lower 32 bits of the IEP counter
+ */
+int icss_iep_get_count_low(struct icss_iep *iep)
+{
+ u32 val = 0;
+
+ if (iep)
+ val = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_count_low);
+
+/**
+ * icss_iep_get_ptp_clock_idx() - Get PTP clock index using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ *
+ * Return: PTP clock index, -1 if not registered
+ */
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep)
+{
+ if (!iep || !iep->ptp_clock)
+ return -1;
+ return ptp_clock_index(iep->ptp_clock);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_ptp_clock_idx);
+
+static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
+{
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns), iep->base +
+ iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+ writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
+
+/**
+ * icss_iep_settime() - Set time of the PTP clock using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ * @ns: Time to be set in nanoseconds
+ *
+ * This API uses writel() instead of regmap_write() for write operations as
+ * regmap_write() is too slow and this API is time sensitive.
+ */
+static void icss_iep_settime(struct icss_iep *iep, u64 ns)
+{
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->settime) {
+ iep->ops->settime(iep->clockops_data, ns);
+ return;
+ }
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ if (iep->pps_enabled || iep->perout_enabled)
+ writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
+
+ icss_iep_set_counter(iep, ns);
+
+ if (iep->pps_enabled || iep->perout_enabled) {
+ icss_iep_update_to_next_boundary(iep, ns);
+ writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
+ iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
+ }
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+}
+
+/**
+ * icss_iep_gettime() - Get time of the PTP clock using IEP driver
+ * @iep: Pointer to structure representing IEP.
+ * @sts: Pointer to structure representing PTP system timestamp.
+ *
+ * This API uses readl() instead of regmap_read() for read operations as
+ * regmap_read() is too slow and this API is time sensitive.
+ *
+ * Return: The current timestamp of the PTP clock using IEP driver
+ */
+static u64 icss_iep_gettime(struct icss_iep *iep,
+ struct ptp_system_timestamp *sts)
+{
+ u32 ts_hi = 0, ts_lo;
+ unsigned long flags;
+
+ if (iep->ops && iep->ops->gettime)
+ return iep->ops->gettime(iep->clockops_data, sts);
+
+ /* use local_irq_x() to make it work for both RT/non-RT */
+ local_irq_save(flags);
+
+ /* no need to play with hi-lo, hi is latched when lo is read */
+ ptp_read_system_prets(sts);
+ ts_lo = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+ ptp_read_system_postts(sts);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ ts_hi = readl(iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
+
+ local_irq_restore(flags);
+
+ return (u64)ts_lo | (u64)ts_hi << 32;
+}
+
+static void icss_iep_enable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ IEP_GLOBAL_CFG_CNT_ENABLE);
+}
+
+static void icss_iep_disable(struct icss_iep *iep)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_CNT_ENABLE,
+ 0);
+}
+
+static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+{
+ u32 cycle_time;
+ int cmp;
+
+ cycle_time = iep->cycle_time_ns - iep->def_inc;
+
+ icss_iep_disable(iep);
+
+ /* disable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, 0);
+
+ /* enable shadow mode */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_SHADOW_EN, IEP_CMP_CFG_SHADOW_EN);
+
+ /* clear counters */
+ icss_iep_set_counter(iep, 0);
+
+ /* clear overflow status */
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_STATUS_REG,
+ IEP_GLOBAL_STATUS_CNT_OVF,
+ IEP_GLOBAL_STATUS_CNT_OVF);
+
+ /* clear compare status */
+ for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
+ }
+
+ /* enable reset counter on CMP0 event */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN,
+ IEP_CMP_CFG_CMP0_RST_CNT_EN);
+ /* enable compare */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(0),
+ IEP_CMP_CFG_CMP_EN(0));
+
+ /* set CMP0 value to cycle time */
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG0, cycle_time);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP0_REG1, cycle_time);
+
+ icss_iep_set_counter(iep, 0);
+ icss_iep_enable(iep);
+}
+
+static void icss_iep_set_default_inc(struct icss_iep *iep, u8 def_inc)
+{
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_DEFAULT_INC_MASK,
+ def_inc << IEP_GLOBAL_CFG_DEFAULT_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_inc(struct icss_iep *iep, u16 compen_inc)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_inc > IEP_MAX_COMPEN_INC) {
+ dev_err(dev, "%s: too high compensation inc %d\n",
+ __func__, compen_inc);
+ compen_inc = IEP_MAX_COMPEN_INC;
+ }
+
+ regmap_update_bits(iep->map, ICSS_IEP_GLOBAL_CFG_REG,
+ IEP_GLOBAL_CFG_COMPEN_INC_MASK,
+ compen_inc << IEP_GLOBAL_CFG_COMPEN_INC_SHIFT);
+}
+
+static void icss_iep_set_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ struct device *dev = regmap_get_device(iep->map);
+
+ if (compen_count > IEP_MAX_COMPEN_COUNT) {
+ dev_err(dev, "%s: too high compensation count %d\n",
+ __func__, compen_count);
+ compen_count = IEP_MAX_COMPEN_COUNT;
+ }
+
+ regmap_write(iep->map, ICSS_IEP_COMPEN_REG, compen_count);
+}
+
+static void icss_iep_set_slow_compensation_count(struct icss_iep *iep,
+ u32 compen_count)
+{
+ regmap_write(iep->map, ICSS_IEP_SLOW_COMPEN_REG, compen_count);
+}
+
+/* PTP PHC operations */
+static int icss_iep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ u32 cyc_count;
+ u16 cmp_inc;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ /* ppb is the amount of frequency adjustment requested, in parts per
+ * billion (relative to 1 GHz), e.g. 100 ppb means the clock must be
+ * sped up by 100 Hz, i.e. after 1 second (1 billion ns) of clock time
+ * we should have counted 100 more ns.
+ * IEP slow compensation is used to achieve continuous frequency
+ * adjustment. There are two parts: the cycle time and the adjustment
+ * per cycle. The simplest case is a 1 s cycle time; the adjustment
+ * per cycle would then be (def_inc + ppb).
+ * The cycle time has to be chosen based on the magnitude of ppb,
+ * e.g. the smaller the ppb, the larger the cycle time.
+ * The minimum adjustment per cycle is +-1 ns, so reduce the cycle
+ * time until a 1 ns per-cycle adjustment is sufficient:
+ * 1 ppb = 1 s cycle time & 1 ns adjust per cycle
+ * 1000 ppb = 1/1000 s cycle time & 1 ns adjust per cycle
+ */
+
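+ /* Worked example (illustrative numbers, not taken from a datasheet):
+ * with a 250 MHz refclk, def_inc = clk_tick_time = 4 ns. A request of
+ * +100 ppb with cycle_time_ns == 0 gives slow_cmp_inc = 1 and
+ * cyc_count = 1e9 / 100 / 4 = 2,500,000 refclk cycles (10 ms), so one
+ * extra ns is added every 10 ms, i.e. +100 ns per second as requested.
+ */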
+ if (iep->cycle_time_ns)
+ iep->slow_cmp_inc = iep->clk_tick_time; /* 4ns adj per cycle */
+ else
+ iep->slow_cmp_inc = 1; /* 1ns adjust per cycle */
+
+ if (ppb < 0) {
+ iep->slow_cmp_inc = -iep->slow_cmp_inc;
+ ppb = -ppb;
+ }
+
+ cyc_count = NSEC_PER_SEC; /* 1s cycle time @1GHz */
+ cyc_count /= ppb; /* cycle time per ppb */
+
+ /* slow_cmp_count is decremented every clock cycle, e.g. @250MHz */
+ if (!iep->cycle_time_ns)
+ cyc_count /= iep->clk_tick_time;
+ iep->slow_cmp_count = cyc_count;
+
+ /* iep->clk_tick_time is def_inc */
+ cmp_inc = iep->clk_tick_time + iep->slow_cmp_inc;
+ icss_iep_set_compensation_inc(iep, cmp_inc);
+ icss_iep_set_slow_compensation_count(iep, iep->slow_cmp_count);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ s64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ if (iep->ops && iep->ops->adjtime) {
+ iep->ops->adjtime(iep->clockops_data, delta);
+ } else {
+ ns = icss_iep_gettime(iep, NULL);
+ ns += delta;
+ icss_iep_settime(iep, ns);
+ }
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = icss_iep_gettime(iep, sts);
+ *ts = ns_to_timespec64(ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int icss_iep_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+ ns = timespec64_to_ns(ts);
+ icss_iep_settime(iep, ns);
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return 0;
+}
+
+static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
+{
+ u64 ns, p_ns;
+ u32 offset;
+
+ ns = icss_iep_gettime(iep, NULL);
+ if (start_ns < ns)
+ start_ns = ns;
+ p_ns = iep->period;
+ /* Round up to next period boundary */
+ start_ns += p_ns - 1;
+ offset = do_div(start_ns, p_ns);
+ start_ns = start_ns * p_ns;
+ /* If it is too close to update, shift to next boundary */
+ if (p_ns - offset < 10)
+ start_ns += p_ns;
+
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(start_ns));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(start_ns));
+}
+
+static int icss_iep_perout_enable_hw(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ int ret;
+ u64 cmp;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp);
+ if (ret)
+ return ret;
+
+ if (on) {
+ /* Configure CMP */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+ /* Configure SYNC, 1ms pulse width */
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000);
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+ }
+ } else {
+ if (on) {
+ u64 start_ns;
+
+ iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+ req->period.nsec;
+ start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+ + req->period.nsec;
+ icss_iep_update_to_next_boundary(iep, start_ns);
+
+ /* Enable Sync in single shot mode */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+ IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+ /* Enable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
+ } else {
+ /* Disable CMP 1 */
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+ IEP_CMP_CFG_CMP_EN(1), 0);
+
+ /* clear CMP regs */
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+ /* Disable sync */
+ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int icss_iep_perout_enable(struct icss_iep *iep,
+ struct ptp_perout_request *req, int on)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->pps_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->perout_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+ ret = icss_iep_perout_enable_hw(iep, req, on);
+ if (!ret)
+ iep->perout_enabled = !!on;
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_pps_enable(struct icss_iep *iep, int on)
+{
+ struct ptp_clock_request rq;
+ struct timespec64 ts;
+ unsigned long flags;
+ int ret = 0;
+ u64 ns;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->perout_enabled) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (iep->pps_enabled == !!on)
+ goto exit;
+
+ spin_lock_irqsave(&iep->irq_lock, flags);
+
+ rq.perout.index = 0;
+ if (on) {
+ ns = icss_iep_gettime(iep, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ } else {
+ ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ }
+
+ if (!ret)
+ iep->pps_enabled = !!on;
+
+ spin_unlock_irqrestore(&iep->irq_lock, flags);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_extts_enable(struct icss_iep *iep, u32 index, int on)
+{
+ u32 val, cap;
+ int ret = 0;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ if (iep->ops && iep->ops->extts_enable) {
+ ret = iep->ops->extts_enable(iep->clockops_data, index, on);
+ goto exit;
+ }
+
+ if (((iep->latch_enable & BIT(index)) >> index) == on)
+ goto exit;
+
+ regmap_read(iep->map, ICSS_IEP_CAPTURE_CFG_REG, &val);
+ cap = IEP_CAP_CFG_CAP_ASYNC_EN(index) | IEP_CAP_CFG_CAPNR_1ST_EVENT_EN(index);
+ if (on) {
+ val |= cap;
+ iep->latch_enable |= BIT(index);
+ } else {
+ val &= ~cap;
+ iep->latch_enable &= ~BIT(index);
+ }
+ regmap_write(iep->map, ICSS_IEP_CAPTURE_CFG_REG, val);
+
+exit:
+ mutex_unlock(&iep->ptp_clk_mutex);
+
+ return ret;
+}
+
+static int icss_iep_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct icss_iep *iep = container_of(ptp, struct icss_iep, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return icss_iep_perout_enable(iep, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return icss_iep_pps_enable(iep, on);
+ case PTP_CLK_REQ_EXTTS:
+ return icss_iep_extts_enable(iep, rq->extts.index, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
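+/* Template PHC description: icss_iep_probe() copies this into
+ * iep->ptp_info, and icss_iep_init() then sets n_per_out, pps and
+ * n_ext_ts when the registered clockops provide perout/extts support.
+ */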
+static struct ptp_clock_info icss_iep_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "ICSS IEP timer",
+ .max_adj = 10000000,
+ .adjfine = icss_iep_ptp_adjfine,
+ .adjtime = icss_iep_ptp_adjtime,
+ .gettimex64 = icss_iep_ptp_gettimeex,
+ .settime64 = icss_iep_ptp_settime,
+ .enable = icss_iep_ptp_enable,
+};
+
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx)
+{
+ struct platform_device *pdev;
+ struct device_node *iep_np;
+ struct icss_iep *iep;
+
+ iep_np = of_parse_phandle(np, "ti,iep", idx);
+ if (!iep_np || !of_device_is_available(iep_np))
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(iep_np);
+ of_node_put(iep_np);
+
+ if (!pdev)
+ /* probably IEP not yet probed */
+ return ERR_PTR(-EPROBE_DEFER);
+
+ iep = platform_get_drvdata(pdev);
+ if (!iep)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ device_lock(iep->dev);
+ if (iep->client_np) {
+ device_unlock(iep->dev);
+ dev_err(iep->dev, "IEP is already acquired by %s\n",
+ iep->client_np->name);
+ return ERR_PTR(-EBUSY);
+ }
+ iep->client_np = np;
+ device_unlock(iep->dev);
+ get_device(iep->dev);
+
+ return iep;
+}
+EXPORT_SYMBOL_GPL(icss_iep_get_idx);
+
+struct icss_iep *icss_iep_get(struct device_node *np)
+{
+ return icss_iep_get_idx(np, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_get);
+
+void icss_iep_put(struct icss_iep *iep)
+{
+ device_lock(iep->dev);
+ iep->client_np = NULL;
+ device_unlock(iep->dev);
+ put_device(iep->dev);
+}
+EXPORT_SYMBOL_GPL(icss_iep_put);
+
+void icss_iep_init_fw(struct icss_iep *iep)
+{
+ /* start IEP for FW use in raw 64bit mode, no PTP support */
+ iep->clk_tick_time = iep->def_inc;
+ iep->cycle_time_ns = 0;
+ iep->ops = NULL;
+ iep->clockops_data = NULL;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, 0);
+}
+EXPORT_SYMBOL_GPL(icss_iep_init_fw);
+
+void icss_iep_exit_fw(struct icss_iep *iep)
+{
+ icss_iep_disable(iep);
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit_fw);
+
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns)
+{
+ int ret = 0;
+
+ iep->cycle_time_ns = cycle_time_ns;
+ iep->clk_tick_time = iep->def_inc;
+ iep->ops = clkops;
+ iep->clockops_data = clockops_data;
+ icss_iep_set_default_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_inc(iep, iep->def_inc);
+ icss_iep_set_compensation_count(iep, 0);
+ regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, iep->refclk_freq / 10); /* 100 ms pulse */
+ regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+ if (iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT)
+ icss_iep_set_slow_compensation_count(iep, 0);
+
+ if (!(iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) ||
+ !(iep->plat_data->flags & ICSS_IEP_SLOW_COMPEN_REG_SUPPORT))
+ goto skip_perout;
+
+ if (iep->ops && iep->ops->perout_enable) {
+ iep->ptp_info.n_per_out = 1;
+ iep->ptp_info.pps = 1;
+ }
+
+ if (iep->ops && iep->ops->extts_enable)
+ iep->ptp_info.n_ext_ts = 2;
+
+skip_perout:
+ if (cycle_time_ns)
+ icss_iep_enable_shadow_mode(iep);
+ else
+ icss_iep_enable(iep);
+ icss_iep_settime(iep, ktime_get_real_ns());
+
+ iep->ptp_clock = ptp_clock_register(&iep->ptp_info, iep->dev);
+ if (IS_ERR(iep->ptp_clock)) {
+ ret = PTR_ERR(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ dev_err(iep->dev, "Failed to register ptp clk %d\n", ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(icss_iep_init);
+
+int icss_iep_exit(struct icss_iep *iep)
+{
+ if (iep->ptp_clock) {
+ ptp_clock_unregister(iep->ptp_clock);
+ iep->ptp_clock = NULL;
+ }
+ icss_iep_disable(iep);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icss_iep_exit);
+
+static int icss_iep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct icss_iep *iep;
+ struct clk *iep_clk;
+
+ iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
+ if (!iep)
+ return -ENOMEM;
+
+ iep->dev = dev;
+ iep->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(iep->base))
+ return -ENODEV;
+
+ iep_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(iep_clk))
+ return PTR_ERR(iep_clk);
+
+ iep->refclk_freq = clk_get_rate(iep_clk);
+
+ iep->def_inc = NSEC_PER_SEC / iep->refclk_freq; /* ns per clock tick */
+ if (iep->def_inc > IEP_MAX_DEF_INC) {
+ dev_err(dev, "Failed to set def_inc %d. IEP_clock is too slow to be supported\n",
+ iep->def_inc);
+ return -EINVAL;
+ }
+
+ iep->plat_data = device_get_match_data(dev);
+ if (!iep->plat_data)
+ return -EINVAL;
+
+ iep->map = devm_regmap_init(dev, NULL, iep, iep->plat_data->config);
+ if (IS_ERR(iep->map)) {
+ dev_err(dev, "Failed to create regmap for IEP %ld\n",
+ PTR_ERR(iep->map));
+ return PTR_ERR(iep->map);
+ }
+
+ iep->ptp_info = icss_iep_ptp_info;
+ mutex_init(&iep->ptp_clk_mutex);
+ spin_lock_init(&iep->irq_lock);
+ dev_set_drvdata(dev, iep);
+ icss_iep_disable(iep);
+
+ return 0;
+}
+
+static bool am654_icss_iep_valid_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ICSS_IEP_GLOBAL_CFG_REG ... ICSS_IEP_SYNC_START_REG:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static int icss_iep_regmap_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct icss_iep *iep = context;
+
+ writel(val, iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static int icss_iep_regmap_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct icss_iep *iep = context;
+
+ *val = readl(iep->base + iep->plat_data->reg_offs[reg]);
+
+ return 0;
+}
+
+static struct regmap_config am654_icss_iep_regmap_config = {
+ .name = "icss iep",
+ .reg_stride = 1,
+ .reg_write = icss_iep_regmap_write,
+ .reg_read = icss_iep_regmap_read,
+ .writeable_reg = am654_icss_iep_valid_reg,
+ .readable_reg = am654_icss_iep_valid_reg,
+ .fast_io = 1,
+};
+
+static const struct icss_iep_plat_data am654_icss_iep_plat_data = {
+ .flags = ICSS_IEP_64BIT_COUNTER_SUPPORT |
+ ICSS_IEP_SLOW_COMPEN_REG_SUPPORT |
+ ICSS_IEP_SHADOW_MODE_SUPPORT,
+ .reg_offs = {
+ [ICSS_IEP_GLOBAL_CFG_REG] = 0x00,
+ [ICSS_IEP_COMPEN_REG] = 0x08,
+ [ICSS_IEP_SLOW_COMPEN_REG] = 0x0C,
+ [ICSS_IEP_COUNT_REG0] = 0x10,
+ [ICSS_IEP_COUNT_REG1] = 0x14,
+ [ICSS_IEP_CAPTURE_CFG_REG] = 0x18,
+ [ICSS_IEP_CAPTURE_STAT_REG] = 0x1c,
+
+ [ICSS_IEP_CAP6_RISE_REG0] = 0x50,
+ [ICSS_IEP_CAP6_RISE_REG1] = 0x54,
+
+ [ICSS_IEP_CAP7_RISE_REG0] = 0x60,
+ [ICSS_IEP_CAP7_RISE_REG1] = 0x64,
+
+ [ICSS_IEP_CMP_CFG_REG] = 0x70,
+ [ICSS_IEP_CMP_STAT_REG] = 0x74,
+ [ICSS_IEP_CMP0_REG0] = 0x78,
+ [ICSS_IEP_CMP0_REG1] = 0x7c,
+ [ICSS_IEP_CMP1_REG0] = 0x80,
+ [ICSS_IEP_CMP1_REG1] = 0x84,
+
+ [ICSS_IEP_CMP8_REG0] = 0xc0,
+ [ICSS_IEP_CMP8_REG1] = 0xc4,
+ [ICSS_IEP_SYNC_CTRL_REG] = 0x180,
+ [ICSS_IEP_SYNC0_STAT_REG] = 0x188,
+ [ICSS_IEP_SYNC1_STAT_REG] = 0x18c,
+ [ICSS_IEP_SYNC_PWIDTH_REG] = 0x190,
+ [ICSS_IEP_SYNC0_PERIOD_REG] = 0x194,
+ [ICSS_IEP_SYNC1_DELAY_REG] = 0x198,
+ [ICSS_IEP_SYNC_START_REG] = 0x19c,
+ },
+ .config = &am654_icss_iep_regmap_config,
+};
+
+static const struct of_device_id icss_iep_of_match[] = {
+ {
+ .compatible = "ti,am654-icss-iep",
+ .data = &am654_icss_iep_plat_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, icss_iep_of_match);
+
+static struct platform_driver icss_iep_driver = {
+ .driver = {
+ .name = "icss-iep",
+ .of_match_table = icss_iep_of_match,
+ },
+ .probe = icss_iep_probe,
+};
+module_platform_driver(icss_iep_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI ICSS IEP driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.h b/drivers/net/ethernet/ti/icssg/icss_iep.h
new file mode 100644
index 000000000000..803a4b714893
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments ICSSG Industrial Ethernet Peripheral (IEP) Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#ifndef __NET_TI_ICSS_IEP_H
+#define __NET_TI_ICSS_IEP_H
+
+#include <linux/mutex.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/regmap.h>
+
+struct icss_iep;
+extern const struct icss_iep_clockops prueth_iep_clockops;
+
+/* Firmware specific clock operations */
+struct icss_iep_clockops {
+ void (*settime)(void *clockops_data, u64 ns);
+ void (*adjtime)(void *clockops_data, s64 delta);
+ u64 (*gettime)(void *clockops_data, struct ptp_system_timestamp *sts);
+ int (*perout_enable)(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp);
+ int (*extts_enable)(void *clockops_data, u32 index, int on);
+};
+
+struct icss_iep *icss_iep_get(struct device_node *np);
+struct icss_iep *icss_iep_get_idx(struct device_node *np, int idx);
+void icss_iep_put(struct icss_iep *iep);
+int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
+ void *clockops_data, u32 cycle_time_ns);
+int icss_iep_exit(struct icss_iep *iep);
+int icss_iep_get_count_low(struct icss_iep *iep);
+int icss_iep_get_count_hi(struct icss_iep *iep);
+int icss_iep_get_ptp_clock_idx(struct icss_iep *iep);
+void icss_iep_init_fw(struct icss_iep *iep);
+void icss_iep_exit_fw(struct icss_iep *iep);
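+
+/* Typical consumer flow (illustrative sketch; "priv" and "cycle_ns" are
+ * placeholder names, not part of this API):
+ *
+ *	iep = icss_iep_get(dev->of_node);   (looked up via the "ti,iep" phandle)
+ *	icss_iep_init(iep, &prueth_iep_clockops, priv, cycle_ns);
+ *	phc_index = icss_iep_get_ptp_clock_idx(iep);
+ *	...
+ *	icss_iep_exit(iep);
+ *	icss_iep_put(iep);
+ */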
+
+#endif /* __NET_TI_ICSS_IEP_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index ab648d3efe85..933b84666574 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -210,6 +210,10 @@ void icssg_config_ipg(struct prueth_emac *emac)
case SPEED_100:
icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
break;
+ case SPEED_10:
+ /* IPG for 10M is the same as for 100M */
+ icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ break;
default:
/* Other links speeds not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
@@ -440,6 +444,9 @@ void icssg_config_set_speed(struct prueth_emac *emac)
case SPEED_100:
fw_speed = FW_LINK_SPEED_100M;
break;
+ case SPEED_10:
+ fw_speed = FW_LINK_SPEED_10M;
+ break;
default:
/* Other links speeds not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 02c312f01d10..a27ec1dcc8d5 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -109,6 +109,26 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
*(data++) = emac->stats[i];
}
+static int emac_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = icss_iep_get_ptp_clock_idx(emac->iep);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static int emac_set_channels(struct net_device *ndev,
struct ethtool_channels *ch)
{
@@ -176,6 +196,7 @@ const struct ethtool_ops icssg_ethtool_ops = {
.get_sset_count = emac_get_sset_count,
.get_ethtool_stats = emac_get_ethtool_stats,
.get_strings = emac_get_strings,
+ .get_ts_info = emac_get_ts_info,
.get_channels = emac_get_channels,
.set_channels = emac_set_channels,
.get_link_ksettings = emac_get_link_ksettings,
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 47b941fb0198..410612f43cbd 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -56,6 +56,8 @@
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
+#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+
static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
struct prueth_rx_chn *rx_chn,
int max_rflows)
@@ -471,6 +473,37 @@ static int prueth_dma_rx_push(struct prueth_emac *emac,
desc_rx, desc_dma);
}
+static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+
+static void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
{
struct prueth_rx_chn *rx_chn = &emac->rx_chns;
@@ -480,6 +513,7 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
struct sk_buff *skb, *new_skb;
dma_addr_t desc_dma, buf_dma;
void **swdata;
+ u32 *psdata;
int ret;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
@@ -497,6 +531,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -557,6 +596,86 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static int emac_get_tx_ts(struct prueth_emac *emac,
+ struct emac_tx_ts_response *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_TS_POP_SLICE0 : ICSSG_TS_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+ /* return the buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_TS_PUSH_SLICE0 : ICSSG_TS_PUSH_SLICE1, addr);
+
+ return 0;
+}
+
+static void tx_ts_work(struct prueth_emac *emac)
+{
+ struct skb_shared_hwtstamps ssh;
+ struct emac_tx_ts_response tsr;
+ struct sk_buff *skb;
+ int ret = 0;
+ u32 hi_sw;
+ u64 ns;
+
+ /* There may be more than one pending request */
+ while (1) {
+ ret = emac_get_tx_ts(emac, &tsr);
+ if (ret) /* nothing more */
+ break;
+
+ if (tsr.cookie >= PRUETH_MAX_TX_TS_REQUESTS ||
+ !emac->tx_ts_skb[tsr.cookie]) {
+ netdev_err(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ tsr.cookie);
+ break;
+ }
+
+ skb = emac->tx_ts_skb[tsr.cookie];
+ emac->tx_ts_skb[tsr.cookie] = NULL; /* free slot */
+ if (!skb) {
+ netdev_err(emac->ndev, "Driver Bug! got NULL skb\n");
+ break;
+ }
+
+ hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, tsr.hi_ts, tsr.lo_ts,
+ IEP_DEFAULT_CYCLE_TIME_NS);
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+
+ if (atomic_dec_and_test(&emac->tx_ts_pending)) /* no more? */
+ break;
+ }
+}
+
+static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
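+
+/* TX timestamp cookie life cycle: a slot is reserved here from
+ * emac_ndo_start_xmit(), then filled with a reference to the skb once the
+ * timestamp request has been queued to the firmware, and finally released
+ * either by tx_ts_work() when the timestamp comes back or by the error and
+ * cleanup paths.
+ */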
+
/**
* emac_ndo_start_xmit - EMAC Transmit function
* @skb: SKB pointer
@@ -577,6 +696,8 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
struct prueth_tx_chn *tx_chn;
dma_addr_t desc_dma, buf_dma;
int i, ret = 0, q_idx;
+ bool in_tx_ts = false;
+ int tx_ts_cookie;
void **swdata;
u32 pkt_len;
u32 *epib;
@@ -608,6 +729,18 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
epib = first_desc->epib;
epib[0] = 0;
epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+ in_tx_ts = true;
+ }
+ }
/* set dst tag to indicate internal qid at the firmware which is at
* bit8..bit15. bit0..bit7 indicates port num for directed
@@ -629,7 +762,7 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
if (!next_desc) {
netdev_err(ndev,
"tx: failed to allocate frag. descriptor\n");
- goto free_desc_stop_q_busy;
+ goto free_desc_stop_q_busy_cleanup_tx_ts;
}
buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
@@ -638,7 +771,7 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
netdev_err(ndev, "tx: Failed to map skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ret = NETDEV_TX_OK;
- goto drop_free_descs;
+ goto cleanup_tx_ts;
}
cppi5_hdesc_reset_hbdesc(next_desc);
@@ -670,6 +803,9 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
goto drop_free_descs;
}
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
netif_tx_stop_queue(netif_txq);
/* Barrier, so that stop_queue visible to other cpus */
@@ -682,6 +818,12 @@ static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device
return NETDEV_TX_OK;
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
drop_free_descs:
prueth_xmit_free(tx_chn, first_desc);
@@ -694,7 +836,11 @@ drop_free_skb:
return ret;
-free_desc_stop_q_busy:
+free_desc_stop_q_busy_cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
prueth_xmit_free(tx_chn, first_desc);
drop_stop_q_busy:
@@ -717,6 +863,16 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ /* currently only TX timestamp is being returned */
+ tx_ts_work(emac);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
@@ -820,6 +976,18 @@ static void prueth_emac_stop(struct prueth_emac *emac)
rproc_shutdown(prueth->pru[slice]);
}
+static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+
/* called back by PHY layer if there is change in link state of hw port*/
static void emac_adjust_link(struct net_device *ndev)
{
@@ -881,6 +1049,7 @@ static void emac_adjust_link(struct net_device *ndev)
netif_tx_wake_all_queues(ndev);
} else {
netif_tx_stop_all_queues(ndev);
+ prueth_cleanup_tx_ts(emac);
}
}
@@ -980,7 +1149,6 @@ static int emac_phy_connect(struct prueth_emac *emac)
/* remove unsupported modes */
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
@@ -992,6 +1160,139 @@ static int emac_phy_connect(struct prueth_emac *emac)
return 0;
}
+static u64 prueth_iep_gettime(void *clockops_data, struct ptp_system_timestamp *sts)
+{
+ u32 hi_rollover_count, hi_rollover_count_r;
+ struct prueth_emac *emac = clockops_data;
+ struct prueth *prueth = emac->prueth;
+ void __iomem *fw_hi_r_count_addr;
+ void __iomem *fw_count_hi_addr;
+ u32 iepcount_hi, iepcount_hi_r;
+ unsigned long flags;
+ u32 iepcount_lo;
+ u64 ts = 0;
+
+ fw_count_hi_addr = prueth->shram.va + TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET;
+ fw_hi_r_count_addr = prueth->shram.va + TIMESYNC_FW_WC_HI_ROLLOVER_COUNT_OFFSET;
+
+ local_irq_save(flags);
+ do {
+ iepcount_hi = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi += readl(fw_count_hi_addr);
+ hi_rollover_count = readl(fw_hi_r_count_addr);
+ ptp_read_system_prets(sts);
+ iepcount_lo = icss_iep_get_count_low(emac->iep);
+ ptp_read_system_postts(sts);
+
+ iepcount_hi_r = icss_iep_get_count_hi(emac->iep);
+ iepcount_hi_r += readl(fw_count_hi_addr);
+ hi_rollover_count_r = readl(fw_hi_r_count_addr);
+ } while ((iepcount_hi_r != iepcount_hi) ||
+ (hi_rollover_count != hi_rollover_count_r));
+ local_irq_restore(flags);
+
+ ts = ((u64)hi_rollover_count) << 23 | iepcount_hi;
+ ts = ts * (u64)IEP_DEFAULT_CYCLE_TIME_NS + iepcount_lo;
+
+ return ts;
+}
+
+static void prueth_iep_settime(void *clockops_data, u64 ns)
+{
+ struct icssg_setclock_desc __iomem *sc_descp;
+ struct prueth_emac *emac = clockops_data;
+ struct icssg_setclock_desc sc_desc;
+ u64 cyclecount;
+ u32 cycletime;
+ int timeout;
+
+ if (!emac->fw_running)
+ return;
+
+ sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
+
+ cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
+ cyclecount = ns / cycletime;
+
+ memset(&sc_desc, 0, sizeof(sc_desc));
+ sc_desc.margin = cycletime - 1000;
+ sc_desc.cyclecounter0_set = cyclecount & GENMASK(31, 0);
+ sc_desc.cyclecounter1_set = (cyclecount & GENMASK(63, 32)) >> 32;
+ sc_desc.iepcount_set = ns % cycletime;
+ sc_desc.CMP0_current = cycletime - 4; /* Count from 0 to (cycle time) - 4 */
+
+ memcpy_toio(sc_descp, &sc_desc, sizeof(sc_desc));
+
+ writeb(1, &sc_descp->request);
+
+ timeout = 5; /* fw should take 2-3 ms */
+ while (timeout--) {
+ if (readb(&sc_descp->acknowledgment))
+ return;
+
+ usleep_range(500, 1000);
+ }
+
+ dev_err(emac->prueth->dev, "settime timeout\n");
+}
+
+static int prueth_perout_enable(void *clockops_data,
+ struct ptp_perout_request *req, int on,
+ u64 *cmp)
+{
+ struct prueth_emac *emac = clockops_data;
+ u32 reduction_factor = 0, offset = 0;
+ struct timespec64 ts;
+ u64 ns_period;
+
+ if (!on)
+ return 0;
+
+ /* Any firmware specific stuff for PPS/PEROUT handling */
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
+ /* f/w doesn't support period less than cycle time */
+ if (ns_period < IEP_DEFAULT_CYCLE_TIME_NS)
+ return -ENXIO;
+
+ reduction_factor = ns_period / IEP_DEFAULT_CYCLE_TIME_NS;
+ offset = ns_period % IEP_DEFAULT_CYCLE_TIME_NS;
+
+ /* f/w requires at least 1 us within a cycle so that CMP
+ * can trigger after SYNC is enabled; 5 us is used here.
+ */
+ if (offset < 5 * NSEC_PER_USEC)
+ offset = 5 * NSEC_PER_USEC;
+
+ /* if offset is close to cycle time then we will miss
+ * the CMP event for last tick when IEP rolls over.
+ * In normal mode, IEP tick is 4ns.
+ * In slow compensation it could be 0ns or 8ns at
+ * every slow compensation cycle.
+ */
+ if (offset > IEP_DEFAULT_CYCLE_TIME_NS - 8)
+ offset = IEP_DEFAULT_CYCLE_TIME_NS - 8;
+
+ /* we're in shadow mode so need to set upper 32-bits */
+ *cmp = (u64)offset << 32;
+
+ writel(reduction_factor, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET);
+
+ writel(0, emac->prueth->shram.va +
+ TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET);
+
+ return 0;
+}
+
+const struct icss_iep_clockops prueth_iep_clockops = {
+ .settime = prueth_iep_settime,
+ .gettime = prueth_iep_gettime,
+ .perout_enable = prueth_perout_enable,
+};
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -1066,10 +1367,20 @@ static int emac_ndo_open(struct net_device *ndev)
icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+ if (!prueth->emacs_initialized) {
+ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+ emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+ IRQF_ONESHOT, dev_name(dev), emac);
+ if (ret)
+ goto stop;
+
/* Prepare RX */
ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
if (ret)
- goto stop;
+ goto free_tx_ts_irq;
ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
if (ret)
@@ -1102,6 +1413,8 @@ reset_tx_chan:
prueth_reset_tx_chan(emac, i, false);
reset_rx_chn:
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+free_tx_ts_irq:
+ free_irq(emac->tx_ts_irq, emac);
stop:
prueth_emac_stop(emac);
free_rx_irq:
@@ -1173,6 +1486,14 @@ static int emac_ndo_stop(struct net_device *ndev)
/* stop PRUs */
prueth_emac_stop(emac);
+ if (prueth->emacs_initialized == 1)
+ icss_iep_exit(emac->iep);
+
+ /* stop PRUs */
+ prueth_emac_stop(emac);
+
+ free_irq(emac->tx_ts_irq, emac);
+
free_irq(emac->rx_chns.irq[rx_flow], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
prueth_cleanup_tx_chns(emac);
@@ -1235,8 +1556,79 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
+static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return emac_get_ts_config(ndev, ifr);
+ case SIOCSHWTSTAMP:
+ return emac_set_ts_config(ndev, ifr);
+ default:
+ break;
+ }
+
return phy_do_ioctl(ndev, ifr, cmd);
}
@@ -1316,6 +1708,7 @@ static int prueth_netdev_init(struct prueth *prueth,
struct prueth_emac *emac;
struct net_device *ndev;
enum prueth_port port;
+ const char *irq_name;
enum prueth_mac mac;
port = prueth_node_port(eth_node);
@@ -1355,6 +1748,15 @@ static int prueth_netdev_init(struct prueth *prueth,
emac->tx_ch_num = 1;
+ irq_name = "tx_ts0";
+ if (emac->port_id == PRUETH_PORT_MII1)
+ irq_name = "tx_ts1";
+ emac->tx_ts_irq = platform_get_irq_byname_optional(prueth->pdev, irq_name);
+ if (emac->tx_ts_irq < 0) {
+ ret = dev_err_probe(prueth->dev, emac->tx_ts_irq, "could not get tx_ts_irq\n");
+ goto free;
+ }
+
SET_NETDEV_DEV(ndev, prueth->dev);
spin_lock_init(&emac->lock);
mutex_init(&emac->cmd_lock);
@@ -1680,14 +2082,38 @@ static int prueth_probe(struct platform_device *pdev)
dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
prueth->msmcram.va, prueth->msmcram.size);
+ prueth->iep0 = icss_iep_get_idx(np, 0);
+ if (IS_ERR(prueth->iep0)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), "iep0 get failed\n");
+ prueth->iep0 = NULL;
+ goto free_pool;
+ }
+
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
+ goto free_pool;
+ }
+
+ if (prueth->pdata.quirk_10m_link_issue) {
+ /* Enable IEP1 for FW in 64-bit mode as a workaround for the 10M
+ * full-duplex link-detect issue under TX traffic.
+ */

+ icss_iep_init_fw(prueth->iep1);
+ }
+
/* setup netdev interfaces */
if (eth0_node) {
ret = prueth_netdev_init(prueth, eth0_node);
if (ret) {
dev_err_probe(dev, ret, "netdev init %s failed\n",
eth0_node->name);
- goto netdev_exit;
+ goto exit_iep;
}
+ prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
if (eth1_node) {
@@ -1697,6 +2123,8 @@ static int prueth_probe(struct platform_device *pdev)
eth1_node->name);
goto netdev_exit;
}
+
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
/* register the network devices */
@@ -1754,6 +2182,11 @@ netdev_exit:
prueth_netdev_exit(prueth, eth_node);
}
+exit_iep:
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
+
+free_pool:
gen_pool_free(prueth->sram_pool,
(unsigned long)prueth->msmcram.va, msmc_ram_size);
@@ -1798,6 +2231,12 @@ static void prueth_remove(struct platform_device *pdev)
prueth_netdev_exit(prueth, eth_node);
}
+ if (prueth->pdata.quirk_10m_link_issue)
+ icss_iep_exit_fw(prueth->iep1);
+
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
gen_pool_free(prueth->sram_pool,
(unsigned long)prueth->msmcram.va,
MSMC_RAM_SIZE);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index a8ce4d01ef16..3fe80a8758d3 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -35,6 +35,7 @@
#include <net/devlink.h>
#include "icssg_config.h"
+#include "icss_iep.h"
#include "icssg_switch_map.h"
#define PRUETH_MAX_MTU (2000 - ETH_HLEN - ETH_FCS_LEN)
@@ -122,6 +123,8 @@ struct prueth_rx_chn {
*/
#define PRUETH_MAX_TX_QUEUES 4
+#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+
/* data for each emac port */
struct prueth_emac {
bool fw_running;
@@ -139,6 +142,9 @@ struct prueth_emac {
struct device_node *phy_node;
phy_interface_t phy_if;
enum prueth_port port_id;
+ struct icss_iep *iep;
+ unsigned int rx_ts_enabled : 1;
+ unsigned int tx_ts_enabled : 1;
/* DMA related */
struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES];
@@ -150,7 +156,15 @@ struct prueth_emac {
spinlock_t lock; /* serialize access */
- unsigned long state;
+ /* TX HW Timestamping */
+ /* TX TS cookie will be index to the tx_ts_skb array */
+ struct sk_buff *tx_ts_skb[PRUETH_MAX_TX_TS_REQUESTS];
+ atomic_t tx_ts_pending;
+ int tx_ts_irq;
+
+ u8 cmd_seq;
+ /* shutdown related */
+ u32 cmd_data[4];
struct completion cmd_complete;
/* Mutex to serialize access to firmware command interface */
struct mutex cmd_lock;
@@ -193,6 +207,8 @@ struct prueth_pdata {
* @pdata: pointer to platform data for ICSSG driver
* @icssg_hwcmdseq: seq counter for HWQ messages
* @emacs_initialized: num of EMACs/ext ports that are up/running
+ * @iep0: pointer to IEP0 device
+ * @iep1: pointer to IEP1 device
*/
struct prueth {
struct device *dev;
@@ -214,8 +230,16 @@ struct prueth {
struct platform_device *pdev;
struct prueth_pdata pdata;
u8 icssg_hwcmdseq;
-
int emacs_initialized;
+ struct icss_iep *iep0;
+ struct icss_iep *iep1;
+};
+
+struct emac_tx_ts_response {
+ u32 reserved[2];
+ u32 cookie;
+ u32 lo_ts;
+ u32 hi_ts;
};
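The layout of emac_tx_ts_response suggests the firmware hands back a split 64-bit timestamp plus the cookie the driver used as an index into tx_ts_skb[]. A sketch of how such a response could be consumed, assuming lo_ts/hi_ts are the low/high 32 bits of a nanosecond value; this is an illustration, not the driver's actual handler:

static void example_handle_tx_ts(struct prueth_emac *emac,
				 const struct emac_tx_ts_response *rsp)
{
	/* Assumption: hi_ts/lo_ts form a 64-bit nanosecond timestamp and
	 * cookie indexes tx_ts_skb[], per the comments in struct prueth_emac.
	 */
	u64 ns = ((u64)rsp->hi_ts << 32) | rsp->lo_ts;
	struct skb_shared_hwtstamps ssh = { .hwtstamp = ns_to_ktime(ns) };
	struct sk_buff *skb;

	if (rsp->cookie >= PRUETH_MAX_TX_TS_REQUESTS)
		return;

	skb = emac->tx_ts_skb[rsp->cookie];
	emac->tx_ts_skb[rsp->cookie] = NULL;
	if (!skb)
		return;

	skb_tstamp_tx(skb, &ssh);
	dev_consume_skb_any(skb);
}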
/* get PRUSS SLICE number from prueth_emac */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index bb234cf0cea0..332c853ca99b 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9832,6 +9832,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
{}
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 6eef6bc430e2..9d248ba1c0b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le {
* fixed parameter portion is assumed, otherwise
* ssid in the fixed portion is ignored
*/
- __le16 channel_list[1]; /* list of chanspecs */
+ union {
+ __le16 padding; /* Reserve space for at least 1 entry for abort
+ * which uses an on stack brcmf_scan_params_v2_le
+ */
+ DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */
+ };
};
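The union keeps room for one on-stack chanspec (used for scan abort) while letting heap users treat channel_list as a proper flexible array. A sketch (not from this patch) of sizing an allocation with struct_size(), which works with DECLARE_FLEX_ARRAY() members; n_channels is illustrative:

static struct brcmf_scan_params_v2_le *
example_alloc_scan_params(u32 n_channels, gfp_t gfp)
{
	struct brcmf_scan_params_v2_le *params;

	/* struct_size() guards the element-count multiplication against overflow */
	params = kzalloc(struct_size(params, channel_list, n_channels), gfp);
	return params;
}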
struct brcmf_scan_results {
@@ -702,7 +707,7 @@ struct brcmf_sta_info_le {
struct brcmf_chanspec_list {
__le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
+ __le32 element[]; /* variable length uint32 list */
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index f5e08988dc7b..06d6f7f66430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 5c719636c9bd..a5348b015310 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -819,7 +819,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)((u8 *)mvm->nvm_data->channels + 1);
+ (void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 5449deb3c2d6..46e207211f21 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
mvmvif->associated = vif->cfg.assoc;
- if (!(changes & BSS_CHANGED_ASSOC))
- return;
-
- if (vif->cfg.assoc) {
- /* clear statistics to get clean beacon counter */
- iwl_mvm_request_statistics(mvm, true);
- iwl_mvm_sf_update(mvm, vif, false);
- iwl_mvm_power_vif_assoc(mvm, vif);
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- memset(&mvmvif->link[i]->beacon_stats, 0,
- sizeof(mvmvif->link[i]->beacon_stats));
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
- if (vif->p2p) {
- iwl_mvm_update_smps(mvm, vif,
- IWL_MVM_SMPS_REQ_PROT,
- IEEE80211_SMPS_DYNAMIC, i);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
- rcu_read_lock();
- link_conf = rcu_dereference(vif->link_conf[i]);
- if (link_conf && !link_conf->dtim_period)
- protect = true;
- rcu_read_unlock();
- }
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- protect) {
- /* If we're not restarting and still haven't
- * heard a beacon (dtim period unknown) then
- * make sure we still have enough minimum time
- * remaining in the time event, since the auth
- * might actually have taken quite a while
- * (especially for SAE) and so the remaining
- * time could be small without us having heard
- * a beacon yet.
+ /* If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
- iwl_mvm_sf_update(mvm, vif, false);
-
- /* FIXME: need to decide about misbehaving AP handling */
- iwl_mvm_power_vif_assoc(mvm, vif);
- } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
- iwl_mvm_mei_host_disassociated(mvm);
-
- /* If update fails - SF might be running in associated
- * mode while disassociated - which is forbidden.
- */
- ret = iwl_mvm_sf_update(mvm, vif, false);
- WARN_ONCE(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status),
- "Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
}
- iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ if (changes & BSS_CHANGED_PS) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
}
static void
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index c1d9ce753468..3cbe2c0b8d6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
- if (version < 12) {
+ if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
struct iwl_mvm_vif_link_info *link_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ec18e348553d..674ddf951b79 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1636,6 +1636,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
@@ -1988,6 +1989,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
if (!is_flush)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 391793a16adc..10690e82358b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
- while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ while (tlv_buf_left > sizeof(*tlv_rxba)) {
tlv_type = le16_to_cpu(tlv_rxba->header.type);
tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(tlv_rxba->header), tlv_len),
+ tlv_buf_left);
+ return;
+ }
+
if (tlv_type != TLV_TYPE_RXBA_SYNC) {
mwifiex_dbg(priv->adapter, ERROR,
"Wrong TLV id=0x%x\n", tlv_type);
@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
+ tlv_buf_left);
+ return;
+ }
+
mwifiex_dbg(priv->adapter, INFO,
"%pM tid=%d seq_num=%d bitmap_len=%d\n",
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
@@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
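Since header.len covers everything after the TLV header, the old stride of sizeof(*tlv_rxba) + tlv_len over-advanced by the structure's fixed fields; the hunk above also bounds each TLV with overflow-safe size_add() checks before touching it. A generic sketch of that walk pattern; the types and names here are illustrative, not mwifiex API:

struct example_tlv_hdr {
	__le16 type;
	__le16 len;	/* payload length, header excluded */
} __packed;

static void example_walk_tlvs(const u8 *buf, int buf_left)
{
	while (buf_left > (int)sizeof(struct example_tlv_hdr)) {
		const struct example_tlv_hdr *tlv = (const void *)buf;
		u16 len = le16_to_cpu(tlv->len);

		/* size_add() cannot wrap even if len is attacker-controlled */
		if (size_add(sizeof(*tlv), len) > buf_left)
			return;

		/* ... process one TLV ... */

		buf += sizeof(*tlv) + len;
		buf_left -= sizeof(*tlv) + len;
	}
}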
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index f2168fac95ed..8e6db904e5b2 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync {
u8 reserved;
__le16 seq_num;
__le16 bitmap_len;
- u8 bitmap[1];
+ u8 bitmap[];
} __packed;
struct chan_band_param_set {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 65420ad67416..257737137cd7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
- if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+ rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
- sizeof(bridge_tunnel_header))) ||
- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
- sizeof(rfc1042_header)) &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 05d9ab3ce819..dc8f4e157eb2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
if (!list_empty(&dev->rxwi_cache)) {
t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
list);
list_del(&t->list);
}
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
return t;
}
@@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
if (!t)
return;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
list_add(&t->list, &dev->rxwi_cache);
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 0acabba2d1a5..5d402cf2951c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
- u16 val;
u8 lna;
- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- *lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(s8) * 3);
-
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index d5809408d1d3..8c01855885ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
- u8 lna;
+ bool use_lna;
+ u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 3642a2c7f80c..2434e2480cbe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -46,6 +46,7 @@ struct rtw8723du_efuse {
u8 vender_id[2]; /* 0x100 */
u8 product_id[2]; /* 0x102 */
u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
};
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 3f46e499d615..919cc53bc02e 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -177,25 +177,24 @@ unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_power);
/**
- * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
- * @opp: opp for which frequency has to be returned for
+ * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an
+ * available opp with specified index
+ * @opp: opp for which frequency has to be returned for
+ * @index: index of the frequency within the required opp
*
- * Return: frequency in hertz corresponding to the opp, else
- * return 0
+ * Return: frequency in hertz corresponding to the opp with specified index,
+ * else return 0
*/
-unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
- if (IS_ERR_OR_NULL(opp)) {
+ if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) {
pr_err("%s: Invalid parameters\n", __func__);
return 0;
}
- if (!assert_single_clk(opp->opp_table))
- return 0;
-
- return opp->rates[0];
+ return opp->rates[index];
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed);
/**
* dev_pm_opp_get_level() - Gets the level corresponding to an available opp
@@ -227,20 +226,18 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
unsigned int dev_pm_opp_get_required_pstate(struct dev_pm_opp *opp,
unsigned int index)
{
- struct opp_table *opp_table = opp->opp_table;
-
if (IS_ERR_OR_NULL(opp) || !opp->available ||
- index >= opp_table->required_opp_count) {
+ index >= opp->opp_table->required_opp_count) {
pr_err("%s: Invalid parameters\n", __func__);
return 0;
}
/* required-opps not fully initialized yet */
- if (lazy_linking_pending(opp_table))
+ if (lazy_linking_pending(opp->opp_table))
return 0;
/* The required OPP table must belong to a genpd */
- if (unlikely(!opp_table->required_opp_tables[index]->is_genpd)) {
+ if (unlikely(!opp->opp_table->required_opp_tables[index]->is_genpd)) {
pr_err("%s: Performance state is only valid for genpds.\n", __func__);
return 0;
}
@@ -450,7 +447,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
/* Helpers to read keys */
static unsigned long _read_freq(struct dev_pm_opp *opp, int index)
{
- return opp->rates[0];
+ return opp->rates[index];
}
static unsigned long _read_level(struct dev_pm_opp *opp, int index)
@@ -626,6 +623,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
+/**
+ * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the
+ * clock corresponding to the index
+ * @dev: Device for which we do this operation
+ * @freq: frequency to search for
+ * @index: Clock index
+ * @available: true/false - match for available opp
+ *
+ * Search for the matching exact OPP for the clock corresponding to the
+ * specified index from a starting freq for a device.
+ *
+ * Return: matching *opp, else returns ERR_PTR in case of error and should be
+ * handled using IS_ERR. Error return values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available)
+{
+ return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
+
static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
unsigned long *freq)
{
@@ -659,6 +684,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
/**
+ * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the
+ * clock corresponding to the index
+ * @dev: Device for which we do this operation
+ * @freq: Start frequency
+ * @index: Clock index
+ *
+ * Search for the matching ceil *available* OPP for the clock corresponding to
+ * the specified index from a starting freq for a device.
+ *
+ * Return: matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *
+dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
+ u32 index)
+{
+ return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
+
+/**
* dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
* @dev: device for which we do this operation
* @freq: Start frequency
@@ -684,6 +737,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
/**
+ * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the
+ * clock corresponding to the index
+ * @dev: Device for which we do this operation
+ * @freq: Start frequency
+ * @index: Clock index
+ *
+ * Search for the matching floor *available* OPP for the clock corresponding to
+ * the specified index from a starting freq for a device.
+ *
+ * Return: matching *opp and refreshes *freq accordingly, else returns
+ * ERR_PTR in case of error and should be handled using IS_ERR. Error return
+ * values can be:
+ * EINVAL: for bad pointer
+ * ERANGE: no match found for search
+ * ENODEV: if device not found in list of registered devices
+ *
+ * The callers are required to call dev_pm_opp_put() for the returned OPP after
+ * use.
+ */
+struct dev_pm_opp *
+dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
+ u32 index)
+{
+ return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
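The indexed lookups above mirror their single-clock counterparts but operate on the clock selected by @index, and the returned OPP must still be released with dev_pm_opp_put(). A minimal consumer sketch (not from this patch), assuming a device with more than one clock and querying clock 1:

static int example_get_clk1_rate(struct device *dev, unsigned long *freq_hz)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq_hz, 1);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	*freq_hz = dev_pm_opp_get_freq_indexed(opp, 1);
	dev_pm_opp_put(opp);	/* required, per the kerneldoc above */
	return 0;
}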
+
+/**
* dev_pm_opp_find_level_exact() - search for an exact level
* @dev: device for which we do this operation
* @level: level to search for
@@ -2379,7 +2460,7 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
virt_dev = dev_pm_domain_attach_by_name(dev, *name);
if (IS_ERR_OR_NULL(virt_dev)) {
- ret = PTR_ERR(virt_dev) ? : -ENODEV;
+ ret = virt_dev ? PTR_ERR(virt_dev) : -ENODEV;
dev_err(dev, "Couldn't attach to pm_domain: %d\n", ret);
goto err;
}
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 3c3506021501..12c429b407ca 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -24,7 +24,7 @@
/**
* dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
* @dev: device for which we do this operation
- * @table: Cpufreq table returned back to caller
+ * @opp_table: Cpufreq table returned back to caller
*
* Generate a cpufreq table for a provided device- this assumes that the
* opp table is already initialized and ready for usage.
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
/**
* dev_pm_opp_free_cpufreq_table() - free the cpufreq table
* @dev: device for which we do this operation
- * @table: table to free
+ * @opp_table: table to free
*
* Free up the table allocated by dev_pm_opp_init_cpufreq_table
*/
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index f4572a5cca72..273d67ecf6d2 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -92,7 +92,7 @@ config ARM_PMU_ACPI
config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
- depends on (ARM64 && ACPI) || (COMPILE_TEST && 64BIT)
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on GENERIC_MSI_IRQ
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 5c5be9fc1b15..19d459a36be5 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -236,10 +236,37 @@ static const struct attribute_group ali_drw_pmu_cpumask_attr_group = {
.attrs = ali_drw_pmu_cpumask_attrs,
};
+static ssize_t ali_drw_pmu_identifier_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ return sysfs_emit(page, "%s\n", "ali_drw_pmu");
+}
+
+static umode_t ali_drw_pmu_identifier_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return attr->mode;
+}
+
+static struct device_attribute ali_drw_pmu_identifier_attr =
+ __ATTR(identifier, 0444, ali_drw_pmu_identifier_show, NULL);
+
+static struct attribute *ali_drw_pmu_identifier_attrs[] = {
+ &ali_drw_pmu_identifier_attr.attr,
+ NULL
+};
+
+static const struct attribute_group ali_drw_pmu_identifier_attr_group = {
+ .attrs = ali_drw_pmu_identifier_attrs,
+ .is_visible = ali_drw_pmu_identifier_attr_visible
+};
+
static const struct attribute_group *ali_drw_pmu_attr_groups[] = {
&ali_drw_pmu_events_attr_group,
&ali_drw_pmu_cpumask_attr_group,
&ali_drw_pmu_format_group,
+ &ali_drw_pmu_identifier_attr_group,
NULL,
};
diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
index 0b24dee1ed3c..bbc7285fd934 100644
--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
+++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
@@ -9,8 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 998259f1d973..61de861eaf91 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -7,10 +7,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index b8c15878bc86..913dc04b3a40 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -72,6 +72,8 @@
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
#define CMN__PMU_CBUSY_SNTHROTTLE_SEL GENMASK_ULL(44, 42)
+#define CMN__PMU_SN_HOME_SEL GENMASK_ULL(40, 39)
+#define CMN__PMU_HBT_LBT_SEL GENMASK_ULL(38, 37)
#define CMN__PMU_CLASS_OCCUP_ID GENMASK_ULL(36, 35)
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
@@ -226,6 +228,7 @@ enum cmn_revision {
REV_CMN700_R0P0 = 0,
REV_CMN700_R1P0,
REV_CMN700_R2P0,
+ REV_CMN700_R3P0,
REV_CI700_R0P0 = 0,
REV_CI700_R1P0,
REV_CI700_R2P0,
@@ -254,6 +257,9 @@ enum cmn_node_type {
CMN_TYPE_CCHA,
CMN_TYPE_CCLA,
CMN_TYPE_CCLA_RNI,
+ CMN_TYPE_HNS = 0x200,
+ CMN_TYPE_HNS_MPAM_S,
+ CMN_TYPE_HNS_MPAM_NS,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
@@ -263,6 +269,8 @@ enum cmn_filter_select {
SEL_OCCUP1ID,
SEL_CLASS_OCCUP_ID,
SEL_CBUSY_SNTHROTTLE_SEL,
+ SEL_HBT_LBT_SEL,
+ SEL_SN_HOME_SEL,
SEL_MAX
};
@@ -742,8 +750,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name) \
CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
-#define _CMN_EVENT_HNF(_model, _name, _event, _occup, _fsel) \
- _CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup, _fsel)
+#define CMN_EVENT_HNF(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNI(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
#define CMN_EVENT_HNP(_name, _event) \
@@ -768,6 +776,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
#define CMN_EVENT_CCLA_RNI(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
+#define CMN_EVENT_HNS(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_DVM(_model, _name, _event) \
_CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
@@ -775,32 +785,68 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID), \
_CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID), \
_CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)
-#define CMN_EVENT_HNF(_model, _name, _event) \
- _CMN_EVENT_HNF(_model, _name, _event, 0, SEL_NONE)
+
+#define CMN_EVENT_HN_OCC(_model, _name, _type, _event) \
+ _CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_OCCUP1ID), \
+ _CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 1, SEL_OCCUP1ID), \
+ _CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 2, SEL_OCCUP1ID), \
+ _CMN_EVENT_ATTR(_model, _name##_atomic, _type, _event, 3, SEL_OCCUP1ID), \
+ _CMN_EVENT_ATTR(_model, _name##_stash, _type, _event, 4, SEL_OCCUP1ID)
+#define CMN_EVENT_HN_CLS(_model, _name, _type, _event) \
+ _CMN_EVENT_ATTR(_model, _name##_class0, _type, _event, 0, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_ATTR(_model, _name##_class1, _type, _event, 1, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_ATTR(_model, _name##_class2, _type, _event, 2, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_ATTR(_model, _name##_class3, _type, _event, 3, SEL_CLASS_OCCUP_ID)
+#define CMN_EVENT_HN_SNT(_model, _name, _type, _event) \
+ _CMN_EVENT_ATTR(_model, _name##_all, _type, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_ATTR(_model, _name##_group0_read, _type, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_ATTR(_model, _name##_group0_write, _type, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_ATTR(_model, _name##_group1_read, _type, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_ATTR(_model, _name##_group1_write, _type, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_ATTR(_model, _name##_read, _type, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_ATTR(_model, _name##_write, _type, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)
+
+#define CMN_EVENT_HNF_OCC(_model, _name, _event) \
+ CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
- _CMN_EVENT_HNF(_model, _name##_class0, _event, 0, SEL_CLASS_OCCUP_ID), \
- _CMN_EVENT_HNF(_model, _name##_class1, _event, 1, SEL_CLASS_OCCUP_ID), \
- _CMN_EVENT_HNF(_model, _name##_class2, _event, 2, SEL_CLASS_OCCUP_ID), \
- _CMN_EVENT_HNF(_model, _name##_class3, _event, 3, SEL_CLASS_OCCUP_ID)
+ CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
- _CMN_EVENT_HNF(_model, _name##_all, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
- _CMN_EVENT_HNF(_model, _name##_group0_read, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
- _CMN_EVENT_HNF(_model, _name##_group0_write, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
- _CMN_EVENT_HNF(_model, _name##_group1_read, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
- _CMN_EVENT_HNF(_model, _name##_group1_write, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
- _CMN_EVENT_HNF(_model, _name##_read, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
- _CMN_EVENT_HNF(_model, _name##_write, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)
-
-#define _CMN_EVENT_XP(_name, _event) \
+ CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)
+
+#define CMN_EVENT_HNS_OCC(_name, _event) \
+ CMN_EVENT_HN_OCC(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_rxsnp, CMN_TYPE_HNS, _event, 5, SEL_OCCUP1ID), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 6, SEL_OCCUP1ID), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 7, SEL_OCCUP1ID)
+#define CMN_EVENT_HNS_CLS( _name, _event) \
+ CMN_EVENT_HN_CLS(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
+#define CMN_EVENT_HNS_SNT(_name, _event) \
+ CMN_EVENT_HN_SNT(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
+#define CMN_EVENT_HNS_HBT(_name, _event) \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_HBT_LBT_SEL), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_hbt, CMN_TYPE_HNS, _event, 1, SEL_HBT_LBT_SEL), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_lbt, CMN_TYPE_HNS, _event, 2, SEL_HBT_LBT_SEL)
+#define CMN_EVENT_HNS_SNH(_name, _event) \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_all, CMN_TYPE_HNS, _event, 0, SEL_SN_HOME_SEL), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_sn, CMN_TYPE_HNS, _event, 1, SEL_SN_HOME_SEL), \
+ _CMN_EVENT_ATTR(CMN_ANY, hns_##_name##_home, CMN_TYPE_HNS, _event, 2, SEL_SN_HOME_SEL)
+
+#define _CMN_EVENT_XP_MESH(_name, _event) \
__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \
__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \
__CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \
- __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)), \
+ __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2))
+
+#define _CMN_EVENT_XP_PORT(_name, _event) \
__CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \
__CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)), \
__CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)), \
__CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2))
+#define _CMN_EVENT_XP(_name, _event) \
+ _CMN_EVENT_XP_MESH(_name, _event), \
+ _CMN_EVENT_XP_PORT(_name, _event)
+
/* Good thing there are only 3 fundamental XP events... */
#define CMN_EVENT_XP(_name, _event) \
_CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \
@@ -813,6 +859,10 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
_CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
+#define CMN_EVENT_XP_DAT(_name, _event) \
+ _CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \
+ _CMN_EVENT_XP_PORT(dat2_##_name, (_event) | (6 << 5))
+
static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_DTC(cycles),
@@ -862,11 +912,7 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c),
CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d),
CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0, SEL_OCCUP1ID),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1, SEL_OCCUP1ID),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2, SEL_OCCUP1ID),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3, SEL_OCCUP1ID),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4, SEL_OCCUP1ID),
+ CMN_EVENT_HNF_OCC(CMN_ANY, qos_pocq_occupancy, 0x0f),
CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10),
CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12),
@@ -943,7 +989,7 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_XP(txflit_valid, 0x01),
CMN_EVENT_XP(txflit_stall, 0x02),
- CMN_EVENT_XP(partial_dat_flit, 0x03),
+ CMN_EVENT_XP_DAT(partial_dat_flit, 0x03),
/* We treat watchpoints as a special made-up class of XP events */
CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),
@@ -1132,6 +1178,66 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a),
CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd, 0x2b),
+ CMN_EVENT_HNS_HBT(cache_miss, 0x01),
+ CMN_EVENT_HNS_HBT(slc_sf_cache_access, 0x02),
+ CMN_EVENT_HNS_HBT(cache_fill, 0x03),
+ CMN_EVENT_HNS_HBT(pocq_retry, 0x04),
+ CMN_EVENT_HNS_HBT(pocq_reqs_recvd, 0x05),
+ CMN_EVENT_HNS_HBT(sf_hit, 0x06),
+ CMN_EVENT_HNS_HBT(sf_evictions, 0x07),
+ CMN_EVENT_HNS(dir_snoops_sent, 0x08),
+ CMN_EVENT_HNS(brd_snoops_sent, 0x09),
+ CMN_EVENT_HNS_HBT(slc_eviction, 0x0a),
+ CMN_EVENT_HNS_HBT(slc_fill_invalid_way, 0x0b),
+ CMN_EVENT_HNS(mc_retries_local, 0x0c),
+ CMN_EVENT_HNS_SNH(mc_reqs_local, 0x0d),
+ CMN_EVENT_HNS(qos_hh_retry, 0x0e),
+ CMN_EVENT_HNS_OCC(qos_pocq_occupancy, 0x0f),
+ CMN_EVENT_HNS(pocq_addrhaz, 0x10),
+ CMN_EVENT_HNS(pocq_atomic_addrhaz, 0x11),
+ CMN_EVENT_HNS(ld_st_swp_adq_full, 0x12),
+ CMN_EVENT_HNS(cmp_adq_full, 0x13),
+ CMN_EVENT_HNS(txdat_stall, 0x14),
+ CMN_EVENT_HNS(txrsp_stall, 0x15),
+ CMN_EVENT_HNS(seq_full, 0x16),
+ CMN_EVENT_HNS(seq_hit, 0x17),
+ CMN_EVENT_HNS(snp_sent, 0x18),
+ CMN_EVENT_HNS(sfbi_dir_snp_sent, 0x19),
+ CMN_EVENT_HNS(sfbi_brd_snp_sent, 0x1a),
+ CMN_EVENT_HNS(intv_dirty, 0x1c),
+ CMN_EVENT_HNS(stash_snp_sent, 0x1d),
+ CMN_EVENT_HNS(stash_data_pull, 0x1e),
+ CMN_EVENT_HNS(snp_fwded, 0x1f),
+ CMN_EVENT_HNS(atomic_fwd, 0x20),
+ CMN_EVENT_HNS(mpam_hardlim, 0x21),
+ CMN_EVENT_HNS(mpam_softlim, 0x22),
+ CMN_EVENT_HNS(snp_sent_cluster, 0x23),
+ CMN_EVENT_HNS(sf_imprecise_evict, 0x24),
+ CMN_EVENT_HNS(sf_evict_shared_line, 0x25),
+ CMN_EVENT_HNS_CLS(pocq_class_occup, 0x26),
+ CMN_EVENT_HNS_CLS(pocq_class_retry, 0x27),
+ CMN_EVENT_HNS_CLS(class_mc_reqs_local, 0x28),
+ CMN_EVENT_HNS_CLS(class_cgnt_cmin, 0x29),
+ CMN_EVENT_HNS_SNT(sn_throttle, 0x2a),
+ CMN_EVENT_HNS_SNT(sn_throttle_min, 0x2b),
+ CMN_EVENT_HNS(sf_precise_to_imprecise, 0x2c),
+ CMN_EVENT_HNS(snp_intv_cln, 0x2d),
+ CMN_EVENT_HNS(nc_excl, 0x2e),
+ CMN_EVENT_HNS(excl_mon_ovfl, 0x2f),
+ CMN_EVENT_HNS(snp_req_recvd, 0x30),
+ CMN_EVENT_HNS(snp_req_byp_pocq, 0x31),
+ CMN_EVENT_HNS(dir_ccgha_snp_sent, 0x32),
+ CMN_EVENT_HNS(brd_ccgha_snp_sent, 0x33),
+ CMN_EVENT_HNS(ccgha_snp_stall, 0x34),
+ CMN_EVENT_HNS(lbt_req_hardlim, 0x35),
+ CMN_EVENT_HNS(hbt_req_hardlim, 0x36),
+ CMN_EVENT_HNS(sf_reupdate, 0x37),
+ CMN_EVENT_HNS(excl_sf_imprecise, 0x38),
+ CMN_EVENT_HNS(snp_pocq_addrhaz, 0x39),
+ CMN_EVENT_HNS(mc_retries_remote, 0x3a),
+ CMN_EVENT_HNS_SNH(mc_reqs_remote, 0x3b),
+ CMN_EVENT_HNS_CLS(class_mc_reqs_remote, 0x3c),
+
NULL
};
@@ -1373,6 +1479,10 @@ static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
dn->occupid[fsel].val = occupid;
reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
+ FIELD_PREP(CMN__PMU_SN_HOME_SEL,
+ dn->occupid[SEL_SN_HOME_SEL].val) |
+ FIELD_PREP(CMN__PMU_HBT_LBT_SEL,
+ dn->occupid[SEL_HBT_LBT_SEL].val) |
FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
dn->occupid[SEL_CLASS_OCCUP_ID].val) |
FIELD_PREP(CMN__PMU_OCCUP1_ID,
@@ -2200,6 +2310,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_CCRA:
case CMN_TYPE_CCHA:
case CMN_TYPE_CCLA:
+ case CMN_TYPE_HNS:
dn++;
break;
/* Nothing to see here */
@@ -2207,6 +2318,8 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_MPAM_NS:
case CMN_TYPE_RNSAM:
case CMN_TYPE_CXLA:
+ case CMN_TYPE_HNS_MPAM_S:
+ case CMN_TYPE_HNS_MPAM_NS:
break;
/*
* Split "optimised" combination nodes into separate
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 9d0f01c4455a..30cea6859574 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -66,8 +66,13 @@
#define DMC620_PMU_COUNTERn_OFFSET(n) \
(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
-static LIST_HEAD(dmc620_pmu_irqs);
+/*
+ * dmc620_pmu_irqs_lock: protects dmc620_pmu_irqs list
+ * dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
+ */
static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
+static DEFINE_MUTEX(dmc620_pmu_node_lock);
+static LIST_HEAD(dmc620_pmu_irqs);
struct dmc620_pmu_irq {
struct hlist_node node;
@@ -475,9 +480,9 @@ static int dmc620_pmu_get_irq(struct dmc620_pmu *dmc620_pmu, int irq_num)
return PTR_ERR(irq);
dmc620_pmu->irq = irq;
- mutex_lock(&dmc620_pmu_irqs_lock);
+ mutex_lock(&dmc620_pmu_node_lock);
list_add_rcu(&dmc620_pmu->pmus_node, &irq->pmus_node);
- mutex_unlock(&dmc620_pmu_irqs_lock);
+ mutex_unlock(&dmc620_pmu_node_lock);
return 0;
}
@@ -486,9 +491,11 @@ static void dmc620_pmu_put_irq(struct dmc620_pmu *dmc620_pmu)
{
struct dmc620_pmu_irq *irq = dmc620_pmu->irq;
- mutex_lock(&dmc620_pmu_irqs_lock);
+ mutex_lock(&dmc620_pmu_node_lock);
list_del_rcu(&dmc620_pmu->pmus_node);
+ mutex_unlock(&dmc620_pmu_node_lock);
+ mutex_lock(&dmc620_pmu_irqs_lock);
if (!refcount_dec_and_test(&irq->refcount)) {
mutex_unlock(&dmc620_pmu_irqs_lock);
return;
@@ -638,10 +645,10 @@ static int dmc620_pmu_cpu_teardown(unsigned int cpu,
return 0;
/* We're only reading, but this isn't the place to be involving RCU */
- mutex_lock(&dmc620_pmu_irqs_lock);
+ mutex_lock(&dmc620_pmu_node_lock);
list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
- mutex_unlock(&dmc620_pmu_irqs_lock);
+ mutex_unlock(&dmc620_pmu_node_lock);
WARN_ON(irq_set_affinity(irq->irq_num, cpumask_of(target)));
irq->cpu = target;
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index fe2abb412c00..8223c49bd082 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -20,7 +20,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index f6ccb2cd4dfc..d712a19e47ac 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -877,11 +877,13 @@ struct arm_pmu *armpmu_alloc(void)
.attr_groups = pmu->attr_groups,
/*
* This is a CPU PMU potentially in a heterogeneous
- * configuration (e.g. big.LITTLE). This is not an uncore PMU,
- * and we have taken ctx sharing into account (e.g. with our
- * pmu::filter callback and pmu::event_init group validation).
+ * configuration (e.g. big.LITTLE) so
+ * PERF_PMU_CAP_EXTENDED_HW_TYPE is required to open
+ * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events on a
+ * specific PMU.
*/
- .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS | PERF_PMU_CAP_EXTENDED_REGS,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS |
+ PERF_PMU_CAP_EXTENDED_HW_TYPE,
};
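With PERF_PMU_CAP_EXTENDED_HW_TYPE, generic PERF_TYPE_HARDWARE events can be directed at one specific CPU PMU by encoding that PMU's type id in the upper 32 bits of attr.config. A userspace sketch under that assumption; the pmu_type value would be read from /sys/bus/event_source/devices/<pmu>/type:

#include <linux/perf_event.h>
#include <string.h>

static void example_init_cycles_attr(struct perf_event_attr *attr,
				     __u64 pmu_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	/* PMU type id in bits 63:32, generic event id in bits 31:0 */
	attr->config = (pmu_type << 32) | PERF_COUNT_HW_CPU_CYCLES;
}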
pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 90815ad762eb..05dda19c5359 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -69,6 +69,62 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
acpi_unregister_gsi(gsi);
}
+static int __maybe_unused
+arm_acpi_register_pmu_device(struct platform_device *pdev, u8 len,
+ u16 (*parse_gsi)(struct acpi_madt_generic_interrupt *))
+{
+ int cpu, this_hetid, hetid, irq, ret;
+ u16 this_gsi = 0, gsi = 0;
+
+ /*
+ * Ensure that the platform device has an IORESOURCE_IRQ
+ * resource to hold the GSI interrupt.
+ */
+ if (pdev->num_resources != 1)
+ return -ENXIO;
+
+ if (pdev->resource[0].flags != IORESOURCE_IRQ)
+ return -ENXIO;
+
+ /*
+ * Sanity check all the GICC tables for the same interrupt
+ * number. For now, only support homogeneous ACPI machines.
+ */
+ for_each_possible_cpu(cpu) {
+ struct acpi_madt_generic_interrupt *gicc;
+
+ gicc = acpi_cpu_get_madt_gicc(cpu);
+ if (gicc->header.length < len)
+ return gsi ? -ENXIO : 0;
+
+ this_gsi = parse_gsi(gicc);
+ this_hetid = find_acpi_cpu_topology_hetero_id(cpu);
+ if (!gsi) {
+ hetid = this_hetid;
+ gsi = this_gsi;
+ } else if (hetid != this_hetid || gsi != this_gsi) {
+ pr_warn("ACPI: %s: must be homogeneous\n", pdev->name);
+ return -ENXIO;
+ }
+ }
+
+ if (!this_gsi)
+ return 0;
+
+ irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
+ if (irq < 0) {
+ pr_warn("ACPI: %s Unable to register interrupt: %d\n", pdev->name, gsi);
+ return -ENXIO;
+ }
+
+ pdev->resource[0].start = irq;
+ ret = platform_device_register(pdev);
+ if (ret)
+ acpi_unregister_gsi(gsi);
+
+ return ret;
+}
+
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
static struct resource spe_resources[] = {
{
@@ -84,6 +140,11 @@ static struct platform_device spe_dev = {
.num_resources = ARRAY_SIZE(spe_resources)
};
+static u16 arm_spe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
+{
+ return gicc->spe_interrupt;
+}
+
/*
* For lack of a better place, hook the normal PMU MADT walk
* and create a SPE device if we detect a recent MADT with
@@ -91,53 +152,50 @@ static struct platform_device spe_dev = {
*/
static void arm_spe_acpi_register_device(void)
{
- int cpu, hetid, irq, ret;
- bool first = true;
- u16 gsi = 0;
-
- /*
- * Sanity check all the GICC tables for the same interrupt number.
- * For now, we only support homogeneous ACPI/SPE machines.
- */
- for_each_possible_cpu(cpu) {
- struct acpi_madt_generic_interrupt *gicc;
+ int ret = arm_acpi_register_pmu_device(&spe_dev, ACPI_MADT_GICC_SPE,
+ arm_spe_parse_gsi);
+ if (ret)
+ pr_warn("ACPI: SPE: Unable to register device\n");
+}
+#else
+static inline void arm_spe_acpi_register_device(void)
+{
+}
+#endif /* CONFIG_ARM_SPE_PMU */
- gicc = acpi_cpu_get_madt_gicc(cpu);
- if (gicc->header.length < ACPI_MADT_GICC_SPE)
- return;
-
- if (first) {
- gsi = gicc->spe_interrupt;
- if (!gsi)
- return;
- hetid = find_acpi_cpu_topology_hetero_id(cpu);
- first = false;
- } else if ((gsi != gicc->spe_interrupt) ||
- (hetid != find_acpi_cpu_topology_hetero_id(cpu))) {
- pr_warn("ACPI: SPE must be homogeneous\n");
- return;
- }
+#if IS_ENABLED(CONFIG_CORESIGHT_TRBE)
+static struct resource trbe_resources[] = {
+ {
+ /* irq */
+ .flags = IORESOURCE_IRQ,
}
+};
- irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE,
- ACPI_ACTIVE_HIGH);
- if (irq < 0) {
- pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi);
- return;
- }
+static struct platform_device trbe_dev = {
+ .name = ARMV8_TRBE_PDEV_NAME,
+ .id = -1,
+ .resource = trbe_resources,
+ .num_resources = ARRAY_SIZE(trbe_resources)
+};
- spe_resources[0].start = irq;
- ret = platform_device_register(&spe_dev);
- if (ret < 0) {
- pr_warn("ACPI: SPE: Unable to register device\n");
- acpi_unregister_gsi(gsi);
- }
+static u16 arm_trbe_parse_gsi(struct acpi_madt_generic_interrupt *gicc)
+{
+ return gicc->trbe_interrupt;
+}
+
+static void arm_trbe_acpi_register_device(void)
+{
+ int ret = arm_acpi_register_pmu_device(&trbe_dev, ACPI_MADT_GICC_TRBE,
+ arm_trbe_parse_gsi);
+ if (ret)
+ pr_warn("ACPI: TRBE: Unable to register device\n");
}
#else
-static inline void arm_spe_acpi_register_device(void)
+static inline void arm_trbe_acpi_register_device(void)
{
+
}
-#endif /* CONFIG_ARM_SPE_PMU */
+#endif /* CONFIG_CORESIGHT_TRBE */
static int arm_pmu_acpi_parse_irqs(void)
{
@@ -374,6 +432,7 @@ static int arm_pmu_acpi_init(void)
return 0;
arm_spe_acpi_register_device();
+ arm_trbe_acpi_register_device();
return 0;
}
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 933b96e243b8..3596db36cbff 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -16,7 +16,6 @@
#include <linux/irqdesc.h>
#include <linux/kconfig.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index 08b3a1bf0ef6..e5a2ac4155f6 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -721,38 +721,15 @@ static void armv8pmu_enable_event(struct perf_event *event)
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
-
- /*
- * Disable counter
- */
armv8pmu_disable_event_counter(event);
-
- /*
- * Set event.
- */
armv8pmu_write_event_type(event);
-
- /*
- * Enable interrupt for this counter
- */
armv8pmu_enable_event_irq(event);
-
- /*
- * Enable counter
- */
armv8pmu_enable_event_counter(event);
}
static void armv8pmu_disable_event(struct perf_event *event)
{
- /*
- * Disable counter
- */
armv8pmu_disable_event_counter(event);
-
- /*
- * Disable interrupt for this counter
- */
armv8pmu_disable_event_irq(event);
}
@@ -1266,9 +1243,14 @@ PMUV3_INIT_SIMPLE(armv8_cortex_a76)
PMUV3_INIT_SIMPLE(armv8_cortex_a77)
PMUV3_INIT_SIMPLE(armv8_cortex_a78)
PMUV3_INIT_SIMPLE(armv9_cortex_a510)
+PMUV3_INIT_SIMPLE(armv9_cortex_a520)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
+PMUV3_INIT_SIMPLE(armv9_cortex_a715)
+PMUV3_INIT_SIMPLE(armv9_cortex_a720)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
+PMUV3_INIT_SIMPLE(armv9_cortex_x3)
+PMUV3_INIT_SIMPLE(armv9_cortex_x4)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
@@ -1334,9 +1316,14 @@ static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
{.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
{.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
+ {.compatible = "arm,cortex-a520-pmu", .data = armv9_cortex_a520_pmu_init},
{.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
+ {.compatible = "arm,cortex-a715-pmu", .data = armv9_cortex_a715_pmu_init},
+ {.compatible = "arm,cortex-a720-pmu", .data = armv9_cortex_a720_pmu_init},
{.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
{.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
+ {.compatible = "arm,cortex-x3-pmu", .data = armv9_cortex_x3_pmu_init},
+ {.compatible = "arm,cortex-x4-pmu", .data = armv9_cortex_x4_pmu_init},
{.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 25a269d431e4..6303b82566f9 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -115,6 +115,7 @@
#define SMMU_PMCG_PA_SHIFT 12
#define SMMU_PMCG_EVCNTR_RDONLY BIT(0)
+#define SMMU_PMCG_HARDEN_DISABLE BIT(1)
static int cpuhp_state_num;
@@ -159,6 +160,20 @@ static inline void smmu_pmu_enable(struct pmu *pmu)
writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}
+static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
+ struct perf_event *event, int idx);
+
+static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+ unsigned int idx;
+
+ for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+ smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx);
+
+ smmu_pmu_enable(pmu);
+}
+
static inline void smmu_pmu_disable(struct pmu *pmu)
{
struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
@@ -167,6 +182,22 @@ static inline void smmu_pmu_disable(struct pmu *pmu)
writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}
+static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu)
+{
+ struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
+ unsigned int idx;
+
+ /*
+ * The global disable of the PMU sometimes fails to stop counting.
+ * Harden this by writing an invalid event type to each used counter
+ * to forcibly stop counting.
+ */
+ for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters)
+ writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
+
+ smmu_pmu_disable(pmu);
+}
+
static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
u32 idx, u64 value)
{
@@ -765,7 +796,10 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
switch (model) {
case IORT_SMMU_V3_PMCG_HISI_HIP08:
/* HiSilicon Erratum 162001800 */
- smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
+ smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE;
+ break;
+ case IORT_SMMU_V3_PMCG_HISI_HIP09:
+ smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE;
break;
}
@@ -890,6 +924,16 @@ static int smmu_pmu_probe(struct platform_device *pdev)
if (!dev->of_node)
smmu_pmu_get_acpi_options(smmu_pmu);
+ /*
+ * For platforms that suffer from this quirk, the PMU disable sometimes
+ * fails to stop the counters, which leads to inaccurate or erroneous
+ * counting. Forcibly disable the counters with these quirk handlers.
+ */
+ if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) {
+ smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09;
+ smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09;
+ }
+
/* Pick one CPU to be the preferred one to use */
smmu_pmu->on_cpu = raw_smp_processor_id();
WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));
@@ -984,6 +1028,7 @@ static void __exit arm_smmu_pmu_exit(void)
module_exit(arm_smmu_pmu_exit);
+MODULE_ALIAS("platform:arm-smmu-v3-pmcg");
MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index b9ba4c4fe5a2..d2b0cbf0e0c4 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -25,8 +25,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 5222ba1e79d0..92611c98120f 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -10,10 +10,9 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#define COUNTER_CNTL 0x0
@@ -28,6 +27,8 @@
#define CNTL_CLEAR_MASK 0xFFFFFFFD
#define CNTL_OVER_MASK 0xFFFFFFFE
+#define CNTL_CP_SHIFT 16
+#define CNTL_CP_MASK (0xFF << CNTL_CP_SHIFT)
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
@@ -35,6 +36,8 @@
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
+/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
+#define CYCLES_COUNTER_MASK 0x0FFFFFFF
#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
@@ -101,6 +104,7 @@ struct ddr_pmu {
const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
+ int active_counter;
};
static ssize_t ddr_perf_identifier_show(struct device *dev,
@@ -427,6 +431,17 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
writel(0, pmu->base + reg);
val = CNTL_EN | CNTL_CLEAR;
val |= FIELD_PREP(CNTL_CSV_MASK, config);
+
+ /*
+ * On i.MX8MP we need to bias the cycle counter to overflow more often.
+ * We do this by initializing bits [23:16] of the counter value via the
+ * COUNTER_CTRL Counter Parameter (CP) field.
+ */
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
+ if (counter == EVENT_CYCLES_COUNTER)
+ val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
+ }
+
writel(val, pmu->base + reg);
} else {
/* Disable counter */
@@ -466,6 +481,12 @@ static void ddr_perf_event_update(struct perf_event *event)
int ret;
new_raw_count = ddr_perf_read_counter(pmu, counter);
+ /* Remove the bias applied in ddr_perf_counter_enable(). */
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
+ if (counter == EVENT_CYCLES_COUNTER)
+ new_raw_count &= CYCLES_COUNTER_MASK;
+ }
+
local64_add(new_raw_count, &event->count);
/*
@@ -495,6 +516,10 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
+ if (!pmu->active_counter++)
+ ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER, true);
+
hwc->state = 0;
}
@@ -548,6 +573,10 @@ static void ddr_perf_event_stop(struct perf_event *event, int flags)
ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
ddr_perf_event_update(event);
+ if (!--pmu->active_counter)
+ ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
+ EVENT_CYCLES_COUNTER, false);
+
hwc->state |= PERF_HES_STOPPED;
}
@@ -565,25 +594,10 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
- struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
-
- /* enable cycle counter if cycle is not active event list */
- if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
- ddr_perf_counter_enable(ddr_pmu,
- EVENT_CYCLES_ID,
- EVENT_CYCLES_COUNTER,
- true);
}
static void ddr_perf_pmu_disable(struct pmu *pmu)
{
- struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
-
- if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
- ddr_perf_counter_enable(ddr_pmu,
- EVENT_CYCLES_ID,
- EVENT_CYCLES_COUNTER,
- false);
}
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
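The i.MX8 DDR change above moves the cycle-counter management out of the pmu_enable/pmu_disable callbacks and into a simple reference count: the cycle counter is switched on when the first event starts and off again when the last one stops. A compact userspace sketch of that bookkeeping follows, with the hardware counter mocked by a flag; the names are illustrative only.

#include <stdbool.h>
#include <stdio.h>

static int  active_counter;   /* number of running events */
static bool cycles_running;   /* stands in for the hardware cycle counter */

static void event_start(void)
{
	if (!active_counter++)    /* first active event enables the cycle counter */
		cycles_running = true;
}

static void event_stop(void)
{
	if (!--active_counter)    /* last active event disables it again */
		cycles_running = false;
}

int main(void)
{
	event_start();
	event_start();
	event_stop();
	printf("cycles_running=%d active=%d\n", cycles_running, active_counter); /* 1 1 */
	event_stop();
	printf("cycles_running=%d active=%d\n", cycles_running, active_counter); /* 0 0 */
	return 0;
}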
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 71d5b07e3aff..5cf770a1bc31 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -7,9 +7,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/perf_event.h>
/* Performance monitor configuration */
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index e10fc7cb9493..5a00adb2de8c 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -665,8 +665,8 @@ static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
if (pcie_pmu->on_cpu == -1) {
- pcie_pmu->on_cpu = cpu;
- WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(cpu)));
+ pcie_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(&pcie_pmu->pdev->dev));
+ WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(pcie_pmu->on_cpu)));
}
return 0;
@@ -676,14 +676,23 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
unsigned int target;
+ cpumask_t mask;
+ int numa_node;
/* Nothing to do if this CPU doesn't own the PMU */
if (pcie_pmu->on_cpu != cpu)
return 0;
pcie_pmu->on_cpu = -1;
- /* Choose a new CPU from all online cpus. */
- target = cpumask_any_but(cpu_online_mask, cpu);
+
+ /* Choose a local CPU from all online cpus. */
+ numa_node = dev_to_node(&pcie_pmu->pdev->dev);
+ if (cpumask_and(&mask, cpumask_of_node(numa_node), cpu_online_mask) &&
+ cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
+ target = cpumask_any(&mask);
+ else
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
if (target >= nr_cpu_ids) {
pci_err(pcie_pmu->pdev, "There is no CPU to set\n");
return 0;
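The hisi_pcie_pmu change above prefers a CPU on the PMU device's NUMA node when migrating the context away from an offlined CPU, and only falls back to an arbitrary online CPU when the node has none left. Here is a toy sketch of that selection with CPU masks modelled as plain bitmasks; the node layout and names are made up for illustration.

#include <stdio.h>

#define NR_CPUS 8

/* Pick the lowest set bit, or -1 if the mask is empty. */
static int first_cpu(unsigned int mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return -1;
}

/* Prefer an online CPU on the device's node (excluding the one going away),
 * otherwise fall back to any other online CPU. */
static int pick_target(unsigned int node_mask, unsigned int online_mask, int leaving_cpu)
{
	unsigned int local = node_mask & online_mask & ~(1u << leaving_cpu);

	if (local)
		return first_cpu(local);
	return first_cpu(online_mask & ~(1u << leaving_cpu));
}

int main(void)
{
	/* CPUs 0-3 on the device's node, all 8 CPUs online, CPU 2 going offline. */
	printf("target=%d\n", pick_target(0x0f, 0xff, 2));   /* -> 0, still node-local */
	/* Node otherwise offline: only CPU 2 of the node remains online. */
	printf("target=%d\n", pick_target(0x0f, 0xf4, 2));   /* -> 4, remote fallback */
	return 0;
}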
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index b94a5f6cc22b..524ba82bfce2 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -8,11 +8,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/hrtimer.h>
#include <linux/acpi.h>
+#include <linux/platform_device.h>
/* Performance Counters Operating Mode Control Registers */
#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index 3972197e2210..fec8e82edb95 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -6,10 +6,9 @@
#define pr_fmt(fmt) "tad_pmu: " fmt
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0c32dffc7ede..9972bfc11a5c 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1833,7 +1833,6 @@ static int xgene_pmu_probe(struct platform_device *pdev)
const struct xgene_pmu_data *dev_data;
const struct of_device_id *of_id;
struct xgene_pmu *xgene_pmu;
- struct resource *res;
int irq, rc;
int version;
@@ -1883,8 +1882,7 @@ static int xgene_pmu_probe(struct platform_device *pdev)
xgene_pmu->version = version;
dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
+ xgene_pmu->pcppmu_csr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xgene_pmu->pcppmu_csr)) {
dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
return PTR_ERR(xgene_pmu->pcppmu_csr);
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index 530426a74f75..d6318cb57aff 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config PINCTRL_CS42L43
+ tristate "Cirrus Logic CS42L43 Pinctrl Driver"
+ depends on MFD_CS42L43
+ select GPIOLIB
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ Select this to support the GPIO/Pinctrl functions of the Cirrus
+ Logic CS42L43 PC CODEC.
+
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
depends on MFD_LOCHNAGAR
diff --git a/drivers/pinctrl/cirrus/Makefile b/drivers/pinctrl/cirrus/Makefile
index a484518c840e..9b618d766907 100644
--- a/drivers/pinctrl/cirrus/Makefile
+++ b/drivers/pinctrl/cirrus/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Cirrus Logic pinctrl drivers
+obj-$(CONFIG_PINCTRL_CS42L43) += pinctrl-cs42l43.o
+
obj-$(CONFIG_PINCTRL_LOCHNAGAR) += pinctrl-lochnagar.o
pinctrl-madera-objs := pinctrl-madera-core.o
diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
new file mode 100644
index 000000000000..c09646318419
--- /dev/null
+++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
@@ -0,0 +1,609 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// CS42L43 Pinctrl and GPIO driver
+//
+// Copyright (c) 2023 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/bits.h>
+#include <linux/build_bug.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/cs42l43.h>
+#include <linux/mfd/cs42l43-regs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/string_helpers.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "../pinctrl-utils.h"
+
+#define CS42L43_NUM_GPIOS 3
+
+struct cs42l43_pin {
+ struct gpio_chip gpio_chip;
+
+ struct device *dev;
+ struct regmap *regmap;
+ bool shutters_locked;
+};
+
+struct cs42l43_pin_data {
+ unsigned int reg;
+ unsigned int shift;
+ unsigned int mask;
+};
+
+#define CS42L43_PIN(_number, _name, _reg, _field) { \
+ .number = _number, .name = _name, \
+ .drv_data = &((struct cs42l43_pin_data){ \
+ .reg = CS42L43_##_reg, \
+ .shift = CS42L43_##_field##_DRV_SHIFT, \
+ .mask = CS42L43_##_field##_DRV_MASK, \
+ }), \
+}
+
+static const struct pinctrl_pin_desc cs42l43_pin_pins[] = {
+ CS42L43_PIN(0, "gpio1", DRV_CTRL4, GPIO1),
+ CS42L43_PIN(1, "gpio2", DRV_CTRL4, GPIO2),
+ CS42L43_PIN(2, "gpio3", DRV_CTRL4, GPIO3),
+ CS42L43_PIN(3, "asp_dout", DRV_CTRL1, ASP_DOUT),
+ CS42L43_PIN(4, "asp_fsync", DRV_CTRL1, ASP_FSYNC),
+ CS42L43_PIN(5, "asp_bclk", DRV_CTRL1, ASP_BCLK),
+ CS42L43_PIN(6, "pdmout2_clk", DRV_CTRL3, PDMOUT2_CLK),
+ CS42L43_PIN(7, "pdmout2_data", DRV_CTRL3, PDMOUT2_DATA),
+ CS42L43_PIN(8, "pdmout1_clk", DRV_CTRL3, PDMOUT1_CLK),
+ CS42L43_PIN(9, "pdmout1_data", DRV_CTRL3, PDMOUT1_DATA),
+ CS42L43_PIN(10, "i2c_sda", DRV_CTRL3, I2C_SDA),
+ CS42L43_PIN(11, "i2c_scl", DRV_CTRL_5, I2C_SCL),
+ CS42L43_PIN(12, "spi_miso", DRV_CTRL3, SPI_MISO),
+ CS42L43_PIN(13, "spi_sck", DRV_CTRL_5, SPI_SCK),
+ CS42L43_PIN(14, "spi_ssb", DRV_CTRL_5, SPI_SSB),
+};
+
+static const unsigned int cs42l43_pin_gpio1_pins[] = { 0 };
+static const unsigned int cs42l43_pin_gpio2_pins[] = { 1 };
+static const unsigned int cs42l43_pin_gpio3_pins[] = { 2 };
+static const unsigned int cs42l43_pin_asp_pins[] = { 3, 4, 5 };
+static const unsigned int cs42l43_pin_pdmout2_pins[] = { 6, 7 };
+static const unsigned int cs42l43_pin_pdmout1_pins[] = { 8, 9 };
+static const unsigned int cs42l43_pin_i2c_pins[] = { 10, 11 };
+static const unsigned int cs42l43_pin_spi_pins[] = { 12, 13, 14 };
+
+#define CS42L43_PINGROUP(_name) \
+ PINCTRL_PINGROUP(#_name, cs42l43_pin_##_name##_pins, \
+ ARRAY_SIZE(cs42l43_pin_##_name##_pins))
+
+static const struct pingroup cs42l43_pin_groups[] = {
+ CS42L43_PINGROUP(gpio1),
+ CS42L43_PINGROUP(gpio2),
+ CS42L43_PINGROUP(gpio3),
+ CS42L43_PINGROUP(asp),
+ CS42L43_PINGROUP(pdmout2),
+ CS42L43_PINGROUP(pdmout1),
+ CS42L43_PINGROUP(i2c),
+ CS42L43_PINGROUP(spi),
+};
+
+static int cs42l43_pin_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(cs42l43_pin_groups);
+}
+
+static const char *cs42l43_pin_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group_idx)
+{
+ return cs42l43_pin_groups[group_idx].name;
+}
+
+static int cs42l43_pin_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group_idx,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ *pins = cs42l43_pin_groups[group_idx].pins;
+ *num_pins = cs42l43_pin_groups[group_idx].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops cs42l43_pin_group_ops = {
+ .get_groups_count = cs42l43_pin_get_groups_count,
+ .get_group_name = cs42l43_pin_get_group_name,
+ .get_group_pins = cs42l43_pin_get_group_pins,
+#if IS_ENABLED(CONFIG_OF)
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+};
+
+enum cs42l43_pin_funcs {
+ CS42L43_FUNC_GPIO,
+ CS42L43_FUNC_SPDIF,
+ CS42L43_FUNC_IRQ,
+ CS42L43_FUNC_MIC_SHT,
+ CS42L43_FUNC_SPK_SHT,
+ CS42L43_FUNC_MAX
+};
+
+static const char * const cs42l43_pin_funcs[] = {
+ "gpio", "spdif", "irq", "mic-shutter", "spk-shutter",
+};
+
+static const char * const cs42l43_pin_gpio_groups[] = { "gpio1", "gpio3" };
+static const char * const cs42l43_pin_spdif_groups[] = { "gpio3" };
+static const char * const cs42l43_pin_irq_groups[] = { "gpio1" };
+static const char * const cs42l43_pin_shutter_groups[] = { "gpio1", "gpio2", "gpio3" };
+
+static const struct pinfunction cs42l43_pin_func_groups[] = {
+ PINCTRL_PINFUNCTION("gpio", cs42l43_pin_gpio_groups,
+ ARRAY_SIZE(cs42l43_pin_gpio_groups)),
+ PINCTRL_PINFUNCTION("spdif", cs42l43_pin_spdif_groups,
+ ARRAY_SIZE(cs42l43_pin_spdif_groups)),
+ PINCTRL_PINFUNCTION("irq", cs42l43_pin_irq_groups,
+ ARRAY_SIZE(cs42l43_pin_irq_groups)),
+ PINCTRL_PINFUNCTION("mic-shutter", cs42l43_pin_shutter_groups,
+ ARRAY_SIZE(cs42l43_pin_shutter_groups)),
+ PINCTRL_PINFUNCTION("spk-shutter", cs42l43_pin_shutter_groups,
+ ARRAY_SIZE(cs42l43_pin_shutter_groups)),
+};
+
+static_assert(ARRAY_SIZE(cs42l43_pin_funcs) == CS42L43_FUNC_MAX);
+static_assert(ARRAY_SIZE(cs42l43_pin_func_groups) == CS42L43_FUNC_MAX);
+
+static int cs42l43_pin_get_func_count(struct pinctrl_dev *pctldev)
+{
+ return ARRAY_SIZE(cs42l43_pin_funcs);
+}
+
+static const char *cs42l43_pin_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned int func_idx)
+{
+ return cs42l43_pin_funcs[func_idx];
+}
+
+static int cs42l43_pin_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int func_idx,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ *groups = cs42l43_pin_func_groups[func_idx].groups;
+ *num_groups = cs42l43_pin_func_groups[func_idx].ngroups;
+
+ return 0;
+}
+
+static int cs42l43_pin_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_idx, unsigned int group_idx)
+{
+ struct cs42l43_pin *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int reg, mask, val;
+
+ dev_dbg(priv->dev, "Setting %s to %s\n",
+ cs42l43_pin_groups[group_idx].name, cs42l43_pin_funcs[func_idx]);
+
+ switch (func_idx) {
+ case CS42L43_FUNC_MIC_SHT:
+ reg = CS42L43_SHUTTER_CONTROL;
+ mask = CS42L43_MIC_SHUTTER_CFG_MASK;
+ val = 0x2 << (group_idx + CS42L43_MIC_SHUTTER_CFG_SHIFT);
+ break;
+ case CS42L43_FUNC_SPK_SHT:
+ reg = CS42L43_SHUTTER_CONTROL;
+ mask = CS42L43_SPK_SHUTTER_CFG_MASK;
+ val = 0x2 << (group_idx + CS42L43_SPK_SHUTTER_CFG_SHIFT);
+ break;
+ default:
+ reg = CS42L43_GPIO_FN_SEL;
+ mask = BIT(group_idx + CS42L43_GPIO1_FN_SEL_SHIFT);
+ val = (func_idx == CS42L43_FUNC_GPIO) ?
+ (0x1 << (group_idx + CS42L43_GPIO1_FN_SEL_SHIFT)) : 0;
+ break;
+ }
+
+ if (priv->shutters_locked && reg == CS42L43_SHUTTER_CONTROL) {
+ dev_err(priv->dev, "Shutter configuration not available\n");
+ return -EPERM;
+ }
+
+ return regmap_update_bits(priv->regmap, reg, mask, val);
+}
+
+static int cs42l43_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct cs42l43_pin *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int shift = offset + CS42L43_GPIO1_DIR_SHIFT;
+ int ret;
+
+ dev_dbg(priv->dev, "Setting gpio%d to %s\n",
+ offset + 1, input ? "input" : "output");
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret) {
+ dev_err(priv->dev, "Failed to resume for direction: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(priv->regmap, CS42L43_GPIO_CTRL1,
+ BIT(shift), !!input << shift);
+ if (ret)
+ dev_err(priv->dev, "Failed to set gpio%d direction: %d\n",
+ offset + 1, ret);
+
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+static int cs42l43_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ return cs42l43_pin_set_mux(pctldev, 0, offset);
+}
+
+static void cs42l43_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ cs42l43_gpio_set_direction(pctldev, range, offset, true);
+}
+
+static const struct pinmux_ops cs42l43_pin_mux_ops = {
+ .get_functions_count = cs42l43_pin_get_func_count,
+ .get_function_name = cs42l43_pin_get_func_name,
+ .get_function_groups = cs42l43_pin_get_func_groups,
+
+ .set_mux = cs42l43_pin_set_mux,
+
+ .gpio_request_enable = cs42l43_gpio_request_enable,
+ .gpio_disable_free = cs42l43_gpio_disable_free,
+ .gpio_set_direction = cs42l43_gpio_set_direction,
+
+ .strict = true,
+};
+
+static const unsigned int cs42l43_pin_drv_str_ma[] = { 1, 2, 4, 8, 9, 10, 12, 16 };
+
+static inline int cs42l43_pin_get_drv_str(struct cs42l43_pin *priv, unsigned int pin)
+{
+ const struct cs42l43_pin_data *pdat = cs42l43_pin_pins[pin].drv_data;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, pdat->reg, &val);
+ if (ret)
+ return ret;
+
+ return cs42l43_pin_drv_str_ma[(val & pdat->mask) >> pdat->shift];
+}
+
+static inline int cs42l43_pin_set_drv_str(struct cs42l43_pin *priv, unsigned int pin,
+ unsigned int ma)
+{
+ const struct cs42l43_pin_data *pdat = cs42l43_pin_pins[pin].drv_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cs42l43_pin_drv_str_ma); i++) {
+ if (ma == cs42l43_pin_drv_str_ma[i]) {
+ if ((i << pdat->shift) > pdat->mask)
+ goto err;
+
+ dev_dbg(priv->dev, "Set drive strength for %s to %d mA\n",
+ cs42l43_pin_pins[pin].name, ma);
+
+ return regmap_update_bits(priv->regmap, pdat->reg,
+ pdat->mask, i << pdat->shift);
+ }
+ }
+
+err:
+ dev_err(priv->dev, "Invalid drive strength for %s: %d mA\n",
+ cs42l43_pin_pins[pin].name, ma);
+ return -EINVAL;
+}
+
+static inline int cs42l43_pin_get_db(struct cs42l43_pin *priv, unsigned int pin)
+{
+ unsigned int val;
+ int ret;
+
+ if (pin >= CS42L43_NUM_GPIOS)
+ return -ENOTSUPP;
+
+ ret = regmap_read(priv->regmap, CS42L43_GPIO_CTRL2, &val);
+ if (ret)
+ return ret;
+
+ if (val & (CS42L43_GPIO1_DEGLITCH_BYP_MASK << pin))
+ return 0;
+
+ return 85; // Debounce is roughly 85us
+}
+
+static inline int cs42l43_pin_set_db(struct cs42l43_pin *priv, unsigned int pin,
+ unsigned int us)
+{
+ if (pin >= CS42L43_NUM_GPIOS)
+ return -ENOTSUPP;
+
+ dev_dbg(priv->dev, "Set debounce %s for %s\n",
+ str_on_off(us), cs42l43_pin_pins[pin].name);
+
+ return regmap_update_bits(priv->regmap, CS42L43_GPIO_CTRL2,
+ CS42L43_GPIO1_DEGLITCH_BYP_MASK << pin,
+ !!us << pin);
+}
+
+static int cs42l43_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct cs42l43_pin *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = cs42l43_pin_get_drv_str(priv, pin);
+ if (ret < 0)
+ return ret;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ ret = cs42l43_pin_get_db(priv, pin);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, ret);
+
+ return 0;
+}
+
+static int cs42l43_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct cs42l43_pin *priv = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int val;
+ int ret;
+
+ while (num_configs) {
+ val = pinconf_to_config_argument(*configs);
+
+ switch (pinconf_to_config_param(*configs)) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = cs42l43_pin_set_drv_str(priv, pin, val);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ ret = cs42l43_pin_set_db(priv, pin, val);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ configs++;
+ num_configs--;
+ }
+
+ return 0;
+}
+
+static int cs42l43_pin_config_group_get(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned long *config)
+{
+ int i, ret;
+
+ for (i = 0; i < cs42l43_pin_groups[selector].npins; ++i) {
+ ret = cs42l43_pin_config_get(pctldev,
+ cs42l43_pin_groups[selector].pins[i],
+ config);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs42l43_pin_config_group_set(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ int i, ret;
+
+ for (i = 0; i < cs42l43_pin_groups[selector].npins; ++i) {
+ ret = cs42l43_pin_config_set(pctldev,
+ cs42l43_pin_groups[selector].pins[i],
+ configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pinconf_ops cs42l43_pin_conf_ops = {
+ .is_generic = true,
+
+ .pin_config_get = cs42l43_pin_config_get,
+ .pin_config_set = cs42l43_pin_config_set,
+ .pin_config_group_get = cs42l43_pin_config_group_get,
+ .pin_config_group_set = cs42l43_pin_config_group_set,
+};
+
+static struct pinctrl_desc cs42l43_pin_desc = {
+ .name = "cs42l43-pinctrl",
+ .owner = THIS_MODULE,
+
+ .pins = cs42l43_pin_pins,
+ .npins = ARRAY_SIZE(cs42l43_pin_pins),
+
+ .pctlops = &cs42l43_pin_group_ops,
+ .pmxops = &cs42l43_pin_mux_ops,
+ .confops = &cs42l43_pin_conf_ops,
+};
+
+static int cs42l43_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct cs42l43_pin *priv = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret) {
+ dev_err(priv->dev, "Failed to resume for get: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(priv->regmap, CS42L43_GPIO_STS, &val);
+ if (ret)
+ dev_err(priv->dev, "Failed to get gpio%d: %d\n", offset + 1, ret);
+ else
+ ret = !!(val & BIT(offset + CS42L43_GPIO1_STS_SHIFT));
+
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct cs42l43_pin *priv = gpiochip_get_data(chip);
+ unsigned int shift = offset + CS42L43_GPIO1_LVL_SHIFT;
+ int ret;
+
+ dev_dbg(priv->dev, "Setting gpio%d to %s\n",
+ offset + 1, value ? "high" : "low");
+
+ ret = pm_runtime_resume_and_get(priv->dev);
+ if (ret) {
+ dev_err(priv->dev, "Failed to resume for set: %d\n", ret);
+ return;
+ }
+
+ ret = regmap_update_bits(priv->regmap, CS42L43_GPIO_CTRL1,
+ BIT(shift), value << shift);
+ if (ret)
+ dev_err(priv->dev, "Failed to set gpio%d: %d\n", offset + 1, ret);
+
+ pm_runtime_put(priv->dev);
+}
+
+static int cs42l43_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int cs42l43_gpio_direction_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ cs42l43_gpio_set(chip, offset, value);
+
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int cs42l43_gpio_add_pin_ranges(struct gpio_chip *chip)
+{
+ struct cs42l43_pin *priv = gpiochip_get_data(chip);
+ int ret;
+
+ ret = gpiochip_add_pin_range(&priv->gpio_chip, priv->gpio_chip.label,
+ 0, 0, CS42L43_NUM_GPIOS);
+ if (ret)
+ dev_err(priv->dev, "Failed to add GPIO pin range: %d\n", ret);
+
+ return ret;
+}
+
+static int cs42l43_pin_probe(struct platform_device *pdev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
+ struct cs42l43_pin *priv;
+ struct pinctrl_dev *pctldev;
+ struct fwnode_handle *fwnode = dev_fwnode(cs42l43->dev);
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->regmap = cs42l43->regmap;
+
+ priv->shutters_locked = cs42l43->hw_lock;
+
+ priv->gpio_chip.request = gpiochip_generic_request;
+ priv->gpio_chip.free = gpiochip_generic_free;
+ priv->gpio_chip.direction_input = cs42l43_gpio_direction_in;
+ priv->gpio_chip.direction_output = cs42l43_gpio_direction_out;
+ priv->gpio_chip.add_pin_ranges = cs42l43_gpio_add_pin_ranges;
+ priv->gpio_chip.get = cs42l43_gpio_get;
+ priv->gpio_chip.set = cs42l43_gpio_set;
+ priv->gpio_chip.label = dev_name(priv->dev);
+ priv->gpio_chip.parent = priv->dev;
+ priv->gpio_chip.can_sleep = true;
+ priv->gpio_chip.base = -1;
+ priv->gpio_chip.ngpio = CS42L43_NUM_GPIOS;
+
+ if (is_of_node(fwnode)) {
+ fwnode = fwnode_get_named_child_node(fwnode, "pinctrl");
+
+ if (fwnode && !fwnode->dev)
+ fwnode->dev = priv->dev;
+ }
+
+ priv->gpio_chip.fwnode = fwnode;
+
+ device_set_node(priv->dev, fwnode);
+
+ devm_pm_runtime_enable(priv->dev);
+ pm_runtime_idle(priv->dev);
+
+ pctldev = devm_pinctrl_register(priv->dev, &cs42l43_pin_desc, priv);
+ if (IS_ERR(pctldev))
+ return dev_err_probe(priv->dev, PTR_ERR(pctldev),
+ "Failed to register pinctrl\n");
+
+ ret = devm_gpiochip_add_data(priv->dev, &priv->gpio_chip, priv);
+ if (ret)
+ return dev_err_probe(priv->dev, ret,
+ "Failed to register gpiochip\n");
+
+ return 0;
+}
+
+static const struct platform_device_id cs42l43_pin_id_table[] = {
+ { "cs42l43-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cs42l43_pin_id_table);
+
+static struct platform_driver cs42l43_pin_driver = {
+ .driver = {
+ .name = "cs42l43-pinctrl",
+ },
+ .probe = cs42l43_pin_probe,
+ .id_table = cs42l43_pin_id_table,
+};
+module_platform_driver(cs42l43_pin_driver);
+
+MODULE_DESCRIPTION("CS42L43 Pinctrl Driver");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 4a8c1b57a90d..4dff656af3ad 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -862,6 +862,33 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
+ u32 pin_reg, mask;
+ int i;
+
+ mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ BIT(WAKE_CNTRL_OFF_S4);
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+ if (!pd)
+ continue;
+
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+ pin_reg = readl(gpio_dev->base + pin * 4);
+ pin_reg &= ~mask;
+ writel(pin_reg, gpio_dev->base + pin * 4);
+
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+}
+
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@@ -1099,6 +1126,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
+ /* Disable and mask interrupts */
+ amd_gpio_irq_init(gpio_dev);
+
girq = &gpio_dev->gc.irq;
gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip);
/* This will let us handle the parent IRQ in the driver */
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 40b1326a1077..5591ddf16fdf 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -14,6 +14,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinmux.h>
@@ -46,6 +47,7 @@ struct rza2_pinctrl_priv {
struct pinctrl_dev *pctl;
struct pinctrl_gpio_range gpio_range;
int npins;
+ struct mutex mutex; /* serialize adding groups and functions */
};
#define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */
@@ -358,10 +360,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
psel_val[i] = MUX_FUNC(value);
}
+ mutex_lock(&priv->mutex);
+
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
- if (gsel < 0)
- return gsel;
+ if (gsel < 0) {
+ ret = gsel;
+ goto unlock;
+ }
/*
* Register a single group function where the 'data' is an array PSEL
@@ -390,6 +396,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev,
(*map)->data.mux.function = np->name;
*num_maps = 1;
+ mutex_unlock(&priv->mutex);
+
return 0;
remove_function:
@@ -398,6 +406,9 @@ remove_function:
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
+unlock:
+ mutex_unlock(&priv->mutex);
+
dev_err(priv->dev, "Unable to parse DT node %s\n", np->name);
return ret;
@@ -473,6 +484,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
+ mutex_init(&priv->mutex);
+
platform_set_drvdata(pdev, priv);
priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) *
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index b53d26167da5..6e8a76556e23 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/seq_file.h>
@@ -149,10 +150,11 @@ struct rzg2l_pinctrl {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT);
- spinlock_t bitmap_lock;
+ spinlock_t bitmap_lock; /* protect tint_slot bitmap */
unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT];
- spinlock_t lock;
+ spinlock_t lock; /* lock read/write registers */
+ struct mutex mutex; /* serialize adding groups and functions */
};
static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 };
@@ -362,11 +364,13 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
+ mutex_lock(&pctrl->mutex);
+
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
if (gsel < 0) {
ret = gsel;
- goto done;
+ goto unlock;
}
/*
@@ -380,6 +384,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto remove_group;
}
+ mutex_unlock(&pctrl->mutex);
+
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
maps[idx].data.mux.group = name;
maps[idx].data.mux.function = name;
@@ -391,6 +397,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev,
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
+unlock:
+ mutex_unlock(&pctrl->mutex);
done:
*index = idx;
kfree(configs);
@@ -1509,6 +1517,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
spin_lock_init(&pctrl->lock);
spin_lock_init(&pctrl->bitmap_lock);
+ mutex_init(&pctrl->mutex);
platform_set_drvdata(pdev, pctrl);
diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
index 35b23c1a5684..9146101ea9e2 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c
@@ -14,6 +14,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
@@ -123,7 +124,8 @@ struct rzv2m_pinctrl {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
- spinlock_t lock;
+ spinlock_t lock; /* lock read/write registers */
+ struct mutex mutex; /* serialize adding groups and functions */
};
static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 };
@@ -322,11 +324,13 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
name = np->name;
}
+ mutex_lock(&pctrl->mutex);
+
/* Register a single pin group listing all the pins we read from DT */
gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL);
if (gsel < 0) {
ret = gsel;
- goto done;
+ goto unlock;
}
/*
@@ -340,6 +344,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
goto remove_group;
}
+ mutex_unlock(&pctrl->mutex);
+
maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
maps[idx].data.mux.group = name;
maps[idx].data.mux.function = name;
@@ -351,6 +357,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev,
remove_group:
pinctrl_generic_remove_group(pctldev, gsel);
+unlock:
+ mutex_unlock(&pctrl->mutex);
done:
*index = idx;
kfree(configs);
@@ -1071,6 +1079,7 @@ static int rzv2m_pinctrl_probe(struct platform_device *pdev)
}
spin_lock_init(&pctrl->lock);
+ mutex_init(&pctrl->mutex);
platform_set_drvdata(pdev, pctrl);
diff --git a/drivers/platform/chrome/chromeos_acpi.c b/drivers/platform/chrome/chromeos_acpi.c
index 50d8a4d4352d..e6e6dcfc74d1 100644
--- a/drivers/platform/chrome/chromeos_acpi.c
+++ b/drivers/platform/chrome/chromeos_acpi.c
@@ -90,7 +90,36 @@ static int chromeos_acpi_handle_package(struct device *dev, union acpi_object *o
case ACPI_TYPE_STRING:
return sysfs_emit(buf, "%s\n", element->string.pointer);
case ACPI_TYPE_BUFFER:
- return sysfs_emit(buf, "%s\n", element->buffer.pointer);
+ {
+ int i, r, at, room_left;
+ const int byte_per_line = 16;
+
+ at = 0;
+ room_left = PAGE_SIZE - 1;
+ for (i = 0; i < element->buffer.length && room_left; i += byte_per_line) {
+ r = hex_dump_to_buffer(element->buffer.pointer + i,
+ element->buffer.length - i,
+ byte_per_line, 1, buf + at, room_left,
+ false);
+ if (r > room_left)
+ goto truncating;
+ at += r;
+ room_left -= r;
+
+ r = sysfs_emit_at(buf, at, "\n");
+ if (!r)
+ goto truncating;
+ at += r;
+ room_left -= r;
+ }
+
+ buf[at] = 0;
+ return at;
+truncating:
+ dev_info_once(dev, "truncating sysfs content for %s\n", name);
+ sysfs_emit_at(buf, PAGE_SIZE - 4, "..\n");
+ return PAGE_SIZE - 1;
+ }
default:
dev_err(dev, "element type %d not supported\n", element->type);
return -EINVAL;
@@ -235,9 +264,9 @@ static int chromeos_acpi_device_probe(struct platform_device *pdev)
return 0;
}
-/* GGL is valid PNP ID of Google. PNP ID can be used with the ACPI devices. */
static const struct acpi_device_id chromeos_device_ids[] = {
{ "GGL0001", 0 },
+ { "GOOG0016", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, chromeos_device_ids);
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 500a61b093e4..356572452898 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -327,8 +327,8 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!");
blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
kobject_uevent_env(&ec_dev->dev->kobj, KOBJ_CHANGE, (char **)env);
- /* Begin orderly shutdown. Force shutdown after 1 second. */
- hw_protection_shutdown("CrOS EC Panic", 1000);
+ /* Begin orderly shutdown. EC will force reset after a short period. */
+ hw_protection_shutdown("CrOS EC Panic", -1);
/* Do not query for other events after a panic is reported */
return;
}
diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c
index e6ae8265f3a3..cefd0d886cfd 100644
--- a/drivers/platform/x86/intel/ifs/load.c
+++ b/drivers/platform/x86/intel/ifs/load.c
@@ -3,7 +3,7 @@
#include <linux/firmware.h>
#include <asm/cpu.h>
-#include <asm/microcode_intel.h>
+#include <asm/microcode.h>
#include "ifs.h"
@@ -56,12 +56,13 @@ struct metadata_header {
static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_type)
{
+ struct microcode_header_intel *hdr = &((struct microcode_intel *)ucode)->hdr;
struct metadata_header *meta_header;
unsigned long data_size, total_meta;
unsigned long meta_size = 0;
- data_size = get_datasize(ucode);
- total_meta = ((struct microcode_intel *)ucode)->hdr.metasize;
+ data_size = intel_microcode_get_datasize(hdr);
+ total_meta = hdr->metasize;
if (!total_meta)
return NULL;
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 5a36b3f77bc5..84c175b9721a 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -1123,7 +1123,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, icl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, tgl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, tgl_core_init),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, tgl_core_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, tgl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, adl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, tgl_core_init),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, adl_core_init),
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index a95004e3d80b..08df9494603c 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -720,7 +720,7 @@ static struct miscdevice isst_if_char_driver = {
static const struct x86_cpu_id hpm_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(SIERRAFOREST_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
};
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 38928ff7472b..6ab272c84b7b 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -254,6 +254,9 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
else
strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name));
+ /* Handle possible string truncation */
+ dev->name[sizeof(dev->name) - 1] = '\0';
+
if (dev->active)
pnpacpi_parse_allocated_resource(dev);
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index fff07b2bd77b..59e1ebb7842e 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -148,13 +148,6 @@ config POWER_RESET_ODROID_GO_ULTRA_POWEROFF
help
This driver supports Power off for Odroid Go Ultra device.
-config POWER_RESET_OXNAS
- bool "OXNAS SoC restart driver"
- depends on ARCH_OXNAS
- default MACH_OX820
- help
- Restart support for OXNAS/PLXTECH OX820 SoC.
-
config POWER_RESET_PIIX4_POWEROFF
tristate "Intel PIIX4 power-off driver"
depends on PCI
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index d763e6735ee3..a95d1bd275d1 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_LINKSTATION) += linkstation-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
-obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o
obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_ODROID_GO_ULTRA_POWEROFF) += odroid-go-ultra-poweroff.o
diff --git a/drivers/power/reset/as3722-poweroff.c b/drivers/power/reset/as3722-poweroff.c
index 80edff1a556f..829e0dba2fda 100644
--- a/drivers/power/reset/as3722-poweroff.c
+++ b/drivers/power/reset/as3722-poweroff.c
@@ -10,7 +10,6 @@
#include <linux/mfd/as3722.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index 9e74e131c675..dd5399785b69 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -151,13 +151,11 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
static int __init at91_poweroff_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device_node *np;
u32 ddr_type;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_shdwc.shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ at91_shdwc.shdwc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(at91_shdwc.shdwc_base))
return PTR_ERR(at91_shdwc.shdwc_base);
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index d6884841a6dc..aa9b012d3d00 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/reset-controller.h>
+#include <linux/power/power_on_reason.h>
#include <soc/at91/at91sam9_ddrsdr.h>
#include <soc/at91/at91sam9_sdramc.h>
@@ -149,44 +150,54 @@ static int at91_reset(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
-static const char * __init at91_reset_reason(struct at91_reset *reset)
+static const char *at91_reset_reason(struct at91_reset *reset)
{
u32 reg = readl(reset->rstc_base + AT91_RSTC_SR);
const char *reason;
switch ((reg & AT91_RSTC_RSTTYP) >> 8) {
case RESET_TYPE_GENERAL:
- reason = "general reset";
+ reason = POWER_ON_REASON_REGULAR;
break;
case RESET_TYPE_WAKEUP:
- reason = "wakeup";
+ reason = POWER_ON_REASON_RTC;
break;
case RESET_TYPE_WATCHDOG:
- reason = "watchdog reset";
+ reason = POWER_ON_REASON_WATCHDOG;
break;
case RESET_TYPE_SOFTWARE:
- reason = "software reset";
+ reason = POWER_ON_REASON_SOFTWARE;
break;
case RESET_TYPE_USER:
- reason = "user reset";
+ reason = POWER_ON_REASON_RST_BTN;
break;
case RESET_TYPE_CPU_FAIL:
- reason = "CPU clock failure detection";
+ reason = POWER_ON_REASON_CPU_CLK_FAIL;
break;
case RESET_TYPE_XTAL_FAIL:
- reason = "32.768 kHz crystal failure detection";
+ reason = POWER_ON_REASON_XTAL_FAIL;
break;
case RESET_TYPE_ULP2:
- reason = "ULP2 reset";
+ reason = POWER_ON_REASON_BROWN_OUT;
break;
default:
- reason = "unknown reset";
+ reason = POWER_ON_REASON_UNKNOWN;
break;
}
return reason;
}
+static ssize_t power_on_reason_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at91_reset *reset = platform_get_drvdata(pdev);
+
+ return sprintf(buf, "%s\n", at91_reset_reason(reset));
+}
+static DEVICE_ATTR_RO(power_on_reason);
+
static const struct of_device_id at91_ramc_of_match[] = {
{
.compatible = "atmel,at91sam9260-sdramc",
@@ -391,6 +402,12 @@ static int __init at91_reset_probe(struct platform_device *pdev)
if (ret)
goto disable_clk;
+ ret = device_create_file(&pdev->dev, &dev_attr_power_on_reason);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not create sysfs entry\n");
+ return ret;
+ }
+
dev_info(&pdev->dev, "Starting after %s\n", at91_reset_reason(reset));
return 0;
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index d8ecffe72f16..e76b102b57b1 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -331,7 +331,6 @@ static const struct of_device_id at91_pmc_ids[] = {
static int __init at91_shdwc_probe(struct platform_device *pdev)
{
- struct resource *res;
const struct of_device_id *match;
struct device_node *np;
u32 ddr_type;
@@ -349,8 +348,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, at91_shdwc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_shdwc->shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ at91_shdwc->shdwc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(at91_shdwc->shdwc_base))
return PTR_ERR(at91_shdwc->shdwc_base);
diff --git a/drivers/power/reset/brcm-kona-reset.c b/drivers/power/reset/brcm-kona-reset.c
index 3de024e3ceb7..d05728b1db09 100644
--- a/drivers/power/reset/brcm-kona-reset.c
+++ b/drivers/power/reset/brcm-kona-reset.c
@@ -2,8 +2,8 @@
// Copyright (C) 2016 Broadcom
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/reboot.h>
#define RSTMGR_REG_WR_ACCESS_OFFSET 0
@@ -38,9 +38,7 @@ static struct notifier_block kona_reset_nb = {
static int kona_reset_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- kona_reset_base = devm_ioremap_resource(&pdev->dev, res);
+ kona_reset_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_reset_base))
return PTR_ERR(kona_reset_base);
diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c
index b7f7a8225f22..d309b610142c 100644
--- a/drivers/power/reset/gemini-poweroff.c
+++ b/drivers/power/reset/gemini-poweroff.c
@@ -91,7 +91,6 @@ static void gemini_poweroff(void)
static int gemini_poweroff_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct gemini_powercon *gpw;
u32 val;
int irq;
@@ -101,8 +100,7 @@ static int gemini_poweroff_probe(struct platform_device *pdev)
if (!gpw)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpw->base = devm_ioremap_resource(dev, res);
+ gpw->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpw->base))
return PTR_ERR(gpw->base);
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index 84b3c3528afa..b28f24da1b3c 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -11,8 +11,9 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#define DEFAULT_TIMEOUT_MS 3000
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index a479d3536eb1..3aa19765772d 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -12,8 +12,8 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_platform.h>
#include <linux/module.h>
+#include <linux/of.h>
struct gpio_restart {
struct gpio_desc *reset_gpio;
diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c
index 83a4e1c9bf94..dbc4ff61cd74 100644
--- a/drivers/power/reset/keystone-reset.c
+++ b/drivers/power/reset/keystone-reset.c
@@ -10,10 +10,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#define RSTYPE_RG 0x0
#define RSCTRL_RG 0x4
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 0c439f83bf65..b9a401bd280b 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -35,11 +35,7 @@ static void do_msm_poweroff(void)
static int msm_restart_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct resource *mem;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- msm_ps_hold = devm_ioremap_resource(dev, mem);
+ msm_ps_hold = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(msm_ps_hold))
return PTR_ERR(msm_ps_hold);
diff --git a/drivers/power/reset/ocelot-reset.c b/drivers/power/reset/ocelot-reset.c
index 8caa90cb58fc..56be64decf54 100644
--- a/drivers/power/reset/ocelot-reset.c
+++ b/drivers/power/reset/ocelot-reset.c
@@ -8,10 +8,10 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/notifier.h>
+#include <linux/mod_devicetable.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
@@ -69,8 +69,6 @@ static int ocelot_restart_handle(struct notifier_block *this,
static int ocelot_reset_probe(struct platform_device *pdev)
{
struct ocelot_reset_context *ctx;
- struct resource *res;
-
struct device *dev = &pdev->dev;
int err;
@@ -78,8 +76,7 @@ static int ocelot_reset_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
diff --git a/drivers/power/reset/odroid-go-ultra-poweroff.c b/drivers/power/reset/odroid-go-ultra-poweroff.c
index f46271da4e8e..9cac7aef77f0 100644
--- a/drivers/power/reset/odroid-go-ultra-poweroff.c
+++ b/drivers/power/reset/odroid-go-ultra-poweroff.c
@@ -4,7 +4,8 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/mfd/rk808.h>
#include <linux/regmap.h>
#include <linux/module.h>
diff --git a/drivers/power/reset/oxnas-restart.c b/drivers/power/reset/oxnas-restart.c
deleted file mode 100644
index 13090bec058a..000000000000
--- a/drivers/power/reset/oxnas-restart.c
+++ /dev/null
@@ -1,233 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0)
-/*
- * oxnas SoC reset driver
- * based on:
- * Microsemi MIPS SoC reset driver
- * and ox820_assert_system_reset() written by Ma Hajun <mahaijuns@gmail.com>
- *
- * Copyright (c) 2013 Ma Hajun <mahaijuns@gmail.com>
- * Copyright (c) 2017 Microsemi Corporation
- * Copyright (c) 2020 Daniel Golle <daniel@makrotopia.org>
- */
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/notifier.h>
-#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-#include <linux/regmap.h>
-
-/* bit numbers of reset control register */
-#define OX820_SYS_CTRL_RST_SCU 0
-#define OX820_SYS_CTRL_RST_COPRO 1
-#define OX820_SYS_CTRL_RST_ARM0 2
-#define OX820_SYS_CTRL_RST_ARM1 3
-#define OX820_SYS_CTRL_RST_USBHS 4
-#define OX820_SYS_CTRL_RST_USBHSPHYA 5
-#define OX820_SYS_CTRL_RST_MACA 6
-#define OX820_SYS_CTRL_RST_MAC OX820_SYS_CTRL_RST_MACA
-#define OX820_SYS_CTRL_RST_PCIEA 7
-#define OX820_SYS_CTRL_RST_SGDMA 8
-#define OX820_SYS_CTRL_RST_CIPHER 9
-#define OX820_SYS_CTRL_RST_DDR 10
-#define OX820_SYS_CTRL_RST_SATA 11
-#define OX820_SYS_CTRL_RST_SATA_LINK 12
-#define OX820_SYS_CTRL_RST_SATA_PHY 13
-#define OX820_SYS_CTRL_RST_PCIEPHY 14
-#define OX820_SYS_CTRL_RST_STATIC 15
-#define OX820_SYS_CTRL_RST_GPIO 16
-#define OX820_SYS_CTRL_RST_UART1 17
-#define OX820_SYS_CTRL_RST_UART2 18
-#define OX820_SYS_CTRL_RST_MISC 19
-#define OX820_SYS_CTRL_RST_I2S 20
-#define OX820_SYS_CTRL_RST_SD 21
-#define OX820_SYS_CTRL_RST_MACB 22
-#define OX820_SYS_CTRL_RST_PCIEB 23
-#define OX820_SYS_CTRL_RST_VIDEO 24
-#define OX820_SYS_CTRL_RST_DDR_PHY 25
-#define OX820_SYS_CTRL_RST_USBHSPHYB 26
-#define OX820_SYS_CTRL_RST_USBDEV 27
-#define OX820_SYS_CTRL_RST_ARMDBG 29
-#define OX820_SYS_CTRL_RST_PLLA 30
-#define OX820_SYS_CTRL_RST_PLLB 31
-
-/* bit numbers of clock control register */
-#define OX820_SYS_CTRL_CLK_COPRO 0
-#define OX820_SYS_CTRL_CLK_DMA 1
-#define OX820_SYS_CTRL_CLK_CIPHER 2
-#define OX820_SYS_CTRL_CLK_SD 3
-#define OX820_SYS_CTRL_CLK_SATA 4
-#define OX820_SYS_CTRL_CLK_I2S 5
-#define OX820_SYS_CTRL_CLK_USBHS 6
-#define OX820_SYS_CTRL_CLK_MACA 7
-#define OX820_SYS_CTRL_CLK_MAC OX820_SYS_CTRL_CLK_MACA
-#define OX820_SYS_CTRL_CLK_PCIEA 8
-#define OX820_SYS_CTRL_CLK_STATIC 9
-#define OX820_SYS_CTRL_CLK_MACB 10
-#define OX820_SYS_CTRL_CLK_PCIEB 11
-#define OX820_SYS_CTRL_CLK_REF600 12
-#define OX820_SYS_CTRL_CLK_USBDEV 13
-#define OX820_SYS_CTRL_CLK_DDR 14
-#define OX820_SYS_CTRL_CLK_DDRPHY 15
-#define OX820_SYS_CTRL_CLK_DDRCK 16
-
-/* Regmap offsets */
-#define OX820_CLK_SET_REGOFFSET 0x2c
-#define OX820_CLK_CLR_REGOFFSET 0x30
-#define OX820_RST_SET_REGOFFSET 0x34
-#define OX820_RST_CLR_REGOFFSET 0x38
-#define OX820_SECONDARY_SEL_REGOFFSET 0x14
-#define OX820_TERTIARY_SEL_REGOFFSET 0x8c
-#define OX820_QUATERNARY_SEL_REGOFFSET 0x94
-#define OX820_DEBUG_SEL_REGOFFSET 0x9c
-#define OX820_ALTERNATIVE_SEL_REGOFFSET 0xa4
-#define OX820_PULLUP_SEL_REGOFFSET 0xac
-#define OX820_SEC_SECONDARY_SEL_REGOFFSET 0x100014
-#define OX820_SEC_TERTIARY_SEL_REGOFFSET 0x10008c
-#define OX820_SEC_QUATERNARY_SEL_REGOFFSET 0x100094
-#define OX820_SEC_DEBUG_SEL_REGOFFSET 0x10009c
-#define OX820_SEC_ALTERNATIVE_SEL_REGOFFSET 0x1000a4
-#define OX820_SEC_PULLUP_SEL_REGOFFSET 0x1000ac
-
-struct oxnas_restart_context {
- struct regmap *sys_ctrl;
- struct notifier_block restart_handler;
-};
-
-static int ox820_restart_handle(struct notifier_block *this,
- unsigned long mode, void *cmd)
-{
- struct oxnas_restart_context *ctx = container_of(this, struct
- oxnas_restart_context,
- restart_handler);
- u32 value;
-
- /*
- * Assert reset to cores as per power on defaults
- * Don't touch the DDR interface as things will come to an impromptu
- * stop NB Possibly should be asserting reset for PLLB, but there are
- * timing concerns here according to the docs
- */
- value = BIT(OX820_SYS_CTRL_RST_COPRO) |
- BIT(OX820_SYS_CTRL_RST_USBHS) |
- BIT(OX820_SYS_CTRL_RST_USBHSPHYA) |
- BIT(OX820_SYS_CTRL_RST_MACA) |
- BIT(OX820_SYS_CTRL_RST_PCIEA) |
- BIT(OX820_SYS_CTRL_RST_SGDMA) |
- BIT(OX820_SYS_CTRL_RST_CIPHER) |
- BIT(OX820_SYS_CTRL_RST_SATA) |
- BIT(OX820_SYS_CTRL_RST_SATA_LINK) |
- BIT(OX820_SYS_CTRL_RST_SATA_PHY) |
- BIT(OX820_SYS_CTRL_RST_PCIEPHY) |
- BIT(OX820_SYS_CTRL_RST_STATIC) |
- BIT(OX820_SYS_CTRL_RST_UART1) |
- BIT(OX820_SYS_CTRL_RST_UART2) |
- BIT(OX820_SYS_CTRL_RST_MISC) |
- BIT(OX820_SYS_CTRL_RST_I2S) |
- BIT(OX820_SYS_CTRL_RST_SD) |
- BIT(OX820_SYS_CTRL_RST_MACB) |
- BIT(OX820_SYS_CTRL_RST_PCIEB) |
- BIT(OX820_SYS_CTRL_RST_VIDEO) |
- BIT(OX820_SYS_CTRL_RST_USBHSPHYB) |
- BIT(OX820_SYS_CTRL_RST_USBDEV);
-
- regmap_write(ctx->sys_ctrl, OX820_RST_SET_REGOFFSET, value);
-
- /* Release reset to cores as per power on defaults */
- regmap_write(ctx->sys_ctrl, OX820_RST_CLR_REGOFFSET,
- BIT(OX820_SYS_CTRL_RST_GPIO));
-
- /*
- * Disable clocks to cores as per power-on defaults - must leave DDR
- * related clocks enabled otherwise we'll stop rather abruptly.
- */
- value = BIT(OX820_SYS_CTRL_CLK_COPRO) |
- BIT(OX820_SYS_CTRL_CLK_DMA) |
- BIT(OX820_SYS_CTRL_CLK_CIPHER) |
- BIT(OX820_SYS_CTRL_CLK_SD) |
- BIT(OX820_SYS_CTRL_CLK_SATA) |
- BIT(OX820_SYS_CTRL_CLK_I2S) |
- BIT(OX820_SYS_CTRL_CLK_USBHS) |
- BIT(OX820_SYS_CTRL_CLK_MAC) |
- BIT(OX820_SYS_CTRL_CLK_PCIEA) |
- BIT(OX820_SYS_CTRL_CLK_STATIC) |
- BIT(OX820_SYS_CTRL_CLK_MACB) |
- BIT(OX820_SYS_CTRL_CLK_PCIEB) |
- BIT(OX820_SYS_CTRL_CLK_REF600) |
- BIT(OX820_SYS_CTRL_CLK_USBDEV);
-
- regmap_write(ctx->sys_ctrl, OX820_CLK_CLR_REGOFFSET, value);
-
- /* Enable clocks to cores as per power-on defaults */
-
- /* Set sys-control pin mux'ing as per power-on defaults */
- regmap_write(ctx->sys_ctrl, OX820_SECONDARY_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_TERTIARY_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_QUATERNARY_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_DEBUG_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_ALTERNATIVE_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_PULLUP_SEL_REGOFFSET, 0);
-
- regmap_write(ctx->sys_ctrl, OX820_SEC_SECONDARY_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_SEC_TERTIARY_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_SEC_QUATERNARY_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_SEC_DEBUG_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_SEC_ALTERNATIVE_SEL_REGOFFSET, 0);
- regmap_write(ctx->sys_ctrl, OX820_SEC_PULLUP_SEL_REGOFFSET, 0);
-
- /*
- * No need to save any state, as the ROM loader can determine whether
- * reset is due to power cycling or programatic action, just hit the
- * (self-clearing) CPU reset bit of the block reset register
- */
- value =
- BIT(OX820_SYS_CTRL_RST_SCU) |
- BIT(OX820_SYS_CTRL_RST_ARM0) |
- BIT(OX820_SYS_CTRL_RST_ARM1);
-
- regmap_write(ctx->sys_ctrl, OX820_RST_SET_REGOFFSET, value);
-
- pr_emerg("Unable to restart system\n");
- return NOTIFY_DONE;
-}
-
-static int ox820_restart_probe(struct platform_device *pdev)
-{
- struct oxnas_restart_context *ctx;
- struct regmap *sys_ctrl;
- struct device *dev = &pdev->dev;
- int err = 0;
-
- sys_ctrl = syscon_node_to_regmap(pdev->dev.of_node);
- if (IS_ERR(sys_ctrl))
- return PTR_ERR(sys_ctrl);
-
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->sys_ctrl = sys_ctrl;
- ctx->restart_handler.notifier_call = ox820_restart_handle;
- ctx->restart_handler.priority = 192;
- err = register_restart_handler(&ctx->restart_handler);
- if (err)
- dev_err(dev, "can't register restart notifier (err=%d)\n", err);
-
- return err;
-}
-
-static const struct of_device_id ox820_restart_of_match[] = {
- { .compatible = "oxsemi,ox820-sys-ctrl" },
- {}
-};
-
-static struct platform_driver ox820_restart_driver = {
- .probe = ox820_restart_probe,
- .driver = {
- .name = "ox820-chip-reset",
- .of_match_table = ox820_restart_of_match,
- },
-};
-builtin_platform_driver(ox820_restart_driver);
diff --git a/drivers/power/reset/st-poweroff.c b/drivers/power/reset/st-poweroff.c
index 5ccaacffab54..56ba21873882 100644
--- a/drivers/power/reset/st-poweroff.c
+++ b/drivers/power/reset/st-poweroff.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/reboot.h>
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index ed58bdf41e27..430d440d55c6 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -10,8 +10,7 @@
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 45e34e6885f7..4d622c19bc48 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -9,8 +9,7 @@
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
diff --git a/drivers/power/reset/xgene-reboot.c b/drivers/power/reset/xgene-reboot.c
index 0b0d2fd2bd0c..3260bd93158e 100644
--- a/drivers/power/reset/xgene-reboot.c
+++ b/drivers/power/reset/xgene-reboot.c
@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/notifier.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 57e50208d537..19a118633115 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -13,7 +13,6 @@
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/power_supply.h>
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index 9106077c0dbb..6ac5c80cfda2 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -22,7 +22,6 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 51c3f9b6458d..bde17406c130 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -15,7 +15,6 @@
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/power_supply.h>
diff --git a/drivers/power/supply/bd99954-charger.c b/drivers/power/supply/bd99954-charger.c
index 086dcf4033c1..1ed1d9f99fb3 100644
--- a/drivers/power/supply/bd99954-charger.c
+++ b/drivers/power/supply/bd99954-charger.c
@@ -536,7 +536,7 @@ static irqreturn_t bd9995x_irq_handler_thread(int irq, void *private)
for_each_set_bit(i, &tmp, 7) {
int sub_status, sub_mask;
- int sub_status_reg[] = {
+ static const int sub_status_reg[] = {
INT1_STATUS, INT2_STATUS, INT3_STATUS, INT4_STATUS,
INT5_STATUS, INT6_STATUS, INT7_STATUS,
};
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index ef8235848f56..3f99cb9590ba 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -965,7 +965,7 @@ static int bq24190_charger_get_precharge(struct bq24190_dev_info *bdi,
union power_supply_propval *val)
{
u8 v;
- int ret;
+ int curr, ret;
ret = bq24190_read_mask(bdi, BQ24190_REG_PCTCC,
BQ24190_REG_PCTCC_IPRECHG_MASK,
@@ -973,7 +973,20 @@ static int bq24190_charger_get_precharge(struct bq24190_dev_info *bdi,
if (ret < 0)
return ret;
- val->intval = ++v * 128 * 1000;
+ curr = ++v * 128 * 1000;
+
+ ret = bq24190_read_mask(bdi, BQ24190_REG_CCC,
+ BQ24190_REG_CCC_FORCE_20PCT_MASK,
+ BQ24190_REG_CCC_FORCE_20PCT_SHIFT, &v);
+ if (ret < 0)
+ return ret;
+
+ /* If FORCE_20PCT is enabled, then current is 50% of IPRECHG value */
+ if (v)
+ curr /= 2;
+
+ val->intval = curr;
+
return 0;
}
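
A standalone sketch, not taken from the driver, of the arithmetic the bq24190 hunk above introduces: the reported precharge current is (IPRECHG + 1) steps of 128 mA expressed in microamps, halved when the FORCE_20PCT bit reads back non-zero.

#include <stdbool.h>

/* Hypothetical helper mirroring the precharge computation shown above. */
static int bq24190_precharge_ua(unsigned char iprechg, bool force_20pct)
{
	int curr = (iprechg + 1) * 128 * 1000;	/* 128 mA per step, returned in uA */

	if (force_20pct)	/* FORCE_20PCT: current is 50% of the IPRECHG value */
		curr /= 2;

	return curr;
}
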
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index d98d9244e394..5dd76c0ac98d 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index a87aeaea38e1..27f897067aa3 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -11,8 +11,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/notifier.h>
diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
index 8c5e2c49d6c1..652c1f213af1 100644
--- a/drivers/power/supply/da9150-fg.c
+++ b/drivers/power/supply/da9150-fg.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/power/supply/lego_ev3_battery.c b/drivers/power/supply/lego_ev3_battery.c
index ccb00be38e2c..9085de0ae1b2 100644
--- a/drivers/power/supply/lego_ev3_battery.c
+++ b/drivers/power/supply/lego_ev3_battery.c
@@ -20,7 +20,7 @@
#include <linux/iio/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index cad39a8f829d..a1ddc4b060ce 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -11,7 +11,7 @@
#include <linux/devm-helpers.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/swab.h>
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index ec5b4a20ad43..f0eace731480 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/i2c.h>
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index fbdf12cf64eb..89f2af72dfcd 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -14,8 +14,8 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
#include <linux/devm-helpers.h>
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 22ea7de47a53..ff42db672899 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/power_supply.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/power/supply/max8903_charger.c b/drivers/power/supply/max8903_charger.c
index 54d50b55fbae..e65d0141f260 100644
--- a/drivers/power/supply/max8903_charger.c
+++ b/drivers/power/supply/max8903_charger.c
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 3791aec69ddc..4aa466c945e2 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1305,8 +1305,12 @@ static int psy_register_thermal(struct power_supply *psy)
/* Register battery zone device psy reports temperature */
if (psy_has_property(psy->desc, POWER_SUPPLY_PROP_TEMP)) {
+ /* Prefer our hwmon device and avoid duplicates */
+ struct thermal_zone_params tzp = {
+ .no_hwmon = IS_ENABLED(CONFIG_POWER_SUPPLY_HWMON)
+ };
psy->tzd = thermal_zone_device_register(psy->desc->name,
- 0, 0, psy, &psy_tzd_ops, NULL, 0, 0);
+ 0, 0, psy, &psy_tzd_ops, &tzp, 0, 0);
if (IS_ERR(psy->tzd))
return PTR_ERR(psy->tzd);
ret = thermal_zone_device_enable(psy->tzd);
diff --git a/drivers/power/supply/qcom_pmi8998_charger.c b/drivers/power/supply/qcom_pmi8998_charger.c
index d16c5ee17249..10f4dd0caca1 100644
--- a/drivers/power/supply/qcom_pmi8998_charger.c
+++ b/drivers/power/supply/qcom_pmi8998_charger.c
@@ -409,8 +409,6 @@ static enum power_supply_property smb2_properties[] = {
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_USB_TYPE,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
};
static enum power_supply_usb_type smb2_usb_types[] = {
@@ -519,9 +517,9 @@ static int smb2_get_prop_status(struct smb2_chip *chip, int *val)
*val = POWER_SUPPLY_STATUS_NOT_CHARGING;
return rc;
case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
*val = POWER_SUPPLY_STATUS_FULL;
return rc;
- case INHIBIT_CHARGE:
default:
*val = POWER_SUPPLY_STATUS_UNKNOWN;
return rc;
@@ -556,7 +554,8 @@ static int smb2_set_current_limit(struct smb2_chip *chip, unsigned int val)
static void smb2_status_change_work(struct work_struct *work)
{
unsigned int charger_type, current_ua;
- int usb_online, count, rc;
+ int usb_online = 0;
+ int count, rc;
struct smb2_chip *chip;
chip = container_of(work, struct smb2_chip, status_change_work.work);
@@ -673,11 +672,7 @@ static int smb2_get_property(struct power_supply *psy,
val->strval = chip->name;
return 0;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
return smb2_get_current_limit(chip, &val->intval);
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
- val->intval = DCP_CURRENT_UA;
- return 0;
case POWER_SUPPLY_PROP_CURRENT_NOW:
return smb2_get_iio_chan(chip, chip->usb_in_i_chan,
&val->intval);
@@ -706,7 +701,6 @@ static int smb2_set_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CURRENT_MAX:
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
return smb2_set_current_limit(chip, val->intval);
default:
dev_err(chip->dev, "No setter for property: %d\n", psp);
@@ -719,7 +713,6 @@ static int smb2_property_is_writable(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_CURRENT_MAX:
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
return 1;
default:
return 0;
diff --git a/drivers/power/supply/rn5t618_power.c b/drivers/power/supply/rn5t618_power.c
index a5e09ac78a50..ebea3522a2ac 100644
--- a/drivers/power/supply/rn5t618_power.c
+++ b/drivers/power/supply/rn5t618_power.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mfd/rn5t618.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
diff --git a/drivers/power/supply/rt5033_charger.c b/drivers/power/supply/rt5033_charger.c
index 5218dfbf5e1b..c0c516f22c66 100644
--- a/drivers/power/supply/rt5033_charger.c
+++ b/drivers/power/supply/rt5033_charger.c
@@ -6,8 +6,8 @@
* Author: Beomho Seo <beomho.seo@samsung.com>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index ff4dcf77c788..c345a77f9f78 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -8,8 +8,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/of_irq.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
#include <linux/i2c.h>
diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c
index 62a354e01204..cdfc8466d129 100644
--- a/drivers/power/supply/sbs-battery.c
+++ b/drivers/power/supply/sbs-battery.c
@@ -16,7 +16,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/property.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/power/sbs-battery.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
diff --git a/drivers/power/supply/tps65090-charger.c b/drivers/power/supply/tps65090-charger.c
index 0990b2fa6cd8..f96c705e0a9f 100644
--- a/drivers/power/supply/tps65090-charger.c
+++ b/drivers/power/supply/tps65090-charger.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
diff --git a/drivers/power/supply/tps65217_charger.c b/drivers/power/supply/tps65217_charger.c
index a4bc9f2a10bc..96341cbde4fa 100644
--- a/drivers/power/supply/tps65217_charger.c
+++ b/drivers/power/supply/tps65217_charger.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/power_supply.h>
#include <linux/mfd/core.h>
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 7adfd69fe649..e78d061d8d32 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/twl.h>
diff --git a/drivers/powercap/arm_scmi_powercap.c b/drivers/powercap/arm_scmi_powercap.c
index 5231f6d52ae3..a081f177e702 100644
--- a/drivers/powercap/arm_scmi_powercap.c
+++ b/drivers/powercap/arm_scmi_powercap.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/powercap.h>
#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
#define to_scmi_powercap_zone(z) \
container_of(z, struct scmi_powercap_zone, zone)
@@ -19,6 +20,8 @@
static const struct scmi_powercap_proto_ops *powercap_ops;
struct scmi_powercap_zone {
+ bool registered;
+ bool invalid;
unsigned int height;
struct device *dev;
struct scmi_protocol_handle *ph;
@@ -32,6 +35,7 @@ struct scmi_powercap_root {
unsigned int num_zones;
struct scmi_powercap_zone *spzones;
struct list_head *registered_zones;
+ struct list_head scmi_zones;
};
static struct powercap_control_type *scmi_top_pcntrl;
@@ -271,12 +275,6 @@ static void scmi_powercap_unregister_all_zones(struct scmi_powercap_root *pr)
}
}
-static inline bool
-scmi_powercap_is_zone_registered(struct scmi_powercap_zone *spz)
-{
- return !list_empty(&spz->node);
-}
-
static inline unsigned int
scmi_powercap_get_zone_height(struct scmi_powercap_zone *spz)
{
@@ -295,11 +293,46 @@ scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
return &spz->spzones[spz->info->parent_id];
}
+static int scmi_powercap_register_zone(struct scmi_powercap_root *pr,
+ struct scmi_powercap_zone *spz,
+ struct scmi_powercap_zone *parent)
+{
+ int ret = 0;
+ struct powercap_zone *z;
+
+ if (spz->invalid) {
+ list_del(&spz->node);
+ return -EINVAL;
+ }
+
+ z = powercap_register_zone(&spz->zone, scmi_top_pcntrl, spz->info->name,
+ parent ? &parent->zone : NULL,
+ &zone_ops, 1, &constraint_ops);
+ if (!IS_ERR(z)) {
+ spz->height = scmi_powercap_get_zone_height(spz);
+ spz->registered = true;
+ list_move(&spz->node, &pr->registered_zones[spz->height]);
+ dev_dbg(spz->dev, "Registered node %s - parent %s - height:%d\n",
+ spz->info->name, parent ? parent->info->name : "ROOT",
+ spz->height);
+ } else {
+ list_del(&spz->node);
+ ret = PTR_ERR(z);
+ dev_err(spz->dev,
+ "Error registering node:%s - parent:%s - h:%d - ret:%d\n",
+ spz->info->name,
+ parent ? parent->info->name : "ROOT",
+ spz->height, ret);
+ }
+
+ return ret;
+}
+
/**
- * scmi_powercap_register_zone - Register an SCMI powercap zone recursively
+ * scmi_zones_register() - Register SCMI powercap zones starting from parent zones
*
+ * @dev: A reference to the SCMI device
* @pr: A reference to the root powercap zones descriptors
- * @spz: A reference to the SCMI powercap zone to register
*
* When registering SCMI powercap zones with the powercap framework we should
* take care to always register zones starting from the root ones and to
@@ -309,10 +342,10 @@ scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
* zones provided by the SCMI platform firmware is built to comply with such
* requirement.
*
- * This function, given an SCMI powercap zone to register, takes care to walk
- * the SCMI powercap zones tree up to the root looking recursively for
- * unregistered parent zones before registering the provided zone; at the same
- * time each registered zone height in such a tree is accounted for and each
+ * This function, given the set of SCMI powercap zones to register, takes care
+ * to walk the SCMI powercap zone trees up to the root, registering any
+ * unregistered parent zone before registering the child zones; at the same
+ * time each registered-zone height in such a tree is accounted for and each
* zone, once registered, is stored in the @registered_zones array that is
* indexed by zone height: this way will be trivial, at unregister time, to walk
* the @registered_zones array backward and unregister all the zones starting
@@ -330,57 +363,55 @@ scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
*
* Return: 0 on Success
*/
-static int scmi_powercap_register_zone(struct scmi_powercap_root *pr,
- struct scmi_powercap_zone *spz)
+static int scmi_zones_register(struct device *dev,
+ struct scmi_powercap_root *pr)
{
int ret = 0;
- struct scmi_powercap_zone *parent;
-
- if (!spz->info)
- return ret;
+ unsigned int sp = 0, reg_zones = 0;
+ struct scmi_powercap_zone *spz, **zones_stack;
- parent = scmi_powercap_get_parent_zone(spz);
- if (parent && !scmi_powercap_is_zone_registered(parent)) {
- /*
- * Bail out if a parent domain was marked as unsupported:
- * only domains participating as leaves can be skipped.
- */
- if (!parent->info)
- return -ENODEV;
+ zones_stack = kcalloc(pr->num_zones, sizeof(spz), GFP_KERNEL);
+ if (!zones_stack)
+ return -ENOMEM;
- ret = scmi_powercap_register_zone(pr, parent);
- if (ret)
- return ret;
- }
+ spz = list_first_entry_or_null(&pr->scmi_zones,
+ struct scmi_powercap_zone, node);
+ while (spz) {
+ struct scmi_powercap_zone *parent;
- if (!scmi_powercap_is_zone_registered(spz)) {
- struct powercap_zone *z;
-
- z = powercap_register_zone(&spz->zone,
- scmi_top_pcntrl,
- spz->info->name,
- parent ? &parent->zone : NULL,
- &zone_ops, 1, &constraint_ops);
- if (!IS_ERR(z)) {
- spz->height = scmi_powercap_get_zone_height(spz);
- list_add(&spz->node,
- &pr->registered_zones[spz->height]);
- dev_dbg(spz->dev,
- "Registered node %s - parent %s - height:%d\n",
- spz->info->name,
- parent ? parent->info->name : "ROOT",
- spz->height);
- ret = 0;
+ parent = scmi_powercap_get_parent_zone(spz);
+ if (parent && !parent->registered) {
+ zones_stack[sp++] = spz;
+ spz = parent;
} else {
- ret = PTR_ERR(z);
- dev_err(spz->dev,
- "Error registering node:%s - parent:%s - h:%d - ret:%d\n",
- spz->info->name,
- parent ? parent->info->name : "ROOT",
- spz->height, ret);
+ ret = scmi_powercap_register_zone(pr, spz, parent);
+ if (!ret) {
+ reg_zones++;
+ } else if (sp) {
+ /* Failed to register a non-leaf zone.
+				 * Bail out.
+ */
+ dev_err(dev,
+ "Failed to register non-leaf zone - ret:%d\n",
+ ret);
+ scmi_powercap_unregister_all_zones(pr);
+ reg_zones = 0;
+ goto out;
+ }
+ /* Pick next zone to process */
+ if (sp)
+ spz = zones_stack[--sp];
+ else
+ spz = list_first_entry_or_null(&pr->scmi_zones,
+ struct scmi_powercap_zone,
+ node);
}
}
+out:
+ kfree(zones_stack);
+ dev_info(dev, "Registered %d SCMI Powercap domains !\n", reg_zones);
+
return ret;
}
@@ -424,6 +455,8 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
if (!pr->registered_zones)
return -ENOMEM;
+ INIT_LIST_HEAD(&pr->scmi_zones);
+
for (i = 0, spz = pr->spzones; i < pr->num_zones; i++, spz++) {
/*
* Powercap domains are validate by the protocol layer, i.e.
@@ -438,6 +471,7 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
INIT_LIST_HEAD(&spz->node);
INIT_LIST_HEAD(&pr->registered_zones[i]);
+ list_add_tail(&spz->node, &pr->scmi_zones);
/*
* Forcibly skip powercap domains using an abstract scale.
* Note that only leaves domains can be skipped, so this could
@@ -448,7 +482,7 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
dev_warn(dev,
"Abstract power scale not supported. Skip %s.\n",
spz->info->name);
- spz->info = NULL;
+ spz->invalid = true;
continue;
}
}
@@ -457,21 +491,12 @@ static int scmi_powercap_probe(struct scmi_device *sdev)
* Scan array of retrieved SCMI powercap domains and register them
* recursively starting from the root domains.
*/
- for (i = 0, spz = pr->spzones; i < pr->num_zones; i++, spz++) {
- ret = scmi_powercap_register_zone(pr, spz);
- if (ret) {
- dev_err(dev,
- "Failed to register powercap zone %s - ret:%d\n",
- spz->info->name, ret);
- scmi_powercap_unregister_all_zones(pr);
- return ret;
- }
- }
+ ret = scmi_zones_register(dev, pr);
+ if (ret)
+ return ret;
dev_set_drvdata(dev, pr);
- dev_info(dev, "Registered %d SCMI Powercap domains !\n", pr->num_zones);
-
return ret;
}
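
As a rough illustration of the iterative, parent-first walk described in the kernel-doc above (hypothetical types, not the driver's own structures): zones whose parent is not yet registered are parked on an explicit stack, the ancestors are registered first, and the deferred children are then unwound, which is the same shape as the new scmi_zones_register() loop.

#include <stdbool.h>
#include <stdlib.h>

struct zone {
	struct zone *parent;
	bool registered;
};

/* Register every zone in 'zones', always handling ancestors before children. */
static int register_parent_first(struct zone **zones, size_t n)
{
	struct zone **stack = calloc(n, sizeof(*stack));
	size_t sp = 0, i = 0;

	if (n && !stack)
		return -1;

	while (i < n) {
		struct zone *z = zones[i];

		if (z->registered) {		/* already done as someone's ancestor */
			i++;
			continue;
		}
		while (z->parent && !z->parent->registered) {
			stack[sp++] = z;	/* revisit this child later */
			z = z->parent;		/* climb towards the root */
		}
		z->registered = true;		/* its parent (if any) is registered */
		if (sp)
			zones[i] = stack[--sp];	/* unwind the deferred children */
		else
			i++;
	}

	free(stack);
	return 0;
}
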
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 8fac57b28f8a..5c2e6d5eea2a 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1250,7 +1250,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
@@ -1485,7 +1485,7 @@ static int rapl_detect_domains(struct rapl_package *rp)
}
pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
- rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+ rp->domains = kcalloc(rp->nr_domains, sizeof(struct rapl_domain),
GFP_KERNEL);
if (!rp->domains)
return -ENOMEM;
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index dd471021f237..250bd41a588c 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -142,7 +142,7 @@ static const struct x86_cpu_id pl4_support_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 823f8e6e4801..965d4f0c18a6 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -178,6 +178,14 @@ config REGULATOR_ATC260X
ATC260x PMICs. This will enable support for all the software
controllable DCDC/LDO regulators.
+config REGULATOR_AW37503
+ tristate "Awinic AW37503 Dual Output Power regulators"
+ depends on I2C && GPIOLIB
+ select REGMAP_I2C
+ help
+	  This driver supports the AW37503 single-inductor, dual-output
+	  power supply specifically designed for display panels.
+
config REGULATOR_AXP20X
tristate "X-POWERS AXP20X PMIC Regulators"
depends on MFD_AXP20X
@@ -546,11 +554,11 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
-config REGULATOR_MAX597X
- tristate "Maxim 597x power switch and monitor"
+config REGULATOR_MAX5970
+ tristate "Maxim 5970/5978 power switch and monitor"
depends on I2C
depends on OF
- depends on MFD_MAX597X
+ depends on MFD_MAX5970
help
This driver controls a Maxim 5970/5978 switch via I2C bus.
The MAX5970/5978 is a smart switch with no output regulation, but
@@ -584,6 +592,16 @@ config REGULATOR_MAX77650
Semiconductor. This device has a SIMO with three independent
power rails and an LDO.
+config REGULATOR_MAX77857
+ tristate "ADI MAX77857/MAX77831 regulator support"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  This driver controls the ADI MAX77857 and MAX77831 regulators
+	  via I2C bus. MAX77857 and MAX77831 are high-efficiency buck-boost
+	  converters with an input voltage range of 2.5V to 16V. Say Y here
+	  to enable the regulator driver.
+
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
@@ -989,6 +1007,18 @@ config REGULATOR_PWM
This driver supports PWM controlled voltage regulators. PWM
duty cycle can increase or decrease the voltage.
+config REGULATOR_QCOM_REFGEN
+ tristate "Qualcomm REFGEN regulator driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on REGMAP
+ help
+ This driver supports the MMIO-mapped reference voltage regulator,
+ used internally by some PHYs on many Qualcomm SoCs.
+
+ Say M here if you want to include support for this regulator as
+ a module. The module will be named "qcom-refgen-regulator".
+
config REGULATOR_QCOM_RPM
tristate "Qualcomm RPM regulator driver"
depends on MFD_QCOM_RPM
@@ -1050,7 +1080,11 @@ config REGULATOR_RAA215300
depends on COMMON_CLK
depends on I2C
help
- Support for the Renesas RAA215300 PMIC.
+ If you say yes to this option, support will be included for the
+ Renesas RAA215300 PMIC.
+
+	  Say M here if you want to include support for the Renesas RAA215300
+	  PMIC as a module. The module will be named "raa215300".
config REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY
tristate "Raspberry Pi 7-inch touchscreen panel ATTINY regulator"
@@ -1232,6 +1266,17 @@ config REGULATOR_RTQ6752
synchronous boost converters for PAVDD, and one synchronous NAVDD
buck-boost. This device is suitable for automotive TFT-LCD panel.
+config REGULATOR_RTQ2208
+ tristate "Richtek RTQ2208 SubPMIC Regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  This driver adds support for RTQ2208 SubPMIC regulators.
+	  The RTQ2208 is a multi-phase, programmable power management IC that
+	  integrates dual multi-configurable, synchronous buck converters
+	  and two LDOs. It features a wide output voltage range from 0.4V to
+	  2.05V and the capability to configure the corresponding power stages.
+
config REGULATOR_S2MPA01
tristate "Samsung S2MPA01 voltage regulator"
depends on MFD_SEC_CORE || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 15e0d614ff66..23074714a81a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_REGULATOR_ARM_SCMI) += scmi-regulator.o
obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
obj-$(CONFIG_REGULATOR_ATC260X) += atc260x-regulator.o
+obj-$(CONFIG_REGULATOR_AW37503) += aw37503-regulator.o
obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o
obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
obj-$(CONFIG_REGULATOR_BD71815) += bd71815-regulator.o
@@ -67,7 +68,7 @@ obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o
obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
-obj-$(CONFIG_REGULATOR_MAX597X) += max597x-regulator.o
+obj-$(CONFIG_REGULATOR_MAX5970) += max5970-regulator.o
obj-$(CONFIG_REGULATOR_MAX77541) += max77541-regulator.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o
@@ -86,6 +87,7 @@ obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
obj-$(CONFIG_REGULATOR_MAX77826) += max77826-regulator.o
+obj-$(CONFIG_REGULATOR_MAX77857) += max77857-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
@@ -108,6 +110,7 @@ obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_MTK_DVFSRC) += mtk-dvfsrc-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_LABIBB) += qcom-labibb-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_REFGEN) += qcom-refgen-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPMH) += qcom-rpmh-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
@@ -145,6 +148,7 @@ obj-$(CONFIG_REGULATOR_RT6245) += rt6245-regulator.o
obj-$(CONFIG_REGULATOR_RTMV20) += rtmv20-regulator.o
obj-$(CONFIG_REGULATOR_RTQ2134) += rtq2134-regulator.o
obj-$(CONFIG_REGULATOR_RTQ6752) += rtq6752-regulator.o
+obj-$(CONFIG_REGULATOR_RTQ2208) += rtq2208-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index e26264529b74..24cbdd833863 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -8,7 +8,7 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/atc260x-regulator.c b/drivers/regulator/atc260x-regulator.c
index 87e237d740bc..3e9f8fd54fca 100644
--- a/drivers/regulator/atc260x-regulator.c
+++ b/drivers/regulator/atc260x-regulator.c
@@ -7,7 +7,8 @@
#include <linux/mfd/atc260x/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
@@ -37,7 +38,7 @@ static const struct linear_range atc2609a_ldo_voltage_ranges1[] = {
};
static const unsigned int atc260x_ldo_voltage_range_sel[] = {
- 0x0, 0x20,
+ 0x0, 0x1,
};
static int atc260x_dcdc_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -427,7 +428,7 @@ enum atc2609a_reg_ids {
.vsel_mask = GENMASK(4, 1), \
.vsel_range_reg = ATC2609A_PMU_LDO##num##_CTL0, \
.vsel_range_mask = BIT(5), \
- .linear_range_selectors = atc260x_ldo_voltage_range_sel, \
+ .linear_range_selectors_bitfield = atc260x_ldo_voltage_range_sel, \
.enable_reg = ATC2609A_PMU_LDO##num##_CTL0, \
.enable_mask = BIT(0), \
.enable_time = 2000, \
diff --git a/drivers/regulator/aw37503-regulator.c b/drivers/regulator/aw37503-regulator.c
new file mode 100644
index 000000000000..a5ff6dfd29b5
--- /dev/null
+++ b/drivers/regulator/aw37503-regulator.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// AWINIC AW37503 Regulator Driver
+//
+// Copyright (C) 2023 awinic. All Rights Reserved
+//
+// Author: Alec Li <like@awinic.com>
+
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#define AW37503_REG_VPOS 0x00
+#define AW37503_REG_VNEG 0x01
+#define AW37503_REG_APPS 0x03
+#define AW37503_REG_CONTROL 0x04
+#define AW37503_REG_WPRTEN 0x21
+
+#define AW37503_VOUT_MASK 0x1F
+#define AW37503_VOUT_N_VOLTAGE 0x15
+#define AW37503_VOUT_VMIN 4000000
+#define AW37503_VOUT_VMAX 6000000
+#define AW37503_VOUT_STEP 100000
+
+#define AW37503_REG_APPS_DIS_VPOS BIT(1)
+#define AW37503_REG_APPS_DIS_VNEG BIT(0)
+
+#define AW37503_REGULATOR_ID_VPOS 0
+#define AW37503_REGULATOR_ID_VNEG 1
+#define AW37503_MAX_REGULATORS 2
+
+struct aw37503_reg_pdata {
+ struct gpio_desc *en_gpiod;
+ int ena_gpio_state;
+};
+
+struct aw37503_regulator {
+ struct device *dev;
+ struct aw37503_reg_pdata reg_pdata[AW37503_MAX_REGULATORS];
+};
+
+static int aw37503_regulator_enable(struct regulator_dev *rdev)
+{
+ struct aw37503_regulator *chip = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[id];
+ int ret;
+
+ if (!IS_ERR(rpdata->en_gpiod)) {
+ gpiod_set_value_cansleep(rpdata->en_gpiod, 1);
+ rpdata->ena_gpio_state = 1;
+ }
+
+	/* The hardware automatically enables the discharge bit on enable */
+ if (rdev->constraints->active_discharge ==
+ REGULATOR_ACTIVE_DISCHARGE_DISABLE) {
+ ret = regulator_set_active_discharge_regmap(rdev, false);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to disable active discharge: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int aw37503_regulator_disable(struct regulator_dev *rdev)
+{
+ struct aw37503_regulator *chip = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[id];
+
+ if (!IS_ERR(rpdata->en_gpiod)) {
+ gpiod_set_value_cansleep(rpdata->en_gpiod, 0);
+ rpdata->ena_gpio_state = 0;
+ }
+
+ return 0;
+}
+
+static int aw37503_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct aw37503_regulator *chip = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[id];
+
+ if (!IS_ERR(rpdata->en_gpiod))
+ return rpdata->ena_gpio_state;
+
+ return 1;
+}
+
+static const struct regulator_ops aw37503_regulator_ops = {
+ .enable = aw37503_regulator_enable,
+ .disable = aw37503_regulator_disable,
+ .is_enabled = aw37503_regulator_is_enabled,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+};
+
+static int aw37503_of_parse_cb(struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regulator_config *config)
+{
+ struct aw37503_regulator *chip = config->driver_data;
+ struct aw37503_reg_pdata *rpdata = &chip->reg_pdata[desc->id];
+ int ret;
+
+ rpdata->en_gpiod = devm_fwnode_gpiod_get(chip->dev, of_fwnode_handle(np),
+ "enable", GPIOD_OUT_LOW,
+ "enable");
+
+ if (IS_ERR(rpdata->en_gpiod)) {
+ ret = PTR_ERR(rpdata->en_gpiod);
+
+		/* Ignore errors other than probe deferral */
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ return 0;
+ }
+
+ return 0;
+}
+
+#define AW37503_REGULATOR_DESC(_id, _name) \
+ [AW37503_REGULATOR_ID_##_id] = { \
+ .name = "aw37503-"#_name, \
+ .supply_name = "vin", \
+ .id = AW37503_REGULATOR_ID_##_id, \
+ .of_match = of_match_ptr(#_name), \
+ .of_parse_cb = aw37503_of_parse_cb, \
+ .ops = &aw37503_regulator_ops, \
+ .n_voltages = AW37503_VOUT_N_VOLTAGE, \
+ .min_uV = AW37503_VOUT_VMIN, \
+ .uV_step = AW37503_VOUT_STEP, \
+ .enable_time = 500, \
+ .vsel_mask = AW37503_VOUT_MASK, \
+ .vsel_reg = AW37503_REG_##_id, \
+ .active_discharge_off = 0, \
+ .active_discharge_on = AW37503_REG_APPS_DIS_##_id, \
+ .active_discharge_mask = AW37503_REG_APPS_DIS_##_id, \
+ .active_discharge_reg = AW37503_REG_APPS, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }
+
+static const struct regulator_desc aw_regs_desc[AW37503_MAX_REGULATORS] = {
+ AW37503_REGULATOR_DESC(VPOS, outp),
+ AW37503_REGULATOR_DESC(VNEG, outn),
+};
+
+static const struct regmap_range aw37503_no_reg_ranges[] = {
+ regmap_reg_range(AW37503_REG_CONTROL + 1,
+ AW37503_REG_WPRTEN - 1),
+};
+
+static const struct regmap_access_table aw37503_no_reg_table = {
+ .no_ranges = aw37503_no_reg_ranges,
+ .n_no_ranges = ARRAY_SIZE(aw37503_no_reg_ranges),
+};
+
+static const struct regmap_config aw37503_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AW37503_REG_WPRTEN,
+ .rd_table = &aw37503_no_reg_table,
+ .wr_table = &aw37503_no_reg_table,
+};
+
+static int aw37503_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct aw37503_regulator *chip;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ int id;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &aw37503_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to init regmap\n");
+
+ i2c_set_clientdata(client, chip);
+ chip->dev = dev;
+
+ config.regmap = regmap;
+ config.dev = dev;
+ config.driver_data = chip;
+
+ for (id = 0; id < AW37503_MAX_REGULATORS; ++id) {
+ rdev = devm_regulator_register(dev, &aw_regs_desc[id],
+ &config);
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register regulator %s\n",
+ aw_regs_desc[id].name);
+ }
+ return 0;
+}
+
+static const struct i2c_device_id aw37503_id[] = {
+ {.name = "aw37503",},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, aw37503_id);
+
+static const struct of_device_id aw37503_of_match[] = {
+ {.compatible = "awinic,aw37503",},
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, aw37503_of_match);
+
+static struct i2c_driver aw37503_i2c_driver = {
+ .driver = {
+ .name = "aw37503",
+ .of_match_table = aw37503_of_match,
+ },
+ .probe = aw37503_probe,
+ .id_table = aw37503_id,
+};
+
+module_i2c_driver(aw37503_i2c_driver);
+
+MODULE_DESCRIPTION("aw37503 regulator driver");
+MODULE_AUTHOR("Alec Li <like@awinic.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 810f90f3e2a1..c657820b0bbb 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -20,7 +20,6 @@
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index 475b1e0110e7..26192d55a685 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -18,7 +18,6 @@
#include <linux/regulator/driver.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/mfd/rohm-bd71815.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
index f3205dc9d4fc..08d4ee369287 100644
--- a/drivers/regulator/bd71828-regulator.c
+++ b/drivers/regulator/bd71828-regulator.c
@@ -5,7 +5,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/rohm-bd71828.h>
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index b0b9938c20a1..c3fb05dce40c 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -289,7 +289,7 @@ static const struct linear_range bd71837_buck5_volts[] = {
* and 0x1 for last 3 ranges.
*/
static const unsigned int bd71837_buck5_volt_range_sel[] = {
- 0x0, 0x0, 0x0, 0x80, 0x80, 0x80
+ 0x0, 0x0, 0x0, 0x1, 0x1, 0x1
};
/*
@@ -309,7 +309,7 @@ static const struct linear_range bd71847_buck3_volts[] = {
};
static const unsigned int bd71847_buck3_volt_range_sel[] = {
- 0x0, 0x0, 0x0, 0x40, 0x80, 0x80, 0x80
+ 0x0, 0x0, 0x0, 0x1, 0x2, 0x2, 0x2
};
static const struct linear_range bd71847_buck4_volts[] = {
@@ -317,7 +317,7 @@ static const struct linear_range bd71847_buck4_volts[] = {
REGULATOR_LINEAR_RANGE(2600000, 0x00, 0x03, 100000),
};
-static const unsigned int bd71847_buck4_volt_range_sel[] = { 0x0, 0x40 };
+static const unsigned int bd71847_buck4_volt_range_sel[] = { 0x0, 0x1 };
/*
* BUCK6
@@ -360,7 +360,7 @@ static const struct linear_range bd718xx_ldo1_volts[] = {
REGULATOR_LINEAR_RANGE(1600000, 0x00, 0x03, 100000),
};
-static const unsigned int bd718xx_ldo1_volt_range_sel[] = { 0x0, 0x20 };
+static const unsigned int bd718xx_ldo1_volt_range_sel[] = { 0x0, 0x1 };
/*
* LDO2
@@ -403,7 +403,7 @@ static const struct linear_range bd71847_ldo5_volts[] = {
REGULATOR_LINEAR_RANGE(800000, 0x00, 0x0F, 100000),
};
-static const unsigned int bd71847_ldo5_volt_range_sel[] = { 0x0, 0x20 };
+static const unsigned int bd71847_ldo5_volt_range_sel[] = { 0x0, 0x1 };
/*
* LDO6
@@ -817,7 +817,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_1ST_NODVS_BUCK_MASK,
.vsel_range_reg = BD718XX_REG_1ST_NODVS_BUCK_VOLT,
.vsel_range_mask = BD71847_BUCK3_RANGE_MASK,
- .linear_range_selectors = bd71847_buck3_volt_range_sel,
+ .linear_range_selectors_bitfield = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK3_STARTUP_TIME,
@@ -845,7 +845,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD71847_BUCK4_MASK,
.vsel_range_reg = BD718XX_REG_2ND_NODVS_BUCK_VOLT,
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
- .linear_range_selectors = bd71847_buck4_volt_range_sel,
+ .linear_range_selectors_bitfield = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
@@ -916,7 +916,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO1_MASK,
.vsel_range_reg = BD718XX_REG_LDO1_VOLT,
.vsel_range_mask = BD718XX_LDO1_RANGE_MASK,
- .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
+ .linear_range_selectors_bitfield = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO1_STARTUP_TIME,
@@ -1010,7 +1010,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD71847_LDO5_MASK,
.vsel_range_reg = BD718XX_REG_LDO5_VOLT,
.vsel_range_mask = BD71847_LDO5_RANGE_MASK,
- .linear_range_selectors = bd71847_ldo5_volt_range_sel,
+ .linear_range_selectors_bitfield = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71847_LDO5_STARTUP_TIME,
@@ -1232,7 +1232,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK5_MASK,
.vsel_range_reg = BD718XX_REG_1ST_NODVS_BUCK_VOLT,
.vsel_range_mask = BD71837_BUCK5_RANGE_MASK,
- .linear_range_selectors = bd71837_buck5_volt_range_sel,
+ .linear_range_selectors_bitfield = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
.enable_time = BD71837_BUCK5_STARTUP_TIME,
@@ -1328,7 +1328,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO1_MASK,
.vsel_range_reg = BD718XX_REG_LDO1_VOLT,
.vsel_range_mask = BD718XX_LDO1_RANGE_MASK,
- .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
+ .linear_range_selectors_bitfield = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
.enable_time = BD71837_LDO1_STARTUP_TIME,
diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c
index 1fd79fb17303..6958d154442b 100644
--- a/drivers/regulator/cpcap-regulator.c
+++ b/drivers/regulator/cpcap-regulator.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index c28b061eef02..1d354db0c1bd 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -924,7 +924,7 @@ static int da9062_regulator_probe(struct platform_device *pdev)
struct da9062_regulator *regl;
struct regulator_config config = { };
const struct da9062_regulator_info *rinfo;
- int irq, n, ret;
+ int n, ret;
int max_regulators;
switch (chip->chip_type) {
@@ -1012,12 +1012,11 @@ static int da9062_regulator_probe(struct platform_device *pdev)
}
/* LDOs overcurrent event support */
- irq = platform_get_irq_byname(pdev, "LDO_LIM");
- if (irq < 0)
- return irq;
- regulators->irq_ldo_lim = irq;
+ regulators->irq_ldo_lim = platform_get_irq_byname_optional(pdev, "LDO_LIM");
+ if (regulators->irq_ldo_lim < 0)
+ return 0;
- ret = devm_request_threaded_irq(&pdev->dev, irq,
+ ret = devm_request_threaded_irq(&pdev->dev, regulators->irq_ldo_lim,
NULL, da9062_ldo_lim_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"LDO_LIM", regulators);
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 122124944749..80098035bb13 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -440,7 +440,7 @@ static const struct regulator_desc da9121_reg = {
.of_match = "buck1",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -465,7 +465,7 @@ static const struct regulator_desc da9220_reg[2] = {
.of_match = "buck1",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -484,7 +484,7 @@ static const struct regulator_desc da9220_reg[2] = {
.of_match = "buck2",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -506,7 +506,7 @@ static const struct regulator_desc da9122_reg[2] = {
.of_match = "buck1",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -525,7 +525,7 @@ static const struct regulator_desc da9122_reg[2] = {
.of_match = "buck2",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -546,7 +546,7 @@ static const struct regulator_desc da9217_reg = {
.of_match = "buck1",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -573,7 +573,7 @@ static const struct regulator_desc da9141_reg = {
.of_match = "buck1",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -593,7 +593,7 @@ static const struct regulator_desc da9142_reg = {
.of_match = "buck1",
.of_parse_cb = da9121_of_parse_cb,
.owner = THIS_MODULE,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.of_map_mode = da9121_map_mode,
.ops = &da9121_buck_ops,
.type = REGULATOR_VOLTAGE,
@@ -1195,7 +1195,7 @@ static struct i2c_driver da9121_regulator_driver = {
.driver = {
.name = "da9121",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(da9121_dt_ids),
+ .of_match_table = da9121_dt_ids,
},
.probe = da9121_i2c_probe,
.remove = da9121_i2c_remove,
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 289c06e09f47..48f312167e53 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/param.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 364d1a2683b7..55130efae9b8 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -25,7 +25,6 @@
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#include <linux/clk.h>
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index e6c999ba3fa2..5ad5f3b3a6b5 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -5,13 +5,14 @@
// Copyright 2007, 2008 Wolfson Microelectronics PLC.
// Copyright 2008 SlimLogic Ltd.
-#include <linux/kernel.h>
-#include <linux/err.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
-#include <linux/module.h>
#include "internal.h"
@@ -104,13 +105,14 @@ static int regulator_range_selector_to_index(struct regulator_dev *rdev,
{
int i;
- if (!rdev->desc->linear_range_selectors)
+ if (!rdev->desc->linear_range_selectors_bitfield)
return -EINVAL;
rval &= rdev->desc->vsel_range_mask;
+ rval >>= ffs(rdev->desc->vsel_range_mask) - 1;
for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
- if (rdev->desc->linear_range_selectors[i] == rval)
+ if (rdev->desc->linear_range_selectors_bitfield[i] == rval)
return i;
}
return -EINVAL;
@@ -194,7 +196,8 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
sel <<= ffs(rdev->desc->vsel_mask) - 1;
sel += rdev->desc->linear_ranges[i].min_sel;
- range = rdev->desc->linear_range_selectors[i];
+ range = rdev->desc->linear_range_selectors_bitfield[i];
+ range <<= ffs(rdev->desc->vsel_mask) - 1;
if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
ret = regmap_update_bits(rdev->regmap,
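
A small standalone sketch, not the regulator core's API, of the mask/shift relationship the helpers.c hunks rely on now that range selectors are stored as plain bitfield values instead of pre-shifted register values: with the bd71847_buck3 table above, for example, the old register selectors 0x40/0x80 become bitfield values 0x1/0x2 under a range mask covering bits 6-7.

#include <strings.h>	/* ffs() */

/* Convert a right-justified bitfield selector to its register bits (mask != 0). */
static unsigned int selector_to_reg_bits(unsigned int sel, unsigned int range_mask)
{
	return (sel << (ffs(range_mask) - 1)) & range_mask;
}

/* Recover the bitfield selector from a raw register value (mask != 0). */
static unsigned int reg_bits_to_selector(unsigned int val, unsigned int range_mask)
{
	return (val & range_mask) >> (ffs(range_mask) - 1);
}
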
diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c
index 1b52423598d3..82e9e364d4d4 100644
--- a/drivers/regulator/hi6421-regulator.c
+++ b/drivers/regulator/hi6421-regulator.c
@@ -131,8 +131,8 @@ static const struct regulator_ops hi6421_buck345_ops;
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
- .of_match = of_match_ptr(#_match), \
- .regulators_node = of_match_ptr("regulators"), \
+ .of_match = #_match, \
+ .regulators_node = "regulators", \
.ops = &hi6421_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -170,8 +170,8 @@ static const struct regulator_ops hi6421_buck345_ops;
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
- .of_match = of_match_ptr(#_match), \
- .regulators_node = of_match_ptr("regulators"), \
+ .of_match = #_match, \
+ .regulators_node = "regulators", \
.ops = &hi6421_ldo_linear_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -210,8 +210,8 @@ static const struct regulator_ops hi6421_buck345_ops;
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
- .of_match = of_match_ptr(#_match), \
- .regulators_node = of_match_ptr("regulators"), \
+ .of_match = #_match, \
+ .regulators_node = "regulators", \
.ops = &hi6421_ldo_linear_range_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -247,8 +247,8 @@ static const struct regulator_ops hi6421_buck345_ops;
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
- .of_match = of_match_ptr(#_match), \
- .regulators_node = of_match_ptr("regulators"), \
+ .of_match = #_match, \
+ .regulators_node = "regulators", \
.ops = &hi6421_buck012_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
@@ -284,8 +284,8 @@ static const struct regulator_ops hi6421_buck345_ops;
[HI6421_##_id] = { \
.desc = { \
.name = #_id, \
- .of_match = of_match_ptr(#_match), \
- .regulators_node = of_match_ptr("regulators"), \
+ .of_match = #_match, \
+ .regulators_node = "regulators", \
.ops = &hi6421_buck345_ops, \
.type = REGULATOR_VOLTAGE, \
.id = HI6421_##_id, \
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 63aa227b1813..942f37082cb1 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -864,7 +864,7 @@ static struct lp872x_platform_data
for (i = 0; i < num_matches; i++) {
pdata->regulator_data[i].id =
- (enum lp872x_regulator_id)match[i].driver_data;
+ (uintptr_t)match[i].driver_data;
pdata->regulator_data[i].init_data = match[i].init_data;
}
out:
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 4bc310f972ed..8d01e18046f3 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
#include <linux/regmap.h>
#include <linux/uaccess.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index bdb60d8a7f3d..61ee5cf3f241 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -29,8 +29,8 @@ enum LP87565_regulator_id {
.name = _name, \
.supply_name = _of "-in", \
.id = _id, \
- .of_match = of_match_ptr(_of), \
- .regulators_node = of_match_ptr("regulators"),\
+ .of_match = _of, \
+ .regulators_node = "regulators", \
.ops = &_ops, \
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index e9751c206d95..d892c2a5df7b 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -392,8 +391,7 @@ static int ltc3589_probe(struct i2c_client *client)
i2c_set_clientdata(client, ltc3589);
if (client->dev.of_node)
- ltc3589->variant = (enum ltc3589_variant)
- of_device_get_match_data(&client->dev);
+ ltc3589->variant = (uintptr_t)of_device_get_match_data(&client->dev);
else
ltc3589->variant = id->driver_data;
ltc3589->dev = dev;
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index fad31f5f435e..32f47b896fd1 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -6,7 +6,6 @@
// Copyright (C) 2018 Avnet, Inc.
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max5970-regulator.c
index 7873a5267555..b56a174cde3d 100644
--- a/drivers/regulator/max597x-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -20,9 +20,9 @@
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
-#include <linux/mfd/max597x.h>
+#include <linux/mfd/max5970.h>
-struct max597x_regulator {
+struct max5970_regulator {
int num_switches, mon_rng, irng, shunt_micro_ohms, lim_uA;
struct regmap *regmap;
};
@@ -58,7 +58,7 @@ static int max597x_set_vp(struct regulator_dev *rdev, int lim_uV, int severity,
bool enable, bool overvoltage)
{
int off_h, off_l, reg, ret;
- struct max597x_regulator *data = rdev_get_drvdata(rdev);
+ struct max5970_regulator *data = rdev_get_drvdata(rdev);
int channel = rdev_get_id(rdev);
if (overvoltage) {
@@ -140,7 +140,7 @@ static int max597x_set_ocp(struct regulator_dev *rdev, int lim_uA,
int val, reg;
unsigned int vthst, vthfst;
- struct max597x_regulator *data = rdev_get_drvdata(rdev);
+ struct max5970_regulator *data = rdev_get_drvdata(rdev);
int rdev_id = rdev_get_id(rdev);
/*
* MAX5970 doesn't have enable control for ocp.
@@ -222,7 +222,7 @@ static int max597x_dt_parse(struct device_node *np,
const struct regulator_desc *desc,
struct regulator_config *cfg)
{
- struct max597x_regulator *data = cfg->driver_data;
+ struct max5970_regulator *data = cfg->driver_data;
int ret = 0;
ret =
@@ -274,7 +274,7 @@ static int max597x_irq_handler(int irq, struct regulator_irq_data *rid,
unsigned long *dev_mask)
{
struct regulator_err_state *stat;
- struct max597x_regulator *d = (struct max597x_regulator *)rid->data;
+ struct max5970_regulator *d = (struct max5970_regulator *)rid->data;
int val, ret, i;
ret = max597x_regmap_read_clear(d->regmap, MAX5970_REG_FAULT0, &val);
@@ -394,7 +394,7 @@ static int max597x_adc_range(struct regmap *regmap, const int ch,
static int max597x_setup_irq(struct device *dev,
int irq,
struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES],
- int num_switches, struct max597x_regulator *data)
+ int num_switches, struct max5970_regulator *data)
{
struct regulator_irq_desc max597x_notif = {
.name = "max597x-irq",
@@ -425,9 +425,9 @@ static int max597x_setup_irq(struct device *dev,
static int max597x_regulator_probe(struct platform_device *pdev)
{
- struct max597x_data *max597x;
+ struct max5970_data *max597x;
struct regmap *regmap = dev_get_regmap(pdev->dev.parent, NULL);
- struct max597x_regulator *data;
+ struct max5970_regulator *data;
struct i2c_client *i2c = to_i2c_client(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
@@ -438,16 +438,16 @@ static int max597x_regulator_probe(struct platform_device *pdev)
if (!regmap)
return -EPROBE_DEFER;
- max597x = devm_kzalloc(&i2c->dev, sizeof(struct max597x_data), GFP_KERNEL);
+ max597x = devm_kzalloc(&i2c->dev, sizeof(struct max5970_data), GFP_KERNEL);
if (!max597x)
return -ENOMEM;
i2c_set_clientdata(i2c, max597x);
if (of_device_is_compatible(i2c->dev.of_node, "maxim,max5978"))
- max597x->num_switches = MAX597x_TYPE_MAX5978;
+ max597x->num_switches = MAX5978_NUM_SWITCHES;
else if (of_device_is_compatible(i2c->dev.of_node, "maxim,max5970"))
- max597x->num_switches = MAX597x_TYPE_MAX5970;
+ max597x->num_switches = MAX5970_NUM_SWITCHES;
else
return -ENODEV;
@@ -456,7 +456,7 @@ static int max597x_regulator_probe(struct platform_device *pdev)
for (i = 0; i < num_switches; i++) {
data =
- devm_kzalloc(&i2c->dev, sizeof(struct max597x_regulator),
+ devm_kzalloc(&i2c->dev, sizeof(struct max5970_regulator),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -500,7 +500,7 @@ static int max597x_regulator_probe(struct platform_device *pdev)
static struct platform_driver max597x_regulator_driver = {
.driver = {
- .name = "max597x-regulator",
+ .name = "max5970-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = max597x_regulator_probe,
diff --git a/drivers/regulator/max77541-regulator.c b/drivers/regulator/max77541-regulator.c
index 2976f9cb3e26..e6b3d9147c37 100644
--- a/drivers/regulator/max77541-regulator.c
+++ b/drivers/regulator/max77541-regulator.c
@@ -44,7 +44,7 @@ static const struct linear_range max77541_buck_ranges[] = {
};
static const unsigned int max77541_buck_volt_range_sel[] = {
- 0x00, 0x00, 0x40, 0x40, 0x80, 0x80,
+ 0x0, 0x0, 0x1, 0x1, 0x2, 0x2,
};
enum max77541_regulators {
@@ -67,7 +67,7 @@ enum max77541_regulators {
.vsel_mask = MAX77541_BITS_MX_VOUT, \
.vsel_range_reg = MAX77541_REG_M ## _id ## _CFG1, \
.vsel_range_mask = MAX77541_BITS_MX_CFG1_RNG, \
- .linear_range_selectors = max77541_buck_volt_range_sel, \
+ .linear_range_selectors_bitfield = max77541_buck_volt_range_sel, \
.owner = THIS_MODULE, \
}
@@ -86,7 +86,7 @@ enum max77541_regulators {
.vsel_mask = MAX77541_BITS_MX_VOUT, \
.vsel_range_reg = MAX77541_REG_M ## _id ## _CFG1, \
.vsel_range_mask = MAX77541_BITS_MX_CFG1_RNG, \
- .linear_range_selectors = max77541_buck_volt_range_sel, \
+ .linear_range_selectors_bitfield = max77541_buck_volt_range_sel, \
.owner = THIS_MODULE, \
}
diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c
index f6539b945037..94abfbb2bc1e 100644
--- a/drivers/regulator/max77650-regulator.c
+++ b/drivers/regulator/max77650-regulator.c
@@ -239,7 +239,7 @@ static struct max77650_regulator_desc max77651_SBB1_desc = {
.supply_name = "in-sbb1",
.id = MAX77650_REGULATOR_ID_SBB1,
.ops = &max77651_SBB1_regulator_ops,
- .linear_range_selectors = max77651_sbb1_volt_range_sel,
+ .linear_range_selectors_bitfield = max77651_sbb1_volt_range_sel,
.linear_ranges = max77651_sbb1_volt_ranges,
.n_linear_ranges = ARRAY_SIZE(max77651_sbb1_volt_ranges),
.n_voltages = 58,
diff --git a/drivers/regulator/max77826-regulator.c b/drivers/regulator/max77826-regulator.c
index 3855f5e686d8..5590cdf615b7 100644
--- a/drivers/regulator/max77826-regulator.c
+++ b/drivers/regulator/max77826-regulator.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/max77857-regulator.c b/drivers/regulator/max77857-regulator.c
new file mode 100644
index 000000000000..145ad0281857
--- /dev/null
+++ b/drivers/regulator/max77857-regulator.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Analog Devices, Inc.
+ * ADI Regulator driver for the MAX77857
+ * MAX77859 and MAX77831.
+ */
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/util_macros.h>
+
+#define MAX77857_REG_INT_SRC 0x10
+#define MAX77857_REG_INT_MASK 0x11
+#define MAX77857_REG_CONT1 0x12
+#define MAX77857_REG_CONT2 0x13
+#define MAX77857_REG_CONT3 0x14
+
+#define MAX77857_INT_SRC_OCP BIT(0)
+#define MAX77857_INT_SRC_THS BIT(1)
+#define MAX77857_INT_SRC_HARDSHORT BIT(2)
+#define MAX77857_INT_SRC_OVP BIT(3)
+#define MAX77857_INT_SRC_POK BIT(4)
+
+#define MAX77857_ILIM_MASK GENMASK(2, 0)
+#define MAX77857_CONT1_FREQ GENMASK(4, 3)
+#define MAX77857_CONT3_FPWM BIT(5)
+
+#define MAX77859_REG_INT_SRC 0x11
+#define MAX77859_REG_CONT1 0x13
+#define MAX77859_REG_CONT2 0x14
+#define MAX77859_REG_CONT3 0x15
+#define MAX77859_REG_CONT5 0x17
+#define MAX77859_CONT2_FPWM BIT(2)
+#define MAX77859_CONT2_INTB BIT(3)
+#define MAX77859_CONT3_DVS_START BIT(2)
+#define MAX77859_VOLTAGE_SEL_MASK GENMASK(9, 0)
+
+#define MAX77859_CURRENT_MIN 1000000
+#define MAX77859_CURRENT_MAX 5000000
+#define MAX77859_CURRENT_STEP 50000
+
+enum max77857_id {
+ ID_MAX77831 = 1,
+ ID_MAX77857,
+ ID_MAX77859,
+ ID_MAX77859A,
+};
+
+static bool max77857_volatile_reg(struct device *dev, unsigned int reg)
+{
+ enum max77857_id id = (uintptr_t)dev_get_drvdata(dev);
+
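+	/* only the interrupt status register is volatile (never cached) */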
+ switch (id) {
+ case ID_MAX77831:
+ case ID_MAX77857:
+ return reg == MAX77857_REG_INT_SRC;
+ case ID_MAX77859:
+ case ID_MAX77859A:
+ return reg == MAX77859_REG_INT_SRC;
+ default:
+ return true;
+ }
+}
+
+static struct regmap_config max77857_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = max77857_volatile_reg,
+};
+
+static int max77857_get_status(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MAX77857_REG_INT_SRC, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MAX77857_INT_SRC_POK, val))
+ return REGULATOR_STATUS_ON;
+
+ return REGULATOR_STATUS_ERROR;
+}
+
+static unsigned int max77857_get_mode(struct regulator_dev *rdev)
+{
+ enum max77857_id id = (uintptr_t)rdev_get_drvdata(rdev);
+ unsigned int regval;
+ int ret;
+
+ switch (id) {
+ case ID_MAX77831:
+ case ID_MAX77857:
+ ret = regmap_read(rdev->regmap, MAX77857_REG_CONT3, &regval);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MAX77857_CONT3_FPWM, regval))
+ return REGULATOR_MODE_FAST;
+
+ break;
+ case ID_MAX77859:
+ case ID_MAX77859A:
+ ret = regmap_read(rdev->regmap, MAX77859_REG_CONT2, &regval);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MAX77859_CONT2_FPWM, regval))
+ return REGULATOR_MODE_FAST;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int max77857_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ enum max77857_id id = (uintptr_t)rdev_get_drvdata(rdev);
+ unsigned int reg, val;
+
+ switch (id) {
+ case ID_MAX77831:
+ case ID_MAX77857:
+ reg = MAX77857_REG_CONT3;
+ val = MAX77857_CONT3_FPWM;
+ break;
+ case ID_MAX77859:
+ case ID_MAX77859A:
+ reg = MAX77859_REG_CONT2;
+ val = MAX77859_CONT2_FPWM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ return regmap_set_bits(rdev->regmap, reg, val);
+ case REGULATOR_MODE_NORMAL:
+ return regmap_clear_bits(rdev->regmap, reg, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max77857_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MAX77857_REG_INT_SRC, &val);
+ if (ret)
+ return ret;
+
+ *flags = 0;
+
+ if (FIELD_GET(MAX77857_INT_SRC_OVP, val))
+ *flags |= REGULATOR_ERROR_OVER_VOLTAGE_WARN;
+
+ if (FIELD_GET(MAX77857_INT_SRC_OCP, val) ||
+ FIELD_GET(MAX77857_INT_SRC_HARDSHORT, val))
+ *flags |= REGULATOR_ERROR_OVER_CURRENT;
+
+ if (FIELD_GET(MAX77857_INT_SRC_THS, val))
+ *flags |= REGULATOR_ERROR_OVER_TEMP;
+
+ if (!FIELD_GET(MAX77857_INT_SRC_POK, val))
+ *flags |= REGULATOR_ERROR_FAIL;
+
+ return 0;
+}
+
+static struct linear_range max77859_lin_ranges[] = {
+ REGULATOR_LINEAR_RANGE(3200000, 0x0A0, 0x320, 20000)
+};
+
+static const unsigned int max77859_ramp_table[4] = {
+ 1000, 500, 250, 125
+};
+
+static int max77859_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int sel)
+{
+ __be16 reg;
+ int ret;
+
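+	/* the 10-bit selector spans two consecutive registers; write it MSB first */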
+ reg = cpu_to_be16(sel);
+
+ ret = regmap_bulk_write(rdev->regmap, MAX77859_REG_CONT3, &reg, 2);
+ if (ret)
+ return ret;
+
+ /* actually apply new voltage */
+ return regmap_set_bits(rdev->regmap, MAX77859_REG_CONT3,
+ MAX77859_CONT3_DVS_START);
+}
+
+static int max77859_get_voltage_sel(struct regulator_dev *rdev)
+{
+ __be16 reg;
+ int ret;
+
+ ret = regmap_bulk_read(rdev->regmap, MAX77859_REG_CONT3, &reg, 2);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(MAX77859_VOLTAGE_SEL_MASK, __be16_to_cpu(reg));
+}
+
+static int max77859_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA)
+{
+ u32 selector;
+
+ if (max_uA < MAX77859_CURRENT_MIN)
+ return -EINVAL;
+
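+	/* selector 0x12 corresponds to 1 A and each step adds 50 mA (e.g. 3 A -> 0x12 + 40) */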
+ selector = 0x12 + (max_uA - MAX77859_CURRENT_MIN) / MAX77859_CURRENT_STEP;
+
+ selector = clamp_val(selector, 0x00, 0x7F);
+
+ return regmap_write(rdev->regmap, MAX77859_REG_CONT5, selector);
+}
+
+static int max77859_get_current_limit(struct regulator_dev *rdev)
+{
+ u32 selector;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MAX77859_REG_CONT5, &selector);
+ if (ret)
+ return ret;
+
+ if (selector <= 0x12)
+ return MAX77859_CURRENT_MIN;
+
+ if (selector >= 0x64)
+ return MAX77859_CURRENT_MAX;
+
+ return MAX77859_CURRENT_MIN + (selector - 0x12) * MAX77859_CURRENT_STEP;
+}
+
+static const struct regulator_ops max77859_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = max77859_set_voltage_sel,
+ .get_voltage_sel = max77859_get_voltage_sel,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .get_status = max77857_get_status,
+ .set_mode = max77857_set_mode,
+ .get_mode = max77857_get_mode,
+ .get_error_flags = max77857_get_error_flags,
+};
+
+static const struct regulator_ops max77859a_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = max77859_set_voltage_sel,
+ .get_voltage_sel = max77859_get_voltage_sel,
+ .set_current_limit = max77859_set_current_limit,
+ .get_current_limit = max77859_get_current_limit,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .get_status = max77857_get_status,
+ .set_mode = max77857_set_mode,
+ .get_mode = max77857_get_mode,
+ .get_error_flags = max77857_get_error_flags,
+};
+
+static const struct regulator_ops max77857_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_ramp_delay = regulator_set_ramp_delay_regmap,
+ .get_status = max77857_get_status,
+ .set_mode = max77857_set_mode,
+ .get_mode = max77857_get_mode,
+ .get_error_flags = max77857_get_error_flags,
+};
+
+static struct linear_range max77857_lin_ranges[] = {
+ REGULATOR_LINEAR_RANGE(4485000, 0x3D, 0xCC, 73500)
+};
+
+static const unsigned int max77857_switch_freq[] = {
+ 1200000, 1500000, 1800000, 2100000
+};
+
+#define RAMAP_DELAY_INIT_VAL 1333
+
+static const unsigned int max77857_ramp_table[2][4] = {
+ { RAMAP_DELAY_INIT_VAL, 667, 333, 227 }, /* when switch freq is 1.8MHz or 2.1MHz */
+ { 1166, 667, 333, 167 }, /* when switch freq is 1.2MHz or 1.5MHz */
+};
+
+static struct regulator_desc max77857_regulator_desc = {
+ .ops = &max77857_regulator_ops,
+ .name = "max77857",
+ .linear_ranges = max77857_lin_ranges,
+ .n_linear_ranges = ARRAY_SIZE(max77857_lin_ranges),
+ .vsel_mask = 0xFF,
+ .vsel_reg = MAX77857_REG_CONT2,
+ .ramp_delay_table = max77857_ramp_table[0],
+ .n_ramp_values = ARRAY_SIZE(max77857_ramp_table[0]),
+ .ramp_reg = MAX77857_REG_CONT3,
+ .ramp_mask = GENMASK(1, 0),
+ .ramp_delay = RAMAP_DELAY_INIT_VAL,
+ .owner = THIS_MODULE,
+};
+
+static void max77857_calc_range(struct device *dev, enum max77857_id id)
+{
+ struct linear_range *range;
+ unsigned long vref_step;
+ u32 rtop = 0;
+ u32 rbot = 0;
+
+ device_property_read_u32(dev, "adi,rtop-ohms", &rtop);
+ device_property_read_u32(dev, "adi,rbot-ohms", &rbot);
+
+ if (!rbot || !rtop)
+ return;
+
+ switch (id) {
+ case ID_MAX77831:
+ case ID_MAX77857:
+ range = max77857_lin_ranges;
+ vref_step = 4900UL;
+ break;
+ case ID_MAX77859:
+ case ID_MAX77859A:
+ range = max77859_lin_ranges;
+ vref_step = 1250UL;
+ break;
+ }
+
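+	/* scale each reference step by the external feedback divider ratio (Rtop + Rbot) / Rbot */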
+ range->step = DIV_ROUND_CLOSEST(vref_step * (rbot + rtop), rbot);
+ range->min = range->step * range->min_sel;
+}
+
+static int max77857_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *i2c_id;
+ struct device *dev = &client->dev;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ enum max77857_id id;
+ u32 switch_freq = 0;
+ int ret;
+
+ i2c_id = i2c_client_get_device_id(client);
+ if (!i2c_id)
+ return -EINVAL;
+
+ id = i2c_id->driver_data;
+
+ dev_set_drvdata(dev, (void *)id);
+
+ if (id == ID_MAX77859 || id == ID_MAX77859A) {
+ max77857_regulator_desc.ops = &max77859_regulator_ops;
+ max77857_regulator_desc.linear_ranges = max77859_lin_ranges;
+ max77857_regulator_desc.ramp_delay_table = max77859_ramp_table;
+ max77857_regulator_desc.ramp_delay = max77859_ramp_table[0];
+ }
+
+ if (id == ID_MAX77859A)
+ max77857_regulator_desc.ops = &max77859a_regulator_ops;
+
+ max77857_calc_range(dev, id);
+
+ regmap = devm_regmap_init_i2c(client, &max77857_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "cannot initialize regmap\n");
+
+ device_property_read_u32(dev, "adi,switch-frequency-hz", &switch_freq);
+ if (switch_freq) {
+ switch_freq = find_closest(switch_freq, max77857_switch_freq,
+ ARRAY_SIZE(max77857_switch_freq));
+
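+		/* the MAX77831 is limited to the 1.8 MHz entry (index 2) of the frequency table */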
+ if (id == ID_MAX77831 && switch_freq == 3)
+ switch_freq = 2;
+
+ switch (id) {
+ case ID_MAX77831:
+ case ID_MAX77857:
+ ret = regmap_update_bits(regmap, MAX77857_REG_CONT1,
+ MAX77857_CONT1_FREQ, switch_freq);
+
+ if (switch_freq >= 2)
+ break;
+
+ max77857_regulator_desc.ramp_delay_table = max77857_ramp_table[1];
+ max77857_regulator_desc.ramp_delay = max77857_ramp_table[1][0];
+ break;
+ case ID_MAX77859:
+ case ID_MAX77859A:
+ ret = regmap_update_bits(regmap, MAX77859_REG_CONT1,
+ MAX77857_CONT1_FREQ, switch_freq);
+ break;
+ }
+ if (ret)
+ return ret;
+ }
+
+ cfg.dev = dev;
+ cfg.driver_data = (void *)id;
+ cfg.regmap = regmap;
+ cfg.init_data = of_get_regulator_init_data(dev, dev->of_node,
+ &max77857_regulator_desc);
+ if (!cfg.init_data)
+ return -ENOMEM;
+
+ rdev = devm_regulator_register(dev, &max77857_regulator_desc, &cfg);
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "cannot register regulator\n");
+
+ return 0;
+}
+
+const struct i2c_device_id max77857_id[] = {
+ { "max77831", ID_MAX77831 },
+ { "max77857", ID_MAX77857 },
+ { "max77859", ID_MAX77859 },
+ { "max77859a", ID_MAX77859A },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77857_id);
+
+static const struct of_device_id max77857_of_id[] = {
+ { .compatible = "adi,max77831", .data = (void *)ID_MAX77831 },
+ { .compatible = "adi,max77857", .data = (void *)ID_MAX77857 },
+ { .compatible = "adi,max77859", .data = (void *)ID_MAX77859 },
+ { .compatible = "adi,max77859a", .data = (void *)ID_MAX77859A },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max77857_of_id);
+
+static struct i2c_driver max77857_driver = {
+ .driver = {
+ .name = "max77857",
+ .of_match_table = max77857_of_id,
+ },
+ .id_table = max77857_id,
+ .probe = max77857_probe,
+};
+module_i2c_driver(max77857_driver);
+
+MODULE_DESCRIPTION("Analog Devices MAX77857 Buck-Boost Converter Driver");
+MODULE_AUTHOR("Ibrahim Tilki <Ibrahim.Tilki@analog.com>");
+MODULE_AUTHOR("Okan Sahin <Okan.Sahin@analog.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8893.c b/drivers/regulator/max8893.c
index cb0e72948dd4..30592425e193 100644
--- a/drivers/regulator/max8893.c
+++ b/drivers/regulator/max8893.c
@@ -125,7 +125,7 @@ static const struct regmap_config max8893_regmap = {
.val_bits = 8,
};
-static int max8893_probe_new(struct i2c_client *i2c)
+static int max8893_probe(struct i2c_client *i2c)
{
int id, ret;
struct regulator_config config = {.dev = &i2c->dev};
@@ -168,7 +168,7 @@ static const struct i2c_device_id max8893_ids[] = {
MODULE_DEVICE_TABLE(i2c, max8893_ids);
static struct i2c_driver max8893_driver = {
- .probe = max8893_probe_new,
+ .probe = max8893_probe,
.driver = {
.name = "max8893",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 6c6f5a21362b..0c15a19fe83a 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -8,7 +8,6 @@
//
// Inspired from tps65086-regulator.c
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -111,7 +110,7 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
#define MCP16502_REGULATOR(_name, _id, _ranges, _ops, _ramp_table) \
[_id] = { \
.name = _name, \
- .regulators_node = of_match_ptr("regulators"), \
+ .regulators_node = "regulators", \
.id = _id, \
.ops = &(_ops), \
.type = REGULATOR_VOLTAGE, \
@@ -120,7 +119,7 @@ static unsigned int mcp16502_of_map_mode(unsigned int mode)
.linear_ranges = _ranges, \
.linear_min_sel = VDD_LOW_SEL, \
.n_linear_ranges = ARRAY_SIZE(_ranges), \
- .of_match = of_match_ptr(_name), \
+ .of_match = _name, \
.of_map_mode = mcp16502_of_map_mode, \
.vsel_reg = (((_id) + 1) << 4), \
.vsel_mask = MCP16502_VSEL, \
@@ -588,7 +587,7 @@ static struct i2c_driver mcp16502_drv = {
.driver = {
.name = "mcp16502-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(mcp16502_ids),
+ .of_match_table = mcp16502_ids,
#ifdef CONFIG_PM
.pm = &mcp16502_pm_ops,
#endif
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
index 3886b252fbe7..d068ac93d373 100644
--- a/drivers/regulator/mp5416.c
+++ b/drivers/regulator/mp5416.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
index ede1b1e58002..9911be2e6bac 100644
--- a/drivers/regulator/mp886x.c
+++ b/drivers/regulator/mp886x.c
@@ -9,7 +9,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c
index bf677c535edc..a670e09891e7 100644
--- a/drivers/regulator/mpq7920.c
+++ b/drivers/regulator/mpq7920.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -319,7 +318,7 @@ static struct i2c_driver mpq7920_regulator_driver = {
.driver = {
.name = "mpq7920",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(mpq7920_of_match),
+ .of_match_table = mpq7920_of_match,
},
.probe = mpq7920_i2c_probe,
.id_table = mpq7920_id,
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index b0771770cc26..63a51485f2cc 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -4,7 +4,6 @@
// Author: Henry Chen <henryc.chen@mediatek.com>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
index 8047081ea2f7..2608a6652d77 100644
--- a/drivers/regulator/mt6315-regulator.c
+++ b/drivers/regulator/mt6315-regulator.c
@@ -3,7 +3,7 @@
// Copyright (c) 2021 MediaTek Inc.
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/mt6359-regulator.c b/drivers/regulator/mt6359-regulator.c
index 3eb86ec21d08..c8a788858824 100644
--- a/drivers/regulator/mt6359-regulator.c
+++ b/drivers/regulator/mt6359-regulator.c
@@ -7,7 +7,7 @@
#include <linux/mfd/mt6359p/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c b/drivers/regulator/mtk-dvfsrc-regulator.c
index efca67207a5a..f1280d45265d 100644
--- a/drivers/regulator/mtk-dvfsrc-regulator.c
+++ b/drivers/regulator/mtk-dvfsrc-regulator.c
@@ -6,8 +6,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/soc/mediatek/mtk_dvfsrc.h>
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 0c9873e9abdc..cd5a0d7e4455 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -25,7 +25,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
struct pbias_reg_info {
u32 enable;
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 91bfb7e026c9..2ab365d2749f 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8d7e6c323324..46854602b3ea 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -699,8 +699,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client)
return -ENOMEM;
if (client->dev.of_node) {
- match = of_match_device(of_match_ptr(pfuze_dt_ids),
- &client->dev);
+ match = of_match_device(pfuze_dt_ids, &client->dev);
if (!match) {
dev_err(&client->dev, "Error: No device match found\n");
return -ENODEV;
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index b64d99695b84..2aff6db748e2 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -10,11 +10,11 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pwm.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
new file mode 100644
index 000000000000..656fe330d38f
--- /dev/null
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2017, 2019-2020, The Linux Foundation. All rights reserved.
+// Copyright (c) 2023, Linaro Limited
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define REFGEN_REG_BIAS_EN 0x08
+#define REFGEN_BIAS_EN_MASK GENMASK(2, 0)
+ #define REFGEN_BIAS_EN_ENABLE 0x7
+ #define REFGEN_BIAS_EN_DISABLE 0x6
+
+#define REFGEN_REG_BG_CTRL 0x14
+#define REFGEN_BG_CTRL_MASK GENMASK(2, 1)
+ #define REFGEN_BG_CTRL_ENABLE 0x3
+ #define REFGEN_BG_CTRL_DISABLE 0x2
+
+#define REFGEN_REG_PWRDWN_CTRL5 0x80
+#define REFGEN_PWRDWN_CTRL5_MASK BIT(0)
+ #define REFGEN_PWRDWN_CTRL5_ENABLE 0x1
+
+static int qcom_sdm845_refgen_enable(struct regulator_dev *rdev)
+{
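+	/* power up the bandgap first, then switch the bias output on */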
+ regmap_update_bits(rdev->regmap, REFGEN_REG_BG_CTRL, REFGEN_BG_CTRL_MASK,
+ FIELD_PREP(REFGEN_BG_CTRL_MASK, REFGEN_BG_CTRL_ENABLE));
+
+ regmap_write(rdev->regmap, REFGEN_REG_BIAS_EN,
+ FIELD_PREP(REFGEN_BIAS_EN_MASK, REFGEN_BIAS_EN_ENABLE));
+
+ return 0;
+}
+
+static int qcom_sdm845_refgen_disable(struct regulator_dev *rdev)
+{
+ regmap_write(rdev->regmap, REFGEN_REG_BIAS_EN,
+ FIELD_PREP(REFGEN_BIAS_EN_MASK, REFGEN_BIAS_EN_DISABLE));
+
+ regmap_update_bits(rdev->regmap, REFGEN_REG_BG_CTRL, REFGEN_BG_CTRL_MASK,
+ FIELD_PREP(REFGEN_BG_CTRL_MASK, REFGEN_BG_CTRL_DISABLE));
+
+ return 0;
+}
+
+static int qcom_sdm845_refgen_is_enabled(struct regulator_dev *rdev)
+{
+ u32 val;
+
+ regmap_read(rdev->regmap, REFGEN_REG_BG_CTRL, &val);
+ if (FIELD_GET(REFGEN_BG_CTRL_MASK, val) != REFGEN_BG_CTRL_ENABLE)
+ return 0;
+
+ regmap_read(rdev->regmap, REFGEN_REG_BIAS_EN, &val);
+ if (FIELD_GET(REFGEN_BIAS_EN_MASK, val) != REFGEN_BIAS_EN_ENABLE)
+ return 0;
+
+ return 1;
+}
+
+static struct regulator_desc sdm845_refgen_desc = {
+ .enable_time = 5,
+ .name = "refgen",
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &(const struct regulator_ops) {
+ .enable = qcom_sdm845_refgen_enable,
+ .disable = qcom_sdm845_refgen_disable,
+ .is_enabled = qcom_sdm845_refgen_is_enabled,
+ },
+};
+
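+/* the SM8250 variant is controlled through a single PWRDWN_CTRL5 bit, so the generic regmap helpers suffice */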
+static struct regulator_desc sm8250_refgen_desc = {
+ .enable_reg = REFGEN_REG_PWRDWN_CTRL5,
+ .enable_mask = REFGEN_PWRDWN_CTRL5_MASK,
+ .enable_val = REFGEN_PWRDWN_CTRL5_ENABLE,
+ .disable_val = 0,
+ .enable_time = 5,
+ .name = "refgen",
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &(const struct regulator_ops) {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ },
+};
+
+static const struct regmap_config qcom_refgen_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static int qcom_refgen_probe(struct platform_device *pdev)
+{
+ struct regulator_init_data *init_data;
+ struct regulator_config config = {};
+ const struct regulator_desc *rdesc;
+ struct device *dev = &pdev->dev;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+ void __iomem *base;
+
+ rdesc = of_device_get_match_data(dev);
+ if (!rdesc)
+ return -ENODATA;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &qcom_refgen_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node, rdesc);
+ if (!init_data)
+ return -ENOMEM;
+
+ config.dev = dev;
+ config.init_data = init_data;
+ config.of_node = dev->of_node;
+ config.regmap = regmap;
+
+ rdev = devm_regulator_register(dev, rdesc, &config);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_refgen_match_table[] = {
+ { .compatible = "qcom,sdm845-refgen-regulator", .data = &sdm845_refgen_desc },
+ { .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
+ { }
+};
+
+static struct platform_driver qcom_refgen_driver = {
+ .probe = qcom_refgen_probe,
+ .driver = {
+ .name = "qcom-refgen-regulator",
+ .of_match_table = qcom_refgen_match_table,
+ },
+};
+module_platform_driver(qcom_refgen_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qualcomm REFGEN regulator driver");
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index cd077b7c4aff..d990ba19c50e 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -1273,6 +1272,40 @@ static const struct rpmh_vreg_init_data pmx65_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pmx75_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps525_lv, "vdd-s1"),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps525_lv, "vdd-s2"),
+ RPMH_VREG("smps3", "smp%s3", &pmic5_ftsmps525_lv, "vdd-s3"),
+ RPMH_VREG("smps4", "smp%s4", &pmic5_ftsmps525_mv, "vdd-s4"),
+ RPMH_VREG("smps5", "smp%s5", &pmic5_ftsmps525_lv, "vdd-s5"),
+ RPMH_VREG("smps6", "smp%s6", &pmic5_ftsmps525_lv, "vdd-s6"),
+ RPMH_VREG("smps7", "smp%s7", &pmic5_ftsmps525_lv, "vdd-s7"),
+ RPMH_VREG("smps8", "smp%s8", &pmic5_ftsmps525_lv, "vdd-s8"),
+ RPMH_VREG("smps9", "smp%s9", &pmic5_ftsmps525_lv, "vdd-s9"),
+ RPMH_VREG("smps10", "smp%s10", &pmic5_ftsmps525_lv, "vdd-s10"),
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo515, "vdd-l1"),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_nldo515, "vdd-l2-18"),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_nldo515, "vdd-l3"),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo515, "vdd-l4-l16"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo_lv, "vdd-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo_lv, "vdd-l5-l6"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_nldo515, "vdd-l7"),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_nldo515, "vdd-l8-l9"),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_nldo515, "vdd-l8-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, "vdd-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, "vdd-l11-l13"),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo515, "vdd-l12"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l11-l13"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_nldo515, "vdd-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_nldo515, "vdd-l4-l16"),
+ RPMH_VREG("ldo17", "ldo%s17", &pmic5_nldo515, "vdd-l17"),
+ /* ldo18 not configured */
+ RPMH_VREG("ldo19", "ldo%s19", &pmic5_nldo515, "vdd-l19"),
+ RPMH_VREG("ldo20", "ldo%s20", &pmic5_nldo515, "vdd-l20-l21"),
+ RPMH_VREG("ldo21", "ldo%s21", &pmic5_nldo515, "vdd-l20-l21"),
+};
+
static const struct rpmh_vreg_init_data pm7325_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_ftsmps520, "vdd-s2"),
@@ -1495,6 +1528,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pmx65_vreg_data,
},
{
+ .compatible = "qcom,pmx75-rpmh-regulators",
+ .data = pmx75_vreg_data,
+ },
+ {
.compatible = "qcom,pm7325-rpmh-regulators",
.data = pm7325_vreg_data,
},
diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c
index f95bc9208c13..9366488f0383 100644
--- a/drivers/regulator/qcom_rpm-regulator.c
+++ b/drivers/regulator/qcom_rpm-regulator.c
@@ -956,11 +956,10 @@ static int rpm_reg_probe(struct platform_device *pdev)
}
for (reg = match->data; reg->name; reg++) {
- vreg = devm_kmalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ vreg = devm_kmemdup(&pdev->dev, reg->template, sizeof(*vreg), GFP_KERNEL);
if (!vreg)
return -ENOMEM;
- memcpy(vreg, reg->template, sizeof(*vreg));
mutex_init(&vreg->lock);
vreg->dev = &pdev->dev;
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 18189f35db68..f53ada076252 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -6,7 +6,6 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/qcom_usb_vbus-regulator.c b/drivers/regulator/qcom_usb_vbus-regulator.c
index 57ec613f4a0a..cd94ed67621f 100644
--- a/drivers/regulator/qcom_usb_vbus-regulator.c
+++ b/drivers/regulator/qcom_usb_vbus-regulator.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/raa215300.c b/drivers/regulator/raa215300.c
index 24a1c89f5dbc..6982565c8aa4 100644
--- a/drivers/regulator/raa215300.c
+++ b/drivers/regulator/raa215300.c
@@ -38,10 +38,6 @@
#define RAA215300_REG_BLOCK_EN_RTC_EN BIT(6)
#define RAA215300_RTC_DEFAULT_ADDR 0x6f
-const char *clkin_name = "clkin";
-const char *xin_name = "xin";
-static struct clk *clk;
-
static const struct regmap_config raa215300_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -51,10 +47,6 @@ static const struct regmap_config raa215300_regmap_config = {
static void raa215300_rtc_unregister_device(void *data)
{
i2c_unregister_device(data);
- if (!clk) {
- clk_unregister_fixed_rate(clk);
- clk = NULL;
- }
}
static int raa215300_clk_present(struct i2c_client *client, const char *name)
@@ -71,8 +63,10 @@ static int raa215300_clk_present(struct i2c_client *client, const char *name)
static int raa215300_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
- const char *clk_name = xin_name;
+ const char *clkin_name = "clkin";
unsigned int pmic_version, val;
+ const char *xin_name = "xin";
+ const char *clk_name = NULL;
struct regmap *regmap;
int ret;
@@ -92,7 +86,7 @@ static int raa215300_i2c_probe(struct i2c_client *client)
val &= RAA215300_REG_BLOCK_EN_RTC_EN;
regmap_write(regmap, RAA215300_REG_BLOCK_EN, val);
- /*Clear the latched registers */
+ /* Clear the latched registers */
regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_1, &val);
regmap_write(regmap, RAA215300_FAULT_LATCHED_STATUS_1, val);
regmap_read(regmap, RAA215300_FAULT_LATCHED_STATUS_2, &val);
@@ -114,24 +108,32 @@ static int raa215300_i2c_probe(struct i2c_client *client)
ret = raa215300_clk_present(client, xin_name);
if (ret < 0) {
return ret;
- } else if (!ret) {
+ } else if (ret) {
+ clk_name = xin_name;
+ } else {
ret = raa215300_clk_present(client, clkin_name);
if (ret < 0)
return ret;
-
- clk_name = clkin_name;
+ if (ret)
+ clk_name = clkin_name;
}
- if (ret) {
- char *name = pmic_version >= 0x12 ? "isl1208" : "raa215300_a0";
+ if (clk_name) {
+ const char *name = pmic_version >= 0x12 ? "isl1208" : "raa215300_a0";
struct device_node *np = client->dev.of_node;
u32 addr = RAA215300_RTC_DEFAULT_ADDR;
struct i2c_board_info info = {};
struct i2c_client *rtc_client;
+ struct clk_hw *hw;
ssize_t size;
- clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 32000);
- clk_register_clkdev(clk, clk_name, NULL);
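+	/* register the RTC reference at the standard 32.768 kHz rate */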
+ hw = devm_clk_hw_register_fixed_rate(dev, clk_name, NULL, 0, 32768);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ ret = devm_clk_hw_register_clkdev(dev, hw, clk_name, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize clkdev\n");
if (np) {
int i;
@@ -180,7 +182,7 @@ static struct i2c_driver raa215300_i2c_driver = {
.name = "raa215300",
.of_match_table = raa215300_dt_match,
},
- .probe_new = raa215300_i2c_probe,
+ .probe = raa215300_i2c_probe,
};
module_i2c_driver(raa215300_i2c_driver);
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index a5afca73715d..a25a141e86c4 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/gpio.h>
#include <linux/mfd/rc5t583.h>
struct rc5t583_regulator_info {
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 460525ed006c..867a2cf243f6 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -17,9 +17,10 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mfd/rk808.h>
+#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/regulator/rohm-regulator.c b/drivers/regulator/rohm-regulator.c
index f97a9a51ee76..0e2418ed957c 100644
--- a/drivers/regulator/rohm-regulator.c
+++ b/drivers/regulator/rohm-regulator.c
@@ -36,7 +36,7 @@ static int set_dvs_level(const struct regulator_desc *desc,
}
for (i = 0; i < desc->n_voltages; i++) {
/* NOTE to next hacker - Does not support pickable ranges */
- if (desc->linear_range_selectors)
+ if (desc->linear_range_selectors_bitfield)
return -EINVAL;
if (desc->n_linear_ranges)
ret = regulator_desc_list_voltage_linear_range(desc, i);
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index e9719a378a0b..f52c3d47ecea 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -7,7 +7,6 @@
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/init.h>
@@ -397,7 +396,7 @@ static struct i2c_driver attiny_regulator_driver = {
.driver = {
.name = "rpi_touchscreen_attiny",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(attiny_dt_ids),
+ .of_match_table = attiny_dt_ids,
},
.probe = attiny_i2c_probe,
.remove = attiny_i2c_remove,
diff --git a/drivers/regulator/rt5739.c b/drivers/regulator/rt5739.c
index 0ce6a1666752..91412c905ce6 100644
--- a/drivers/regulator/rt5739.c
+++ b/drivers/regulator/rt5739.c
@@ -31,10 +31,17 @@
#define RT5739_MODEVSEL1_MASK BIT(1)
#define RT5739_MODEVSEL0_MASK BIT(0)
#define RT5739_VID_MASK GENMASK(7, 5)
+#define RT5739_DID_MASK GENMASK(3, 0)
#define RT5739_ACTD_MASK BIT(7)
#define RT5739_ENVSEL1_MASK BIT(1)
#define RT5739_ENVSEL0_MASK BIT(0)
+#define RT5733_CHIPDIE_ID 0x1
+#define RT5733_VOLT_MINUV 270000
+#define RT5733_VOLT_MAXUV 1401250
+#define RT5733_VOLT_STPUV 6250
+#define RT5733_N_VOLTS 182
+
#define RT5739_VOLT_MINUV 300000
#define RT5739_VOLT_MAXUV 1300000
#define RT5739_VOLT_STPUV 5000
@@ -93,8 +100,11 @@ static int rt5739_set_suspend_voltage(struct regulator_dev *rdev, int uV)
const struct regulator_desc *desc = rdev->desc;
struct regmap *regmap = rdev_get_regmap(rdev);
unsigned int reg, vsel;
+ int max_uV;
+
+ max_uV = desc->min_uV + desc->uV_step * (desc->n_voltages - 1);
- if (uV < RT5739_VOLT_MINUV || uV > RT5739_VOLT_MAXUV)
+ if (uV < desc->min_uV || uV > max_uV)
return -EINVAL;
if (desc->vsel_reg == RT5739_REG_NSEL0)
@@ -102,7 +112,7 @@ static int rt5739_set_suspend_voltage(struct regulator_dev *rdev, int uV)
else
reg = RT5739_REG_NSEL0;
- vsel = (uV - RT5739_VOLT_MINUV) / RT5739_VOLT_STPUV;
+ vsel = (uV - desc->min_uV) / desc->uV_step;
return regmap_write(regmap, reg, vsel);
}
@@ -189,15 +199,12 @@ static unsigned int rt5739_of_map_mode(unsigned int mode)
}
static void rt5739_init_regulator_desc(struct regulator_desc *desc,
- bool vsel_active_high)
+ bool vsel_active_high, u8 did)
{
/* Fixed */
desc->name = "rt5739-regulator";
desc->owner = THIS_MODULE;
desc->ops = &rt5739_regulator_ops;
- desc->n_voltages = RT5739_N_VOLTS;
- desc->min_uV = RT5739_VOLT_MINUV;
- desc->uV_step = RT5739_VOLT_STPUV;
desc->vsel_mask = RT5739_VSEL_MASK;
desc->enable_reg = RT5739_REG_CNTL2;
desc->active_discharge_reg = RT5739_REG_CNTL1;
@@ -213,6 +220,20 @@ static void rt5739_init_regulator_desc(struct regulator_desc *desc,
desc->vsel_reg = RT5739_REG_NSEL0;
desc->enable_mask = RT5739_ENVSEL0_MASK;
}
+
+ /* Assigned by CHIPDIE ID */
+ switch (did) {
+ case RT5733_CHIPDIE_ID:
+ desc->n_voltages = RT5733_N_VOLTS;
+ desc->min_uV = RT5733_VOLT_MINUV;
+ desc->uV_step = RT5733_VOLT_STPUV;
+ break;
+ default:
+ desc->n_voltages = RT5739_N_VOLTS;
+ desc->min_uV = RT5739_VOLT_MINUV;
+ desc->uV_step = RT5739_VOLT_STPUV;
+ break;
+ }
}
static const struct regmap_config rt5739_regmap_config = {
@@ -258,7 +279,7 @@ static int rt5739_probe(struct i2c_client *i2c)
vsel_acth = device_property_read_bool(dev, "richtek,vsel-active-high");
- rt5739_init_regulator_desc(desc, vsel_acth);
+ rt5739_init_regulator_desc(desc, vsel_acth, vid & RT5739_DID_MASK);
cfg.dev = dev;
cfg.of_node = dev_of_node(dev);
@@ -271,6 +292,7 @@ static int rt5739_probe(struct i2c_client *i2c)
}
static const struct of_device_id rt5739_device_table[] = {
+ { .compatible = "richtek,rt5733" },
{ .compatible = "richtek,rt5739" },
{ /* sentinel */ }
};
diff --git a/drivers/regulator/rt5759-regulator.c b/drivers/regulator/rt5759-regulator.c
index 90555a9ef1b0..c2553dcee050 100644
--- a/drivers/regulator/rt5759-regulator.c
+++ b/drivers/regulator/rt5759-regulator.c
@@ -4,7 +4,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c
new file mode 100644
index 000000000000..2d54844c4226
--- /dev/null
+++ b/drivers/regulator/rtq2208-regulator.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/util_macros.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mod_devicetable.h>
+
+/* Register */
+#define RTQ2208_REG_GLOBAL_INT1 0x12
+#define RTQ2208_REG_FLT_RECORDBUCK_CB 0x18
+#define RTQ2208_REG_GLOBAL_INT1_MASK 0x1D
+#define RTQ2208_REG_FLT_MASKBUCK_CB 0x1F
+#define RTQ2208_REG_BUCK_C_CFG0 0x32
+#define RTQ2208_REG_BUCK_B_CFG0 0x42
+#define RTQ2208_REG_BUCK_A_CFG0 0x52
+#define RTQ2208_REG_BUCK_D_CFG0 0x62
+#define RTQ2208_REG_BUCK_G_CFG0 0x72
+#define RTQ2208_REG_BUCK_F_CFG0 0x82
+#define RTQ2208_REG_BUCK_E_CFG0 0x92
+#define RTQ2208_REG_BUCK_H_CFG0 0xA2
+#define RTQ2208_REG_LDO1_CFG 0xB1
+#define RTQ2208_REG_LDO2_CFG 0xC1
+
+/* Mask */
+#define RTQ2208_BUCK_NR_MTP_SEL_MASK GENMASK(7, 0)
+#define RTQ2208_BUCK_EN_NR_MTP_SEL0_MASK BIT(0)
+#define RTQ2208_BUCK_EN_NR_MTP_SEL1_MASK BIT(1)
+#define RTQ2208_BUCK_RSPUP_MASK GENMASK(6, 4)
+#define RTQ2208_BUCK_RSPDN_MASK GENMASK(2, 0)
+#define RTQ2208_BUCK_NRMODE_MASK BIT(5)
+#define RTQ2208_BUCK_STRMODE_MASK BIT(5)
+#define RTQ2208_BUCK_EN_STR_MASK BIT(0)
+#define RTQ2208_LDO_EN_STR_MASK BIT(7)
+#define RTQ2208_EN_DIS_MASK BIT(0)
+#define RTQ2208_BUCK_RAMP_SEL_MASK GENMASK(2, 0)
+#define RTQ2208_HD_INT_MASK BIT(0)
+
+/* Size */
+#define RTQ2208_VOUT_MAXNUM 256
+#define RTQ2208_BUCK_NUM_IRQ_REGS 5
+#define RTQ2208_STS_NUM_IRQ_REGS 2
+
+/* Value */
+#define RTQ2208_RAMP_VALUE_MIN_uV 500
+#define RTQ2208_RAMP_VALUE_MAX_uV 64000
+
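+/* each fault record register packs the UV/OV bits of two bucks (bits 0/1 and 4/5) */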
+#define RTQ2208_BUCK_MASK(uv_irq, ov_irq) (1 << ((uv_irq) % 8) | 1 << ((ov_irq) % 8))
+
+enum {
+ RTQ2208_BUCK_B = 0,
+ RTQ2208_BUCK_C,
+ RTQ2208_BUCK_D,
+ RTQ2208_BUCK_A,
+ RTQ2208_BUCK_F,
+ RTQ2208_BUCK_G,
+ RTQ2208_BUCK_H,
+ RTQ2208_BUCK_E,
+ RTQ2208_LDO2,
+ RTQ2208_LDO1,
+ RTQ2208_LDO_MAX,
+};
+
+enum {
+ RTQ2208_AUTO_MODE = 0,
+ RTQ2208_FCCM,
+};
+
+struct rtq2208_regulator_desc {
+ struct regulator_desc desc;
+ unsigned int mtp_sel_reg;
+ unsigned int mtp_sel_mask;
+ unsigned int mode_reg;
+ unsigned int mode_mask;
+ unsigned int suspend_config_reg;
+ unsigned int suspend_enable_mask;
+ unsigned int suspend_mode_mask;
+};
+
+struct rtq2208_rdev_map {
+ struct regulator_dev *rdev[RTQ2208_LDO_MAX];
+ struct regmap *regmap;
+ struct device *dev;
+};
+
+/* set Normal Auto/FCCM mode */
+static int rtq2208_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ const struct rtq2208_regulator_desc *rdesc =
+ (const struct rtq2208_regulator_desc *)rdev->desc;
+ unsigned int val, shift;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = RTQ2208_AUTO_MODE;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = RTQ2208_FCCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ shift = ffs(rdesc->mode_mask) - 1;
+ return regmap_update_bits(rdev->regmap, rdesc->mode_reg,
+ rdesc->mode_mask, val << shift);
+}
+
+static unsigned int rtq2208_get_mode(struct regulator_dev *rdev)
+{
+ const struct rtq2208_regulator_desc *rdesc =
+ (const struct rtq2208_regulator_desc *)rdev->desc;
+ unsigned int mode_val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdesc->mode_reg, &mode_val);
+ if (ret)
+ return REGULATOR_MODE_INVALID;
+
+ return (mode_val & rdesc->mode_mask) ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static int rtq2208_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ const struct regulator_desc *desc = rdev->desc;
+ unsigned int sel = 0, val;
+
+ ramp_delay = max(ramp_delay, RTQ2208_RAMP_VALUE_MIN_uV);
+ ramp_delay = min(ramp_delay, RTQ2208_RAMP_VALUE_MAX_uV);
+
+ ramp_delay /= RTQ2208_RAMP_VALUE_MIN_uV;
+
+ /*
+	 * fls(ramp_delay) - 1: LSB shift, so the index starts from 0.
+	 *
+	 * RTQ2208_BUCK_RAMP_SEL_MASK - sel: shift in descending order, because
+	 * the relation between selection and value is
+	 *
+	 * selection: value
+	 * 000: 64mV
+	 * 001: 32mV
+	 * ...
+	 * 111: 0.5mV
+	 *
+	 * For example, to select 64mV, fls(ramp_delay) - 1 gives 0b111,
+	 * and RTQ2208_BUCK_RAMP_SEL_MASK - sel (0b111 - sel) yields the final selection.
+ */
+
+ sel = fls(ramp_delay) - 1;
+ sel = RTQ2208_BUCK_RAMP_SEL_MASK - sel;
+
+ val = FIELD_PREP(RTQ2208_BUCK_RSPUP_MASK, sel) | FIELD_PREP(RTQ2208_BUCK_RSPDN_MASK, sel);
+
+ return regmap_update_bits(rdev->regmap, desc->ramp_reg,
+ RTQ2208_BUCK_RSPUP_MASK | RTQ2208_BUCK_RSPDN_MASK, val);
+}
+
+static int rtq2208_set_suspend_enable(struct regulator_dev *rdev)
+{
+ const struct rtq2208_regulator_desc *rdesc =
+ (const struct rtq2208_regulator_desc *)rdev->desc;
+
+ return regmap_set_bits(rdev->regmap, rdesc->suspend_config_reg, rdesc->suspend_enable_mask);
+}
+
+static int rtq2208_set_suspend_disable(struct regulator_dev *rdev)
+{
+ const struct rtq2208_regulator_desc *rdesc =
+ (const struct rtq2208_regulator_desc *)rdev->desc;
+
+ return regmap_update_bits(rdev->regmap, rdesc->suspend_config_reg, rdesc->suspend_enable_mask, 0);
+}
+
+static int rtq2208_set_suspend_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ const struct rtq2208_regulator_desc *rdesc =
+ (const struct rtq2208_regulator_desc *)rdev->desc;
+ unsigned int val, shift;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = RTQ2208_AUTO_MODE;
+ break;
+ case REGULATOR_MODE_FAST:
+ val = RTQ2208_FCCM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ shift = ffs(rdesc->suspend_mode_mask) - 1;
+
+ return regmap_update_bits(rdev->regmap, rdesc->suspend_config_reg,
+ rdesc->suspend_mode_mask, val << shift);
+}
+
+static const struct regulator_ops rtq2208_regulator_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_mode = rtq2208_set_mode,
+ .get_mode = rtq2208_get_mode,
+ .set_ramp_delay = rtq2208_set_ramp_delay,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_suspend_enable = rtq2208_set_suspend_enable,
+ .set_suspend_disable = rtq2208_set_suspend_disable,
+ .set_suspend_mode = rtq2208_set_suspend_mode,
+};
+
+static const struct regulator_ops rtq2208_regulator_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_suspend_enable = rtq2208_set_suspend_enable,
+ .set_suspend_disable = rtq2208_set_suspend_disable,
+};
+
+static unsigned int rtq2208_of_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RTQ2208_AUTO_MODE:
+ return REGULATOR_MODE_NORMAL;
+ case RTQ2208_FCCM:
+ return REGULATOR_MODE_FAST;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int rtq2208_init_irq_mask(struct rtq2208_rdev_map *rdev_map, unsigned int *buck_masks)
+{
+ unsigned char buck_clr_masks[5] = {0x33, 0x33, 0x33, 0x33, 0x33},
+ sts_clr_masks[2] = {0xE7, 0xF7}, sts_masks[2] = {0xE6, 0xF6};
+ int ret;
+
+ /* write clear all buck irq once */
+ ret = regmap_bulk_write(rdev_map->regmap, RTQ2208_REG_FLT_RECORDBUCK_CB, buck_clr_masks, 5);
+ if (ret)
+ return dev_err_probe(rdev_map->dev, ret, "Failed to clr buck irqs\n");
+
+ /* write clear general irq once */
+ ret = regmap_bulk_write(rdev_map->regmap, RTQ2208_REG_GLOBAL_INT1, sts_clr_masks, 2);
+ if (ret)
+ return dev_err_probe(rdev_map->dev, ret, "Failed to clr general irqs\n");
+
+ /* unmask buck ov/uv irq */
+ ret = regmap_bulk_write(rdev_map->regmap, RTQ2208_REG_FLT_MASKBUCK_CB, buck_masks, 5);
+ if (ret)
+ return dev_err_probe(rdev_map->dev, ret, "Failed to unmask buck irqs\n");
+
+ /* unmask needed general irq */
+ return regmap_bulk_write(rdev_map->regmap, RTQ2208_REG_GLOBAL_INT1_MASK, sts_masks, 2);
+}
+
+static irqreturn_t rtq2208_irq_handler(int irqno, void *devid)
+{
+ unsigned char buck_flags[RTQ2208_BUCK_NUM_IRQ_REGS], sts_flags[RTQ2208_STS_NUM_IRQ_REGS];
+ int ret = 0, i, uv_bit, ov_bit;
+ struct rtq2208_rdev_map *rdev_map = devid;
+ struct regulator_dev *rdev;
+
+ if (!rdev_map)
+ return IRQ_NONE;
+
+ /* read irq event */
+ ret = regmap_bulk_read(rdev_map->regmap, RTQ2208_REG_FLT_RECORDBUCK_CB,
+ buck_flags, ARRAY_SIZE(buck_flags));
+ if (ret)
+ return IRQ_NONE;
+
+ ret = regmap_bulk_read(rdev_map->regmap, RTQ2208_REG_GLOBAL_INT1,
+ sts_flags, ARRAY_SIZE(sts_flags));
+ if (ret)
+ return IRQ_NONE;
+
+ /* clear irq event */
+ ret = regmap_bulk_write(rdev_map->regmap, RTQ2208_REG_FLT_RECORDBUCK_CB,
+ buck_flags, ARRAY_SIZE(buck_flags));
+ if (ret)
+ return IRQ_NONE;
+
+ ret = regmap_bulk_write(rdev_map->regmap, RTQ2208_REG_GLOBAL_INT1,
+ sts_flags, ARRAY_SIZE(sts_flags));
+ if (ret)
+ return IRQ_NONE;
+
+ for (i = 0; i < RTQ2208_LDO_MAX; i++) {
+ if (!rdev_map->rdev[i])
+ continue;
+
+ rdev = rdev_map->rdev[i];
+ /* uv irq */
+ uv_bit = (i & 1) ? 4 : 0;
+ if (buck_flags[i >> 1] & (1 << uv_bit))
+ regulator_notifier_call_chain(rdev,
+ REGULATOR_EVENT_UNDER_VOLTAGE, NULL);
+ /* ov irq */
+ ov_bit = uv_bit + 1;
+ if (buck_flags[i >> 1] & (1 << ov_bit))
+ regulator_notifier_call_chain(rdev,
+ REGULATOR_EVENT_REGULATION_OUT, NULL);
+
+ /* hd irq */
+ if (sts_flags[1] & RTQ2208_HD_INT_MASK)
+ regulator_notifier_call_chain(rdev,
+ REGULATOR_EVENT_OVER_TEMP, NULL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define RTQ2208_REGULATOR_INFO(_name, _base) \
+{ \
+ .name = #_name, \
+ .base = _base, \
+}
+#define BUCK_RG_BASE(_id) RTQ2208_REG_BUCK_##_id##_CFG0
+#define BUCK_RG_SHIFT(_base, _shift) (_base + _shift)
+#define LDO_RG_BASE(_id) RTQ2208_REG_LDO##_id##_CFG
+#define LDO_RG_SHIFT(_base, _shift) (_base + _shift)
+#define VSEL_SHIFT(_sel) (_sel ? 3 : 1)
+#define MTP_SEL_MASK(_sel) RTQ2208_BUCK_EN_NR_MTP_SEL##_sel##_MASK
+
+static const struct linear_range rtq2208_vout_range[] = {
+ REGULATOR_LINEAR_RANGE(400000, 0, 180, 5000),
+ REGULATOR_LINEAR_RANGE(1310000, 181, 255, 10000),
+};
+
+static int rtq2208_of_get_fixed_voltage(struct device *dev,
+ struct of_regulator_match *rtq2208_ldo_match, int n_fixed)
+{
+ struct device_node *np;
+ struct of_regulator_match *match;
+ struct rtq2208_regulator_desc *rdesc;
+ struct regulator_init_data *init_data;
+ int ret, i;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ np = of_get_child_by_name(dev->of_node, "regulators");
+ if (!np)
+ np = dev->of_node;
+
+ ret = of_regulator_match(dev, np, rtq2208_ldo_match, n_fixed);
+
+ of_node_put(np);
+
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < n_fixed; i++) {
+ match = rtq2208_ldo_match + i;
+ init_data = match->init_data;
+ rdesc = (struct rtq2208_regulator_desc *)match->driver_data;
+
+ if (!init_data || !rdesc)
+ continue;
+
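+		/* constraints with min_uV == max_uV describe a fixed-voltage LDO output */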
+ if (init_data->constraints.min_uV == init_data->constraints.max_uV)
+ rdesc->desc.fixed_uV = init_data->constraints.min_uV;
+ }
+
+ return 0;
+}
+
+static void rtq2208_init_regulator_desc(struct rtq2208_regulator_desc *rdesc, int mtp_sel,
+ int idx, struct of_regulator_match *rtq2208_ldo_match, int *ldo_idx)
+{
+ struct regulator_desc *desc;
+ static const struct {
+ char *name;
+ int base;
+ } regulator_info[] = {
+ RTQ2208_REGULATOR_INFO(buck-b, BUCK_RG_BASE(B)),
+ RTQ2208_REGULATOR_INFO(buck-c, BUCK_RG_BASE(C)),
+ RTQ2208_REGULATOR_INFO(buck-d, BUCK_RG_BASE(D)),
+ RTQ2208_REGULATOR_INFO(buck-a, BUCK_RG_BASE(A)),
+ RTQ2208_REGULATOR_INFO(buck-f, BUCK_RG_BASE(F)),
+ RTQ2208_REGULATOR_INFO(buck-g, BUCK_RG_BASE(G)),
+ RTQ2208_REGULATOR_INFO(buck-h, BUCK_RG_BASE(H)),
+ RTQ2208_REGULATOR_INFO(buck-e, BUCK_RG_BASE(E)),
+ RTQ2208_REGULATOR_INFO(ldo2, LDO_RG_BASE(2)),
+ RTQ2208_REGULATOR_INFO(ldo1, LDO_RG_BASE(1)),
+ }, *curr_info;
+
+ curr_info = regulator_info + idx;
+ desc = &rdesc->desc;
+ desc->name = curr_info->name;
+ desc->of_match = of_match_ptr(curr_info->name);
+ desc->regulators_node = of_match_ptr("regulators");
+ desc->id = idx;
+ desc->owner = THIS_MODULE;
+ desc->type = REGULATOR_VOLTAGE;
+ desc->enable_mask = mtp_sel ? MTP_SEL_MASK(1) : MTP_SEL_MASK(0);
+ desc->active_discharge_on = RTQ2208_EN_DIS_MASK;
+ desc->active_discharge_off = 0;
+ desc->active_discharge_mask = RTQ2208_EN_DIS_MASK;
+
+ rdesc->mode_mask = RTQ2208_BUCK_NRMODE_MASK;
+
+ if (idx >= RTQ2208_BUCK_B && idx <= RTQ2208_BUCK_E) {
+ /* init buck desc */
+ desc->enable_reg = BUCK_RG_SHIFT(curr_info->base, 2);
+ desc->ops = &rtq2208_regulator_buck_ops;
+ desc->vsel_reg = curr_info->base + VSEL_SHIFT(mtp_sel);
+ desc->vsel_mask = RTQ2208_BUCK_NR_MTP_SEL_MASK;
+ desc->n_voltages = RTQ2208_VOUT_MAXNUM;
+ desc->linear_ranges = rtq2208_vout_range;
+ desc->n_linear_ranges = ARRAY_SIZE(rtq2208_vout_range);
+ desc->ramp_reg = BUCK_RG_SHIFT(curr_info->base, 5);
+ desc->active_discharge_reg = curr_info->base;
+ desc->of_map_mode = rtq2208_of_map_mode;
+
+ rdesc->mode_reg = BUCK_RG_SHIFT(curr_info->base, 2);
+ rdesc->suspend_config_reg = BUCK_RG_SHIFT(curr_info->base, 4);
+ rdesc->suspend_enable_mask = RTQ2208_BUCK_EN_STR_MASK;
+ rdesc->suspend_mode_mask = RTQ2208_BUCK_STRMODE_MASK;
+ } else {
+ /* init ldo desc */
+ desc->enable_reg = curr_info->base;
+ desc->ops = &rtq2208_regulator_ldo_ops;
+ desc->n_voltages = 1;
+ desc->active_discharge_reg = LDO_RG_SHIFT(curr_info->base, 2);
+
+ rtq2208_ldo_match[*ldo_idx].name = desc->name;
+ rtq2208_ldo_match[*ldo_idx].driver_data = rdesc;
+ rtq2208_ldo_match[(*ldo_idx)++].desc = desc;
+
+ rdesc->suspend_config_reg = curr_info->base;
+ rdesc->suspend_enable_mask = RTQ2208_LDO_EN_STR_MASK;
+ }
+}
+
+static int rtq2208_parse_regulator_dt_data(int n_regulator, const unsigned int *regulator_idx_table,
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX], struct device *dev)
+{
+ struct of_regulator_match rtq2208_ldo_match[2];
+ int mtp_sel, ret, i, idx, ldo_idx = 0;
+
+ /* get mtp_sel0 or mtp_sel1 */
+ mtp_sel = device_property_read_bool(dev, "richtek,mtp-sel-high");
+
+ for (i = 0; i < n_regulator; i++) {
+ idx = regulator_idx_table[i];
+
+ rdesc[i] = devm_kcalloc(dev, 1, sizeof(*rdesc[0]), GFP_KERNEL);
+ if (!rdesc[i])
+ return -ENOMEM;
+
+ rtq2208_init_regulator_desc(rdesc[i], mtp_sel, idx, rtq2208_ldo_match, &ldo_idx);
+ }
+
+ /* init ldo fixed_uV */
+ ret = rtq2208_of_get_fixed_voltage(dev, rtq2208_ldo_match, ldo_idx);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get ldo fixed_uV\n");
+
+	return 0;
+}
+
+/*
+ * Different slave addresses correspond to different sets of used bucks:
+ * slave address 0x10: BUCK[BCA FGE]
+ * slave address 0x20: BUCK[BC FGHE]
+ * slave address 0x40: BUCK[C G]
+ */
+static int rtq2208_regulator_check(int slave_addr, int *num,
+ int *regulator_idx_table, unsigned int *buck_masks)
+{
+ static bool rtq2208_used_table[3][RTQ2208_LDO_MAX] = {
+ /* BUCK[BCA FGE], LDO[12] */
+ {1, 1, 0, 1, 1, 1, 0, 1, 1, 1},
+ /* BUCK[BC FGHE], LDO[12]*/
+ {1, 1, 0, 0, 1, 1, 1, 1, 1, 1},
+ /* BUCK[C G], LDO[12] */
+ {0, 1, 0, 0, 0, 1, 0, 0, 1, 1},
+ };
+ int i, idx = ffs(slave_addr >> 4) - 1;
+ u8 mask;
+
+ for (i = 0; i < RTQ2208_LDO_MAX; i++) {
+ if (!rtq2208_used_table[idx][i])
+ continue;
+
+ regulator_idx_table[(*num)++] = i;
+
+ mask = RTQ2208_BUCK_MASK(4 * i, 4 * i + 1);
+ buck_masks[i >> 1] &= ~mask;
+ }
+
+ return 0;
+}
+
+static const struct regmap_config rtq2208_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xEF,
+};
+
+static int rtq2208_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct regmap *regmap;
+ struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX];
+ struct regulator_dev *rdev;
+ struct regulator_config cfg;
+ struct rtq2208_rdev_map *rdev_map;
+ int i, ret = 0, idx, n_regulator = 0;
+ unsigned int regulator_idx_table[RTQ2208_LDO_MAX],
+ buck_masks[RTQ2208_BUCK_NUM_IRQ_REGS] = {0x33, 0x33, 0x33, 0x33, 0x33};
+
+ rdev_map = devm_kzalloc(dev, sizeof(struct rtq2208_rdev_map), GFP_KERNEL);
+ if (!rdev_map)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(i2c, &rtq2208_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to allocate regmap\n");
+
+ /* get needed regulator */
+ ret = rtq2208_regulator_check(i2c->addr, &n_regulator, regulator_idx_table, buck_masks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to check used regulators\n");
+
+ rdev_map->regmap = regmap;
+ rdev_map->dev = dev;
+
+ cfg.dev = dev;
+
+ /* init regulator desc */
+ ret = rtq2208_parse_regulator_dt_data(n_regulator, regulator_idx_table, rdesc, dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < n_regulator; i++) {
+ idx = regulator_idx_table[i];
+
+ /* register regulator */
+ rdev = devm_regulator_register(dev, &rdesc[i]->desc, &cfg);
+ if (IS_ERR(rdev))
+ return PTR_ERR(rdev);
+
+ rdev_map->rdev[idx] = rdev;
+ }
+
+ /* init interrupt mask */
+ ret = rtq2208_init_irq_mask(rdev_map, buck_masks);
+ if (ret)
+ return ret;
+
+ /* register interrupt */
+ return devm_request_threaded_irq(dev, i2c->irq, NULL, rtq2208_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), rdev_map);
+}
+
+static const struct of_device_id rtq2208_device_tables[] = {
+ { .compatible = "richtek,rtq2208" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rtq2208_device_tables);
+
+static struct i2c_driver rtq2208_driver = {
+ .driver = {
+ .name = "rtq2208",
+ .of_match_table = rtq2208_device_tables,
+ },
+ .probe = rtq2208_probe,
+};
+module_i2c_driver(rtq2208_driver);
+
+MODULE_AUTHOR("Alina Yu <alina_yu@richtek.com>");
+MODULE_DESCRIPTION("Richtek RTQ2208 Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index b147ff6a16b1..c22fdde67f9c 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -5,7 +5,6 @@
#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
index 4c60eddad60d..85b0102fb9b1 100644
--- a/drivers/regulator/stm32-pwr.c
+++ b/drivers/regulator/stm32-pwr.c
@@ -6,8 +6,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index f5ccc7dd309a..717144cbe0f9 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -10,7 +10,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
index d0703105c439..d49c0cba09fb 100644
--- a/drivers/regulator/sy8824x.c
+++ b/drivers/regulator/sy8824x.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/sy8827n.c b/drivers/regulator/sy8827n.c
index 433959b43549..f11ff38b36c9 100644
--- a/drivers/regulator/sy8827n.c
+++ b/drivers/regulator/sy8827n.c
@@ -9,7 +9,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c
index b1c4b5120745..758c70269653 100644
--- a/drivers/regulator/tps6286x-regulator.c
+++ b/drivers/regulator/tps6286x-regulator.c
@@ -4,7 +4,7 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
@@ -84,11 +84,11 @@ static unsigned int tps6286x_of_map_mode(unsigned int mode)
static const struct regulator_desc tps6286x_reg = {
.name = "tps6286x",
- .of_match = of_match_ptr("SW"),
+ .of_match = "SW",
.owner = THIS_MODULE,
.ops = &tps6286x_regulator_ops,
.of_map_mode = tps6286x_of_map_mode,
- .regulators_node = of_match_ptr("regulators"),
+ .regulators_node = "regulators",
.type = REGULATOR_VOLTAGE,
.n_voltages = ((TPS6286X_MAX_MV - TPS6286X_MIN_MV) / TPS6286X_STEP_MV) + 1,
.min_uV = TPS6286X_MIN_MV * 1000,
@@ -148,7 +148,7 @@ static struct i2c_driver tps6286x_regulator_driver = {
.driver = {
.name = "tps6286x",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = of_match_ptr(tps6286x_dt_ids),
+ .of_match_table = tps6286x_dt_ids,
},
.probe = tps6286x_i2c_probe,
.id_table = tps6286x_i2c_id,
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index b1c0963586ac..d022184a8e7d 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -8,8 +8,8 @@
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
@@ -41,7 +41,7 @@ static const struct linear_range tps6287x_voltage_ranges[] = {
};
static const unsigned int tps6287x_voltage_range_sel[] = {
- 0x0, 0x4, 0x8, 0xC
+ 0x0, 0x1, 0x2, 0x3
};
static const unsigned int tps6287x_ramp_table[] = {
@@ -122,7 +122,7 @@ static struct regulator_desc tps6287x_reg = {
.n_voltages = 256,
.linear_ranges = tps6287x_voltage_ranges,
.n_linear_ranges = ARRAY_SIZE(tps6287x_voltage_ranges),
- .linear_range_selectors = tps6287x_voltage_range_sel,
+ .linear_range_selectors_bitfield = tps6287x_voltage_range_sel,
};
static int tps6287x_i2c_probe(struct i2c_client *i2c)
diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
index 663789198ba5..2d284c64eeb7 100644
--- a/drivers/regulator/tps65086-regulator.c
+++ b/drivers/regulator/tps65086-regulator.c
@@ -15,7 +15,15 @@
#include <linux/mfd/tps65086.h>
enum tps65086_regulators { BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, LDOA1,
- LDOA2, LDOA3, SWA1, SWB1, SWB2, VTT };
+ LDOA2, LDOA3, VTT, SWA1, SWB1, SWB2 };
+
+/* Selector for the regulator configuration based on the PMIC chip ID. */
+enum tps65086_ids {
+ TPS6508640 = 0,
+ TPS65086401,
+ TPS6508641,
+ TPS65086470,
+};
#define TPS65086_REGULATOR(_name, _of, _id, _nv, _vr, _vm, _er, _em, _lr, _dr, _dm) \
[_id] = { \
@@ -57,12 +65,24 @@ enum tps65086_regulators { BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6, LDOA1,
}, \
}
+
+#define TPS65086_REGULATOR_CONFIG(_chip_id, _config) \
+ [_chip_id] = { \
+ .config = _config, \
+ .num_elems = ARRAY_SIZE(_config), \
+ }
+
struct tps65086_regulator {
struct regulator_desc desc;
unsigned int decay_reg;
unsigned int decay_mask;
};
+struct tps65086_regulator_config {
+ struct tps65086_regulator * const config;
+ const unsigned int num_elems;
+};
+
static const struct linear_range tps65086_10mv_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
REGULATOR_LINEAR_RANGE(410000, 0x1, 0x7F, 10000),
@@ -114,7 +134,125 @@ static int tps65086_of_parse_cb(struct device_node *dev,
const struct regulator_desc *desc,
struct regulator_config *config);
-static struct tps65086_regulator regulators[] = {
+static struct tps65086_regulator tps6508640_regulator_config[] = {
+ TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
+ tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
+ tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
+ BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK4VID,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
+ BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
+ BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
+ VDOA1_VID_MASK, TPS65086_SWVTT_EN, BIT(7),
+ tps65086_ldoa1_ranges, 0, 0),
+ TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
+ VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
+ tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
+ VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
+ tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
+ TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
+ TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
+ TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_LDOA1CTRL, BIT(0)),
+};
+
+static struct tps65086_regulator tps65086401_regulator_config[] = {
+ TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
+ tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
+ tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
+ BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK4VID,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
+ BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
+ BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
+ VDOA1_VID_MASK, TPS65086_SWVTT_EN, BIT(7),
+ tps65086_ldoa1_ranges, 0, 0),
+ TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
+ VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
+ tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
+ VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
+ tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
+ TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
+ TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
+};
+
+static struct tps65086_regulator tps6508641_regulator_config[] = {
+ TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK2", "buck2", BUCK2, 0x80, TPS65086_BUCK2CTRL,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(1),
+ tps65086_10mv_ranges, TPS65086_BUCK2CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK3", "buck3", BUCK3, 0x80, TPS65086_BUCK3VID,
+ BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(2),
+ tps65086_10mv_ranges, TPS65086_BUCK3DECAY,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK4", "buck4", BUCK4, 0x80, TPS65086_BUCK4VID,
+ BUCK_VID_MASK, TPS65086_BUCK4CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK4VID,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK5", "buck5", BUCK5, 0x80, TPS65086_BUCK5VID,
+ BUCK_VID_MASK, TPS65086_BUCK5CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK5CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("BUCK6", "buck6", BUCK6, 0x80, TPS65086_BUCK6VID,
+ BUCK_VID_MASK, TPS65086_BUCK6CTRL, BIT(0),
+ tps65086_10mv_ranges, TPS65086_BUCK6CTRL,
+ BIT(0)),
+ TPS65086_REGULATOR("LDOA1", "ldoa1", LDOA1, 0xF, TPS65086_LDOA1CTRL,
+ VDOA1_VID_MASK, TPS65086_SWVTT_EN, BIT(7),
+ tps65086_ldoa1_ranges, 0, 0),
+ TPS65086_REGULATOR("LDOA2", "ldoa2", LDOA2, 0x10, TPS65086_LDOA2VID,
+ VDOA23_VID_MASK, TPS65086_LDOA2CTRL, BIT(0),
+ tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
+ VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
+ tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
+ TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
+ TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
+};
+
+static struct tps65086_regulator tps65086470_regulator_config[] = {
TPS65086_REGULATOR("BUCK1", "buck1", BUCK1, 0x80, TPS65086_BUCK1CTRL,
BUCK_VID_MASK, TPS65086_BUCK123CTRL, BIT(0),
tps65086_10mv_ranges, TPS65086_BUCK1CTRL,
@@ -148,16 +286,25 @@ static struct tps65086_regulator regulators[] = {
TPS65086_REGULATOR("LDOA3", "ldoa3", LDOA3, 0x10, TPS65086_LDOA3VID,
VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
tps65086_ldoa23_ranges, 0, 0),
+ TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
- TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
+};
+
+static const struct tps65086_regulator_config regulator_configs[] = {
+ TPS65086_REGULATOR_CONFIG(TPS6508640, tps6508640_regulator_config),
+ TPS65086_REGULATOR_CONFIG(TPS65086401, tps65086401_regulator_config),
+ TPS65086_REGULATOR_CONFIG(TPS6508641, tps6508641_regulator_config),
+ TPS65086_REGULATOR_CONFIG(TPS65086470, tps65086470_regulator_config)
};
static int tps65086_of_parse_cb(struct device_node *node,
const struct regulator_desc *desc,
struct regulator_config *config)
{
+ struct tps65086 * const tps = dev_get_drvdata(config->dev);
+ struct tps65086_regulator *regulators = tps->reg_config->config;
int ret;
/* Check for 25mV step mode */
@@ -203,9 +350,30 @@ static int tps65086_regulator_probe(struct platform_device *pdev)
{
struct tps65086 *tps = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
+ unsigned int selector_reg_config;
struct regulator_dev *rdev;
int i;
+	/* Select the regulator configuration for the PMIC device in use */
+ switch (tps->chip_id) {
+ case TPS6508640_ID:
+ selector_reg_config = TPS6508640;
+ break;
+ case TPS65086401_ID:
+ selector_reg_config = TPS65086401;
+ break;
+ case TPS6508641_ID:
+ selector_reg_config = TPS6508641;
+ break;
+ case TPS65086470_ID:
+ selector_reg_config = TPS65086470;
+ break;
+ default:
+ dev_err(tps->dev, "Unknown device ID. Cannot determine regulator config.\n");
+ return -ENODEV;
+ }
+ tps->reg_config = &regulator_configs[selector_reg_config];
+
platform_set_drvdata(pdev, tps);
config.dev = &pdev->dev;
@@ -213,12 +381,16 @@ static int tps65086_regulator_probe(struct platform_device *pdev)
config.driver_data = tps;
config.regmap = tps->regmap;
- for (i = 0; i < ARRAY_SIZE(regulators); i++) {
- rdev = devm_regulator_register(&pdev->dev, &regulators[i].desc,
- &config);
+ for (i = 0; i < tps->reg_config->num_elems; ++i) {
+ struct regulator_desc * const desc_ptr = &tps->reg_config->config[i].desc;
+
+ dev_dbg(tps->dev, "Index: %u; Regulator name: \"%s\"; Regulator ID: %d\n",
+ i, desc_ptr->name, desc_ptr->id);
+
+ rdev = devm_regulator_register(&pdev->dev, desc_ptr, &config);
if (IS_ERR(rdev)) {
- dev_err(tps->dev, "failed to register %s regulator\n",
- pdev->name);
+ dev_err(tps->dev, "failed to register %d \"%s\" regulator\n",
+ i, desc_ptr->name);
return PTR_ERR(rdev);
}
}
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 13985883e5f0..f44b5767099c 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -8,12 +8,12 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index 8971b507a79a..b4065356392f 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -15,8 +15,8 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 2a0965ba1570..3a3027e0b94e 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -17,7 +17,6 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index d5a574ec6d12..25ef102c8270 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 3e724f5345de..5bacfcebf59a 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -12,7 +12,6 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index f9c695f9bde8..6eed0f6e0adb 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index 7e2785e10dc6..1d8304b88bd6 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -7,7 +7,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
index 85dca90233f6..2796580a3a3c 100644
--- a/drivers/regulator/vctrl-regulator.c
+++ b/drivers/regulator/vctrl-regulator.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/regulator/vexpress-regulator.c b/drivers/regulator/vexpress-regulator.c
index b545dbc70a4d..6687077e9a97 100644
--- a/drivers/regulator/vexpress-regulator.c
+++ b/drivers/regulator/vexpress-regulator.c
@@ -8,7 +8,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 50a5ff70814a..215597f73be4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3636,11 +3636,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
- if (device->block) {
- rc = fsync_bdev(device->block->bdev);
- if (rc != 0)
- goto interrupted;
- }
+ if (device->block)
+ bdev_mark_dead(device->block->bdev, false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 09acf3853a77..06bcb6c78909 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -412,6 +412,7 @@ removeseg:
}
list_del(&dev_info->lh);
+ dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
@@ -707,9 +708,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
goto out;
out_dax_host:
+ put_device(&dev_info->dev);
dax_remove_host(dev_info->gd);
out_dax:
- put_device(&dev_info->dev);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
put_dev:
@@ -789,6 +790,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
}
list_del(&dev_info->lh);
+ dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
@@ -860,7 +862,7 @@ dcssblk_submit_bio(struct bio *bio)
struct bio_vec bvec;
struct bvec_iter iter;
unsigned long index;
- unsigned long page_addr;
+ void *page_addr;
unsigned long source_addr;
unsigned long bytes_done;
@@ -868,8 +870,8 @@ dcssblk_submit_bio(struct bio *bio)
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
- if ((bio->bi_iter.bi_sector & 7) != 0 ||
- (bio->bi_iter.bi_size & 4095) != 0)
+ if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) ||
+ !IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
/* Request is not page-aligned. */
goto fail;
/* verify data transfer direction */
@@ -889,18 +891,16 @@ dcssblk_submit_bio(struct bio *bio)
index = (bio->bi_iter.bi_sector >> 3);
bio_for_each_segment(bvec, bio, iter) {
- page_addr = (unsigned long)bvec_virt(&bvec);
+ page_addr = bvec_virt(&bvec);
source_addr = dev_info->start + (index<<12) + bytes_done;
- if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
+ if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) ||
+ !IS_ALIGNED(bvec.bv_len, PAGE_SIZE)))
// More paranoia.
goto fail;
- if (bio_data_dir(bio) == READ) {
- memcpy((void*)page_addr, (void*)source_addr,
- bvec.bv_len);
- } else {
- memcpy((void*)source_addr, (void*)page_addr,
- bvec.bv_len);
- }
+ if (bio_data_dir(bio) == READ)
+ memcpy(page_addr, __va(source_addr), bvec.bv_len);
+ else
+ memcpy(__va(source_addr), page_addr, bvec.bv_len);
bytes_done += bvec.bv_len;
}
bio_endio(bio);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 0c1df1d5f1ac..3a9cc8a4a230 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -134,7 +134,7 @@ static void scm_request_done(struct scm_request *scmrq)
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
- mempool_free(virt_to_page(aidaw), aidaw_pool);
+ mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
}
spin_lock_irqsave(&list_lock, flags);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 3c87057436d5..8b4575a0db9f 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -392,10 +392,6 @@ static void __init add_memory_merged(u16 rn)
goto skip_add;
start = rn2addr(first_rn);
size = (unsigned long long) num * sclp.rzm;
- if (start >= VMEM_MAX_PHYS)
- goto skip_add;
- if (start + size > VMEM_MAX_PHYS)
- size = VMEM_MAX_PHYS - start;
if (start >= ident_map_size)
goto skip_add;
if (start + size > ident_map_size)
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index f480d6c7fd39..fdc8668f3fba 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -55,6 +55,7 @@ static void __init sclp_early_facilities_detect(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
if (sccb->cpuoff > 134) {
sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
+ sclp.has_diag320 = !!(sccb->byte_134 & 0x04);
sclp.has_iplcc = !!(sccb->byte_134 & 0x02);
}
if (sccb->cpuoff > 137) {
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 4cebfaaa22b4..eb0520a9d4af 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
order = get_order(session->bufsize);
nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
if (session->cma_alloc) {
- page = virt_to_page((unsigned long)session->response);
+ page = virt_to_page(session->response);
cma_release(vmcp_cma, page, nr_pages);
session->cma_alloc = 0;
} else {
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 942c73a11ca3..bc3be0330f1d 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -3,7 +3,7 @@
* zcore module to export memory content and register sets for creating system
* dumps on SCSI/NVMe disks (zfcp/nvme dump).
*
- * For more information please refer to Documentation/s390/zfcpdump.rst
+ * For more information please refer to Documentation/arch/s390/zfcpdump.rst
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 22d2db690cd3..0edacd101c12 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -11,7 +11,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 420120be300f..339812efe822 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2006, 2021
+ * Copyright IBM Corp. 2006, 2023
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -219,6 +219,15 @@ int ap_sb_available(void)
}
/*
+ * ap_is_se_guest(): Check for SE guest with AP pass-through support.
+ */
+bool ap_is_se_guest(void)
+{
+ return is_prot_virt_guest() && ap_sb_available();
+}
+EXPORT_SYMBOL(ap_is_se_guest);
+
+/*
* ap_fetch_qci_info(): Fetch cryptographic config info
*
* Returns the ap configuration info fetched via PQAP(QCI).
@@ -387,23 +396,6 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
*q_ml = tapq_info.ml;
*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
- switch (*q_type) {
- /* For CEX2 and CEX3 the available functions
- * are not reflected by the facilities bits.
- * Instead it is coded into the type. So here
- * modify the function bits based on the type.
- */
- case AP_DEVICE_TYPE_CEX2A:
- case AP_DEVICE_TYPE_CEX3A:
- *q_fac |= 0x08000000;
- break;
- case AP_DEVICE_TYPE_CEX2C:
- case AP_DEVICE_TYPE_CEX3C:
- *q_fac |= 0x10000000;
- break;
- default:
- break;
- }
return 1;
default:
/*
@@ -1678,8 +1670,8 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
{
int comp_type = 0;
- /* < CEX2A is not supported */
- if (rawtype < AP_DEVICE_TYPE_CEX2A) {
+ /* < CEX4 is not supported */
+ if (rawtype < AP_DEVICE_TYPE_CEX4) {
AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
__func__, AP_QID_CARD(qid),
AP_QID_QUEUE(qid), rawtype);
@@ -1701,7 +1693,7 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
apinfo.cat = AP_DEVICE_TYPE_CEX8;
status = ap_qact(qid, 0, &apinfo);
if (status.response_code == AP_RESPONSE_NORMAL &&
- apinfo.cat >= AP_DEVICE_TYPE_CEX2A &&
+ apinfo.cat >= AP_DEVICE_TYPE_CEX4 &&
apinfo.cat <= AP_DEVICE_TYPE_CEX8)
comp_type = apinfo.cat;
}
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 0d7b7eb374ad..be54b070c031 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2006, 2019
+ * Copyright IBM Corp. 2006, 2023
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -67,15 +67,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
#define AP_RESPONSE_INVALID_DOMAIN 0x42
/*
- * Known device types
+ * Supported AP device types
*/
-#define AP_DEVICE_TYPE_PCICC 3
-#define AP_DEVICE_TYPE_PCICA 4
-#define AP_DEVICE_TYPE_PCIXCC 5
-#define AP_DEVICE_TYPE_CEX2A 6
-#define AP_DEVICE_TYPE_CEX2C 7
-#define AP_DEVICE_TYPE_CEX3A 8
-#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
#define AP_DEVICE_TYPE_CEX6 12
@@ -272,14 +265,6 @@ static inline void ap_release_message(struct ap_message *ap_msg)
kfree_sensitive(ap_msg->private);
}
-/*
- * Note: don't use ap_send/ap_recv after using ap_queue_message
- * for the first time. Otherwise the ap message queue will get
- * confused.
- */
-int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen);
-int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen);
-
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
@@ -289,6 +274,7 @@ void ap_flush_queue(struct ap_queue *aq);
void *ap_airq_ptr(void);
int ap_sb_available(void);
+bool ap_is_se_guest(void);
void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
void ap_bus_force_rescan(void);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 30df83735adf..1336e632adc4 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright IBM Corp. 2016
+ * Copyright IBM Corp. 2016, 2023
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus, queue related code.
@@ -93,51 +93,6 @@ __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
return ap_nqap(qid, psmid, msg, msglen);
}
-int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
-{
- struct ap_queue_status status;
-
- status = __ap_send(qid, psmid, msg, msglen, 0);
- if (status.async)
- return -EPERM;
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_Q_FULL:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- case AP_RESPONSE_REQ_FAC_NOT_INST:
- return -EINVAL;
- default: /* Device is gone. */
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_send);
-
-int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
-{
- struct ap_queue_status status;
-
- if (!msg)
- return -EINVAL;
- status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
- if (status.async)
- return -EPERM;
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (status.queue_empty)
- return -ENOENT;
- return -EBUSY;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- default:
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_recv);
-
/* State machine definitions and helpers */
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index e58bfd225323..6cfb6b2340c9 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -263,7 +263,9 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
/* build a list of apqns suitable for ep11 keys with cpacf support */
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, EP11_API_V, NULL);
+ ZCRYPT_CEX7,
+ ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
+ NULL);
if (rc)
goto out;
@@ -272,7 +274,8 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
- 0, clrkey, keybuf, keybuflen);
+ 0, clrkey, keybuf, keybuflen,
+ PKEY_TYPE_EP11);
if (rc == 0)
break;
}
@@ -287,10 +290,9 @@ out:
/*
* Find card and transform EP11 secure key into protected key.
*/
-static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
- u32 *protkeylen, u32 *protkeytype)
+static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
- struct ep11keyblob *kb = (struct ep11keyblob *)key;
u32 nr_apqns, *apqns = NULL;
u16 card, dom;
int i, rc;
@@ -299,7 +301,9 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
/* build a list of apqns suitable for this key */
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ ZCRYPT_CEX7,
+ ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
+ ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
@@ -307,7 +311,7 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
- rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+ rc = ep11_kblob2protkey(card, dom, key, keylen,
protkey, protkeylen, protkeytype);
if (rc == 0)
break;
@@ -495,7 +499,7 @@ try_via_ep11:
tmpbuf, &tmpbuflen);
if (rc)
goto failure;
- rc = pkey_ep11key2pkey(tmpbuf,
+ rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen,
protkey, protkeylen, protkeytype);
if (!rc)
goto out;
@@ -611,7 +615,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
- rc = pkey_ep11key2pkey(key,
+ rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
}
@@ -620,7 +624,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
if (rc)
goto out;
- rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
+ rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
default:
@@ -713,6 +717,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (*keybufsize < MINEP11AESKEYBLOBSIZE)
return -EINVAL;
break;
+ case PKEY_TYPE_EP11_AES:
+ if (*keybufsize < (sizeof(struct ep11kblob_header) +
+ MINEP11AESKEYBLOBSIZE))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -729,9 +738,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_EP11) {
+ if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES) {
rc = ep11_genaeskey(card, dom, ksize, kflags,
- keybuf, keybufsize);
+ keybuf, keybufsize, ktype);
} else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_genseckey(card, dom, ksize, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -769,6 +779,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (*keybufsize < MINEP11AESKEYBLOBSIZE)
return -EINVAL;
break;
+ case PKEY_TYPE_EP11_AES:
+ if (*keybufsize < (sizeof(struct ep11kblob_header) +
+ MINEP11AESKEYBLOBSIZE))
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -787,9 +802,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_EP11) {
+ if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES) {
rc = ep11_clr2keyblob(card, dom, ksize, kflags,
- clrkey, keybuf, keybufsize);
+ clrkey, keybuf, keybufsize,
+ ktype);
} else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_clr2seckey(card, dom, ksize,
clrkey, keybuf);
@@ -888,6 +905,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES) {
struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int api;
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
if (rc)
@@ -895,10 +913,12 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
if (ktype)
*ktype = PKEY_TYPE_EP11;
if (ksize)
- *ksize = kb->head.keybitlen;
+ *ksize = kb->head.bitlen;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
- ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen));
if (rc)
goto out;
@@ -908,6 +928,32 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
+ struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
+ int api;
+
+ rc = ep11_check_aes_key_with_hdr(debug_info, 3,
+ key, keylen, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_EP11_AES;
+ if (ksize)
+ *ksize = kh->bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ goto out;
+
+ if (flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
} else {
rc = -EINVAL;
}
@@ -949,10 +995,12 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
}
} else if (hdr->type == TOKTYPE_NON_CCA) {
if (hdr->version == TOKVER_EP11_AES) {
- if (keylen < sizeof(struct ep11keyblob))
- return -EINVAL;
if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
return -EINVAL;
+ } else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
+ if (ep11_check_aes_key_with_hdr(debug_info, 3,
+ key, keylen, 1))
+ return -EINVAL;
} else {
return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen,
@@ -980,10 +1028,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
protkey, protkeylen,
protkeytype);
} else {
- /* EP11 AES secure key blob */
- struct ep11keyblob *kb = (struct ep11keyblob *)key;
-
- rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
+ rc = ep11_kblob2protkey(card, dom, key, keylen,
protkey, protkeylen,
protkeytype);
}
@@ -1018,7 +1063,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
return -EINVAL;
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
minhwtype = ZCRYPT_CEX7;
- api = EP11_API_V;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, api, kb->wkvp);
@@ -1034,7 +1079,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
return -EINVAL;
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
minhwtype = ZCRYPT_CEX7;
- api = EP11_API_V;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
}
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, api, kb->wkvp);
@@ -1144,11 +1189,13 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
ktype == PKEY_TYPE_EP11_AES ||
ktype == PKEY_TYPE_EP11_ECC) {
u8 *wkvp = NULL;
+ int api;
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
wkvp = cur_mkvp;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, EP11_API_V, wkvp);
+ ZCRYPT_CEX7, api, wkvp);
if (rc)
goto out;
@@ -1243,12 +1290,14 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
- protkey, protkeylen, protkeytype);
+ protkey, protkeylen,
+ protkeytype);
else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key))
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
- protkey, protkeylen, protkeytype);
+ protkey, protkeylen,
+ protkeytype);
else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
hdr->version == TOKVER_CCA_AES)
rc = cca_sec2protkey(card, dom, key, protkey,
@@ -1466,7 +1515,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
if (IS_ERR(apqns))
return PTR_ERR(apqns);
- kkey = kmalloc(klen, GFP_KERNEL);
+ kkey = kzalloc(klen, GFP_KERNEL);
if (!kkey) {
kfree(apqns);
return -ENOMEM;
@@ -1508,7 +1557,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
if (IS_ERR(apqns))
return PTR_ERR(apqns);
- kkey = kmalloc(klen, GFP_KERNEL);
+ kkey = kzalloc(klen, GFP_KERNEL);
if (!kkey) {
kfree(apqns);
return -ENOMEM;
@@ -2102,7 +2151,7 @@ static struct attribute_group ccacipher_attr_group = {
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
* This function and the sysfs attributes using it provide EP11 key blobs
* padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
- * 320 bytes.
+ * 336 bytes.
*/
static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off,
@@ -2120,7 +2169,9 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
/* build a list of apqns able to generate an cipher key */
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7, EP11_API_V, NULL);
+ ZCRYPT_CEX7,
+ ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
+ NULL);
if (rc)
return rc;
@@ -2130,7 +2181,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i] >> 16;
dom = apqns[i] & 0xFFFF;
- rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
+ PKEY_TYPE_EP11_AES);
if (rc == 0)
break;
}
@@ -2140,7 +2192,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
if (is_xts) {
keysize = MAXEP11AESKEYBLOBSIZE;
buf += MAXEP11AESKEYBLOBSIZE;
- rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
+ PKEY_TYPE_EP11_AES);
if (rc == 0)
return 2 * MAXEP11AESKEYBLOBSIZE;
}
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index b441745b0418..0509f80622cd 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -30,13 +30,12 @@
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"
-#define MAX_RESET_CHECK_WAIT 200 /* Sleep max 200ms for reset check */
#define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
-static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
+static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
/**
* get_update_locks_for_kvm: Acquire the locks required to dynamically update a
@@ -360,6 +359,28 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
return 0;
}
+static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
+{
+ int ret;
+
+ /*
+ * The nib has to be located in shared storage since guest and
+ * host access it. vfio_pin_pages() will do a pin shared and
+ * if that fails (possibly because it's not a shared page) it
+ * calls export. We try to do a second pin shared here so that
+ * the UV gives us an error code if we try to pin a non-shared
+ * page.
+ *
+	 * If the page is already pinned shared, the UV will return success.
+ */
+ ret = uv_pin_shared(addr);
+ if (ret) {
+ /* vfio_pin_pages() likely exported the page so let's re-import */
+ gmap_convert_to_secure(gmap, addr);
+ }
+ return ret;
+}
+
/**
* vfio_ap_irq_enable - Enable Interruption for a APQN
*
@@ -423,6 +444,14 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
aqic_gisa.gisc = isc;
+ /* NIB in non-shared storage is a rc 6 for PV guests */
+ if (kvm_s390_pv_cpu_is_protected(vcpu) &&
+ ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
nisc = kvm_s390_gisc_register(kvm, isc);
if (nisc < 0) {
VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
@@ -675,7 +704,7 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
*/
apqn = AP_MKQID(apid, apqi);
q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
- if (!q || q->reset_rc) {
+ if (!q || q->reset_status.response_code) {
clear_bit_inv(apid,
matrix_mdev->shadow_apcb.apm);
break;
@@ -1608,19 +1637,21 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
{
switch (status->response_code) {
case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_DECONFIGURED:
+ return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
- if (status->queue_empty && !status->irq_enabled)
- return 0;
+ case AP_RESPONSE_BUSY:
return -EBUSY;
- case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
+ case AP_RESPONSE_ASSOC_FAILED:
/*
- * If the AP queue is deconfigured, any subsequent AP command
- * targeting the queue will fail with the same response code. On the
- * other hand, when an AP adapter is deconfigured, the associated
- * queues are reset, so let's return a value indicating the reset
- * for which we're waiting completed successfully.
+ * These asynchronous response codes indicate a PQAP(AAPQ)
+ * instruction to associate a secret with the guest failed. All
+ * subsequent AP instructions will end with the asynchronous
+ * response code until the AP queue is reset; so, let's return
+ * a value indicating a reset needs to be performed again.
*/
- return 0;
+ return -EAGAIN;
default:
WARN(true,
"failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
@@ -1630,91 +1661,105 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
}
}
-static int apq_reset_check(struct vfio_ap_queue *q)
+#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"
+
+static void apq_reset_check(struct work_struct *reset_work)
{
- int ret;
- int iters = MAX_RESET_CHECK_WAIT / AP_RESET_INTERVAL;
+ int ret = -EBUSY, elapsed = 0;
struct ap_queue_status status;
+ struct vfio_ap_queue *q;
- for (; iters > 0; iters--) {
+ q = container_of(reset_work, struct vfio_ap_queue, reset_work);
+ memcpy(&status, &q->reset_status, sizeof(status));
+ while (true) {
msleep(AP_RESET_INTERVAL);
+ elapsed += AP_RESET_INTERVAL;
status = ap_tapq(q->apqn, NULL);
ret = apq_status_check(q->apqn, &status);
- if (ret != -EBUSY)
- return ret;
+ if (ret == -EIO)
+ return;
+ if (ret == -EBUSY) {
+ pr_notice_ratelimited(WAIT_MSG, elapsed,
+ AP_QID_CARD(q->apqn),
+ AP_QID_QUEUE(q->apqn),
+ status.response_code,
+ status.queue_empty,
+ status.irq_enabled);
+ } else {
+ if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
+ q->reset_status.response_code == AP_RESPONSE_BUSY ||
+ q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
+ ret == -EAGAIN) {
+ status = ap_zapq(q->apqn, 0);
+ memcpy(&q->reset_status, &status, sizeof(status));
+ continue;
+ }
+ /*
+ * When an AP adapter is deconfigured, the
+ * associated queues are reset, so let's set the
+ * status response code to 0 so the queue may be
+ * passed through (i.e., not filtered)
+ */
+ if (status.response_code == AP_RESPONSE_DECONFIGURED)
+ q->reset_status.response_code = 0;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID)
+ vfio_ap_free_aqic_resources(q);
+ break;
+ }
}
- WARN_ONCE(iters <= 0,
- "timeout verifying reset of queue %02x.%04x (%u, %u, %u)",
- AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
- status.queue_empty, status.irq_enabled, status.response_code);
- return ret;
}
-static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
+static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
struct ap_queue_status status;
- int ret;
if (!q)
- return 0;
-retry_zapq:
+ return;
status = ap_zapq(q->apqn, 0);
- q->reset_rc = status.response_code;
+ memcpy(&q->reset_status, &status, sizeof(status));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- ret = 0;
- /* if the reset has not completed, wait for it to take effect */
- if (!status.queue_empty || status.irq_enabled)
- ret = apq_reset_check(q);
- break;
case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
/*
- * There is a reset issued by another process in progress. Let's wait
- * for that to complete. Since we have no idea whether it was a RAPQ or
- * ZAPQ, then if it completes successfully, let's issue the ZAPQ.
+ * Let's verify whether the ZAPQ completed successfully on a work queue.
*/
- ret = apq_reset_check(q);
- if (ret)
- break;
- goto retry_zapq;
+ queue_work(system_long_wq, &q->reset_work);
+ break;
case AP_RESPONSE_DECONFIGURED:
/*
* When an AP adapter is deconfigured, the associated
- * queues are reset, so let's return a value indicating the reset
- * completed successfully.
+ * queues are reset, so let's set the status response code to 0
+ * so the queue may be passed through (i.e., not filtered).
*/
- ret = 0;
+ q->reset_status.response_code = 0;
+ vfio_ap_free_aqic_resources(q);
break;
default:
WARN(true,
"PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
- return -EIO;
}
-
- vfio_ap_free_aqic_resources(q);
-
- return ret;
}
static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
- int ret, loop_cursor, rc = 0;
+ int ret = 0, loop_cursor;
struct vfio_ap_queue *q;
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
+ vfio_ap_mdev_reset_queue(q);
+
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
- ret = vfio_ap_mdev_reset_queue(q);
- /*
- * Regardless whether a queue turns out to be busy, or
- * is not operational, we need to continue resetting
- * the remaining queues.
- */
- if (ret)
- rc = ret;
+ flush_work(&q->reset_work);
+
+ if (q->reset_status.response_code)
+ ret = -EIO;
}
- return rc;
+ return ret;
}
static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
@@ -2038,6 +2083,8 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
q->apqn = to_ap_queue(&apdev->device)->qid;
q->saved_isc = VFIO_AP_ISC_INVALID;
+ memset(&q->reset_status, 0, sizeof(q->reset_status));
+ INIT_WORK(&q->reset_work, apq_reset_check);
matrix_mdev = get_update_locks_by_apqn(q->apqn);
if (matrix_mdev) {
@@ -2087,6 +2134,7 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
}
vfio_ap_mdev_reset_queue(q);
+ flush_work(&q->reset_work);
dev_set_drvdata(&apdev->device, NULL);
kfree(q);
release_update_locks_for_mdev(matrix_mdev);
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 4642bbdbd1b2..88aff8b81f2f 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -133,7 +133,8 @@ struct ap_matrix_mdev {
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
* @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
- * @reset_rc: the status response code from the last reset of the queue
+ * @reset_status: the status from the last reset of the queue
+ * @reset_work: work to wait for queue reset to complete
*/
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
@@ -142,7 +143,8 @@ struct vfio_ap_queue {
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
struct hlist_node mdev_qnode;
- unsigned int reset_rc;
+ struct ap_queue_status reset_status;
+ struct work_struct reset_work;
};
int vfio_ap_mdev_register(void);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 83f692c9c197..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright IBM Corp. 2001, 2012
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Ralph Wuerthner <rwuerthn@de.ibm.com>
- * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/mod_devicetable.h>
-
-#include "ap_bus.h"
-#include "zcrypt_api.h"
-#include "zcrypt_error.h"
-#include "zcrypt_cex2a.h"
-#include "zcrypt_msgtype50.h"
-
-#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
-#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
-#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
-#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
-
-#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
-#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
-
-#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
- * (max outputdatalength) +
- * type80_hdr
- */
-#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
-
-#define CEX2A_CLEANUP_TIME (15 * HZ)
-#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
-
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \
- "Copyright IBM Corp. 2001, 2018");
-MODULE_LICENSE("GPL");
-
-static struct ap_device_id zcrypt_cex2a_card_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2A,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3A,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
-
-static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2A,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3A,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
-
-/*
- * Probe function for CEX2A card devices. It always accepts the AP device
- * since the bus_match already checked the card type.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
-{
- /*
- * Normalized speed ratings per crypto adapter
- * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
- */
- static const int CEX2A_SPEED_IDX[] = {
- 800, 1000, 2000, 900, 1200, 2400, 0, 0};
- static const int CEX3A_SPEED_IDX[] = {
- 400, 500, 1000, 450, 550, 1200, 0, 0};
-
- struct ap_card *ac = to_ap_card(&ap_dev->device);
- struct zcrypt_card *zc;
- int rc = 0;
-
- zc = zcrypt_card_alloc();
- if (!zc)
- return -ENOMEM;
- zc->card = ac;
- dev_set_drvdata(&ap_dev->device, zc);
-
- if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
- zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zc->speed_rating = CEX2A_SPEED_IDX;
- zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- zc->type_string = "CEX2A";
- zc->user_space_type = ZCRYPT_CEX2A;
- } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
- zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
- zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
- }
- zc->speed_rating = CEX3A_SPEED_IDX;
- zc->type_string = "CEX3A";
- zc->user_space_type = ZCRYPT_CEX3A;
- } else {
- zcrypt_card_free(zc);
- return -ENODEV;
- }
- zc->online = 1;
-
- rc = zcrypt_card_register(zc);
- if (rc)
- zcrypt_card_free(zc);
-
- return rc;
-}
-
-/*
- * This is called to remove the CEX2A card driver information
- * if an AP card device is removed.
- */
-static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
-{
- struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
-
- zcrypt_card_unregister(zc);
-}
-
-static struct ap_driver zcrypt_cex2a_card_driver = {
- .probe = zcrypt_cex2a_card_probe,
- .remove = zcrypt_cex2a_card_remove,
- .ids = zcrypt_cex2a_card_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-/*
- * Probe function for CEX2A queue devices. It always accepts the AP device
- * since the bus_match already checked the queue type.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = NULL;
- int rc;
-
- switch (ap_dev->device_type) {
- case AP_DEVICE_TYPE_CEX2A:
- zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
- if (!zq)
- return -ENOMEM;
- break;
- case AP_DEVICE_TYPE_CEX3A:
- zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
- if (!zq)
- return -ENOMEM;
- break;
- }
- if (!zq)
- return -ENODEV;
- zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
- zq->queue = aq;
- zq->online = 1;
- atomic_set(&zq->load, 0);
- ap_queue_init_state(aq);
- ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX2A_CLEANUP_TIME;
- dev_set_drvdata(&ap_dev->device, zq);
- rc = zcrypt_queue_register(zq);
- if (rc)
- zcrypt_queue_free(zq);
-
- return rc;
-}
-
-/*
- * This is called to remove the CEX2A queue driver information
- * if an AP queue device is removed.
- */
-static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
-{
- struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
-
- zcrypt_queue_unregister(zq);
-}
-
-static struct ap_driver zcrypt_cex2a_queue_driver = {
- .probe = zcrypt_cex2a_queue_probe,
- .remove = zcrypt_cex2a_queue_remove,
- .ids = zcrypt_cex2a_queue_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-int __init zcrypt_cex2a_init(void)
-{
- int rc;
-
- rc = ap_driver_register(&zcrypt_cex2a_card_driver,
- THIS_MODULE, "cex2acard");
- if (rc)
- return rc;
-
- rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
- THIS_MODULE, "cex2aqueue");
- if (rc)
- ap_driver_unregister(&zcrypt_cex2a_card_driver);
-
- return rc;
-}
-
-void __exit zcrypt_cex2a_exit(void)
-{
- ap_driver_unregister(&zcrypt_cex2a_queue_driver);
- ap_driver_unregister(&zcrypt_cex2a_card_driver);
-}
-
-module_init(zcrypt_cex2a_init);
-module_exit(zcrypt_cex2a_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 7842214d9d09..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright IBM Corp. 2001, 2006
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef _ZCRYPT_CEX2A_H_
-#define _ZCRYPT_CEX2A_H_
-
-/**
- * The type 50 message family is associated with CEXxA cards.
- *
- * The four members of the family are described below.
- *
- * Note that all unsigned char arrays are right-justified and left-padded
- * with zeroes.
- *
- * Note that all reserved fields must be zeroes.
- */
-struct type50_hdr {
- unsigned char reserved1;
- unsigned char msg_type_code; /* 0x50 */
- unsigned short msg_len;
- unsigned char reserved2;
- unsigned char ignored;
- unsigned short reserved3;
-} __packed;
-
-#define TYPE50_TYPE_CODE 0x50
-
-#define TYPE50_MEB1_FMT 0x0001
-#define TYPE50_MEB2_FMT 0x0002
-#define TYPE50_MEB3_FMT 0x0003
-#define TYPE50_CRB1_FMT 0x0011
-#define TYPE50_CRB2_FMT 0x0012
-#define TYPE50_CRB3_FMT 0x0013
-
-/* Mod-Exp, with a small modulus */
-struct type50_meb1_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0001 */
- unsigned char reserved[6];
- unsigned char exponent[128];
- unsigned char modulus[128];
- unsigned char message[128];
-} __packed;
-
-/* Mod-Exp, with a large modulus */
-struct type50_meb2_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0002 */
- unsigned char reserved[6];
- unsigned char exponent[256];
- unsigned char modulus[256];
- unsigned char message[256];
-} __packed;
-
-/* Mod-Exp, with a larger modulus */
-struct type50_meb3_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0003 */
- unsigned char reserved[6];
- unsigned char exponent[512];
- unsigned char modulus[512];
- unsigned char message[512];
-} __packed;
-
-/* CRT, with a small modulus */
-struct type50_crb1_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0011 */
- unsigned char reserved[6];
- unsigned char p[64];
- unsigned char q[64];
- unsigned char dp[64];
- unsigned char dq[64];
- unsigned char u[64];
- unsigned char message[128];
-} __packed;
-
-/* CRT, with a large modulus */
-struct type50_crb2_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0012 */
- unsigned char reserved[6];
- unsigned char p[128];
- unsigned char q[128];
- unsigned char dp[128];
- unsigned char dq[128];
- unsigned char u[128];
- unsigned char message[256];
-} __packed;
-
-/* CRT, with a larger modulus */
-struct type50_crb3_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0013 */
- unsigned char reserved[6];
- unsigned char p[256];
- unsigned char q[256];
- unsigned char dp[256];
- unsigned char dq[256];
- unsigned char u[256];
- unsigned char message[512];
-} __packed;
-
-/**
- * The type 80 response family is associated with a CEXxA cards.
- *
- * Note that all unsigned char arrays are right-justified and left-padded
- * with zeroes.
- *
- * Note that all reserved fields must be zeroes.
- */
-
-#define TYPE80_RSP_CODE 0x80
-
-struct type80_hdr {
- unsigned char reserved1;
- unsigned char type; /* 0x80 */
- unsigned short len;
- unsigned char code; /* 0x00 */
- unsigned char reserved2[3];
- unsigned char reserved3[8];
-} __packed;
-
-int zcrypt_cex2a_init(void);
-void zcrypt_cex2a_exit(void);
-
-#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 251b5bd3d19c..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -1,421 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright IBM Corp. 2001, 2018
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Ralph Wuerthner <rwuerthn@de.ibm.com>
- * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/mod_devicetable.h>
-
-#include "ap_bus.h"
-#include "zcrypt_api.h"
-#include "zcrypt_error.h"
-#include "zcrypt_msgtype6.h"
-#include "zcrypt_cex2c.h"
-#include "zcrypt_cca_key.h"
-#include "zcrypt_ccamisc.h"
-
-#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
-#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
-#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */
-#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024)
-#define CEX2C_CLEANUP_TIME (15 * HZ)
-
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \
- "Copyright IBM Corp. 2001, 2018");
-MODULE_LICENSE("GPL");
-
-static struct ap_device_id zcrypt_cex2c_card_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2C,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3C,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids);
-
-static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2C,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3C,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
-
-/*
- * CCA card additional device attributes
- */
-static ssize_t cca_serialnr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct zcrypt_card *zc = dev_get_drvdata(dev);
- struct cca_info ci;
- struct ap_card *ac = to_ap_card(dev);
-
- memset(&ci, 0, sizeof(ci));
-
- if (ap_domain_index >= 0)
- cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
-
- return sysfs_emit(buf, "%s\n", ci.serial);
-}
-
-static struct device_attribute dev_attr_cca_serialnr =
- __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
-
-static struct attribute *cca_card_attrs[] = {
- &dev_attr_cca_serialnr.attr,
- NULL,
-};
-
-static const struct attribute_group cca_card_attr_grp = {
- .attrs = cca_card_attrs,
-};
-
- /*
- * CCA queue additional device attributes
- */
-static ssize_t cca_mkvps_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct zcrypt_queue *zq = dev_get_drvdata(dev);
- int n = 0;
- struct cca_info ci;
- static const char * const cao_state[] = { "invalid", "valid" };
- static const char * const new_state[] = { "empty", "partial", "full" };
-
- memset(&ci, 0, sizeof(ci));
-
- cca_get_info(AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- &ci, zq->online);
-
- if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
- n = sysfs_emit(buf, "AES NEW: %s 0x%016llx\n",
- new_state[ci.new_aes_mk_state - '1'],
- ci.new_aes_mkvp);
- else
- n = sysfs_emit(buf, "AES NEW: - -\n");
-
- if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
- n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
- cao_state[ci.cur_aes_mk_state - '1'],
- ci.cur_aes_mkvp);
- else
- n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
-
- if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
- n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
- cao_state[ci.old_aes_mk_state - '1'],
- ci.old_aes_mkvp);
- else
- n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
-
- if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
- n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
- new_state[ci.new_apka_mk_state - '1'],
- ci.new_apka_mkvp);
- else
- n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
-
- if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
- n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
- cao_state[ci.cur_apka_mk_state - '1'],
- ci.cur_apka_mkvp);
- else
- n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
-
- if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
- n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
- cao_state[ci.old_apka_mk_state - '1'],
- ci.old_apka_mkvp);
- else
- n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
-
- return n;
-}
-
-static struct device_attribute dev_attr_cca_mkvps =
- __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
-
-static struct attribute *cca_queue_attrs[] = {
- &dev_attr_cca_mkvps.attr,
- NULL,
-};
-
-static const struct attribute_group cca_queue_attr_grp = {
- .attrs = cca_queue_attrs,
-};
-
-/*
- * Large random number detection function. Its sends a message to a CEX2C/CEX3C
- * card to find out if large random numbers are supported.
- * @ap_dev: pointer to the AP device.
- *
- * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
- */
-static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
-{
- struct ap_message ap_msg;
- unsigned long psmid;
- unsigned int domain;
- struct {
- struct type86_hdr hdr;
- struct type86_fmt2_ext fmt2;
- struct CPRBX cprbx;
- } __packed *reply;
- struct {
- struct type6_hdr hdr;
- struct CPRBX cprbx;
- char function_code[2];
- short int rule_length;
- char rule[8];
- short int verb_length;
- short int key_length;
- } __packed *msg;
- int rc, i;
-
- ap_init_message(&ap_msg);
- ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.msg)
- return -ENOMEM;
- ap_msg.bufsize = PAGE_SIZE;
-
- rng_type6cprb_msgx(&ap_msg, 4, &domain);
-
- msg = ap_msg.msg;
- msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
-
- rc = ap_send(aq->qid, 0x0102030405060708UL, ap_msg.msg, ap_msg.len);
- if (rc)
- goto out_free;
-
- /* Wait for the test message to complete. */
- for (i = 0; i < 2 * HZ; i++) {
- msleep(1000 / HZ);
- rc = ap_recv(aq->qid, &psmid, ap_msg.msg, ap_msg.bufsize);
- if (rc == 0 && psmid == 0x0102030405060708UL)
- break;
- }
-
- if (i >= 2 * HZ) {
- /* Got no answer. */
- rc = -ENODEV;
- goto out_free;
- }
-
- reply = ap_msg.msg;
- if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
- rc = 1;
- else
- rc = 0;
-out_free:
- free_page((unsigned long)ap_msg.msg);
- return rc;
-}
-
-/*
- * Probe function for CEX2C/CEX3C card devices. It always accepts the
- * AP device since the bus_match already checked the hardware type.
- * @ap_dev: pointer to the AP card device.
- */
-static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
-{
- /*
- * Normalized speed ratings per crypto adapter
- * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
- */
- static const int CEX2C_SPEED_IDX[] = {
- 1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
- static const int CEX3C_SPEED_IDX[] = {
- 500, 700, 1400, 550, 800, 1500, 80, 10};
-
- struct ap_card *ac = to_ap_card(&ap_dev->device);
- struct zcrypt_card *zc;
- int rc = 0;
-
- zc = zcrypt_card_alloc();
- if (!zc)
- return -ENOMEM;
- zc->card = ac;
- dev_set_drvdata(&ap_dev->device, zc);
- switch (ac->ap_dev.device_type) {
- case AP_DEVICE_TYPE_CEX2C:
- zc->user_space_type = ZCRYPT_CEX2C;
- zc->type_string = "CEX2C";
- zc->speed_rating = CEX2C_SPEED_IDX;
- zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
- zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
- break;
- case AP_DEVICE_TYPE_CEX3C:
- zc->user_space_type = ZCRYPT_CEX3C;
- zc->type_string = "CEX3C";
- zc->speed_rating = CEX3C_SPEED_IDX;
- zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
- zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
- break;
- default:
- zcrypt_card_free(zc);
- return -ENODEV;
- }
- zc->online = 1;
-
- rc = zcrypt_card_register(zc);
- if (rc) {
- zcrypt_card_free(zc);
- return rc;
- }
-
- if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
- rc = sysfs_create_group(&ap_dev->device.kobj,
- &cca_card_attr_grp);
- if (rc) {
- zcrypt_card_unregister(zc);
- zcrypt_card_free(zc);
- }
- }
-
- return rc;
-}
-
-/*
- * This is called to remove the CEX2C/CEX3C card driver information
- * if an AP card device is removed.
- */
-static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
-{
- struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
- struct ap_card *ac = to_ap_card(&ap_dev->device);
-
- if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
- sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
-
- zcrypt_card_unregister(zc);
-}
-
-static struct ap_driver zcrypt_cex2c_card_driver = {
- .probe = zcrypt_cex2c_card_probe,
- .remove = zcrypt_cex2c_card_remove,
- .ids = zcrypt_cex2c_card_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-/*
- * Probe function for CEX2C/CEX3C queue devices. It always accepts the
- * AP device since the bus_match already checked the hardware type.
- * @ap_dev: pointer to the AP card device.
- */
-static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq;
- int rc;
-
- zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE);
- if (!zq)
- return -ENOMEM;
- zq->queue = aq;
- zq->online = 1;
- atomic_set(&zq->load, 0);
- ap_rapq(aq->qid, 0);
- rc = zcrypt_cex2c_rng_supported(aq);
- if (rc < 0) {
- zcrypt_queue_free(zq);
- return rc;
- }
- if (rc)
- zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_DEFAULT);
- else
- zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_NORNG);
- ap_queue_init_state(aq);
- ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX2C_CLEANUP_TIME;
- dev_set_drvdata(&ap_dev->device, zq);
- rc = zcrypt_queue_register(zq);
- if (rc) {
- zcrypt_queue_free(zq);
- return rc;
- }
-
- if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
- rc = sysfs_create_group(&ap_dev->device.kobj,
- &cca_queue_attr_grp);
- if (rc) {
- zcrypt_queue_unregister(zq);
- zcrypt_queue_free(zq);
- }
- }
-
- return rc;
-}
-
-/*
- * This is called to remove the CEX2C/CEX3C queue driver information
- * if an AP queue device is removed.
- */
-static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
-{
- struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-
- if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
- sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
-
- zcrypt_queue_unregister(zq);
-}
-
-static struct ap_driver zcrypt_cex2c_queue_driver = {
- .probe = zcrypt_cex2c_queue_probe,
- .remove = zcrypt_cex2c_queue_remove,
- .ids = zcrypt_cex2c_queue_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-int __init zcrypt_cex2c_init(void)
-{
- int rc;
-
- rc = ap_driver_register(&zcrypt_cex2c_card_driver,
- THIS_MODULE, "cex2card");
- if (rc)
- return rc;
-
- rc = ap_driver_register(&zcrypt_cex2c_queue_driver,
- THIS_MODULE, "cex2cqueue");
- if (rc)
- ap_driver_unregister(&zcrypt_cex2c_card_driver);
-
- return rc;
-}
-
-void zcrypt_cex2c_exit(void)
-{
- ap_driver_unregister(&zcrypt_cex2c_queue_driver);
- ap_driver_unregister(&zcrypt_cex2c_card_driver);
-}
-
-module_init(zcrypt_cex2c_init);
-module_exit(zcrypt_cex2c_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2c.h b/drivers/s390/crypto/zcrypt_cex2c.h
index 6ec405c2bec2..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.h
+++ b/drivers/s390/crypto/zcrypt_cex2c.h
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright IBM Corp. 2001, 2018
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- */
-
-#ifndef _ZCRYPT_CEX2C_H_
-#define _ZCRYPT_CEX2C_H_
-
-int zcrypt_cex2c_init(void);
-void zcrypt_cex2c_exit(void);
-
-#endif /* _ZCRYPT_CEX2C_H_ */
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 958f5ee47f1b..0a877f9792c2 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -29,6 +29,8 @@
#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+#define EP11_PINBLOB_V1_BYTES 56
+
/* default iv used here */
static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
@@ -113,6 +115,109 @@ static void __exit card_cache_free(void)
spin_unlock_bh(&card_list_lock);
}
+static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
+ struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
+ u8 **kbpl, size_t *kbplsize)
+{
+ struct ep11kblob_header *hdr = NULL;
+ size_t hdrsize, plsize = 0;
+ int rc = -EINVAL;
+ u8 *pl = NULL;
+
+ if (kblen < sizeof(struct ep11kblob_header))
+ goto out;
+ hdr = (struct ep11kblob_header *)kb;
+
+ switch (kbver) {
+ case TOKVER_EP11_AES:
+ /* header overlays the payload */
+ hdrsize = 0;
+ break;
+ case TOKVER_EP11_ECC_WITH_HEADER:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ /* payload starts after the header */
+ hdrsize = sizeof(struct ep11kblob_header);
+ break;
+ default:
+ goto out;
+ }
+
+ plsize = kblen - hdrsize;
+ pl = (u8 *)kb + hdrsize;
+
+ if (kbhdr)
+ *kbhdr = hdr;
+ if (kbhdrsize)
+ *kbhdrsize = hdrsize;
+ if (kbpl)
+ *kbpl = pl;
+ if (kbplsize)
+ *kbplsize = plsize;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int ep11_kb_decode(const u8 *kb, size_t kblen,
+ struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
+ struct ep11keyblob **kbpl, size_t *kbplsize)
+{
+ struct ep11kblob_header *tmph, *hdr = NULL;
+ size_t hdrsize = 0, plsize = 0;
+ struct ep11keyblob *pl = NULL;
+ int rc = -EINVAL;
+ u8 *tmpp;
+
+ if (kblen < sizeof(struct ep11kblob_header))
+ goto out;
+ tmph = (struct ep11kblob_header *)kb;
+
+ if (tmph->type != TOKTYPE_NON_CCA &&
+ tmph->len > kblen)
+ goto out;
+
+ if (ep11_kb_split(kb, kblen, tmph->version,
+ &hdr, &hdrsize, &tmpp, &plsize))
+ goto out;
+
+ if (plsize < sizeof(struct ep11keyblob))
+ goto out;
+
+ if (!is_ep11_keyblob(tmpp))
+ goto out;
+
+ pl = (struct ep11keyblob *)tmpp;
+ plsize = hdr->len - hdrsize;
+
+ if (kbhdr)
+ *kbhdr = hdr;
+ if (kbhdrsize)
+ *kbhdrsize = hdrsize;
+ if (kbpl)
+ *kbpl = pl;
+ if (kbplsize)
+ *kbplsize = plsize;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/*
+ * For valid ep11 keyblobs, returns a reference to the wrapping key verification
+ * pattern. Otherwise NULL.
+ */
+const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
+{
+ struct ep11keyblob *kb;
+
+ if (ep11_kb_decode(keyblob, keybloblen, NULL, NULL, &kb, NULL))
+ return NULL;
+ return kb->wkvp;
+}
+EXPORT_SYMBOL(ep11_kb_wkvp);
+
/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
*/
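Note: the hunk above adds ep11_kb_split()/ep11_kb_decode() and exports ep11_kb_wkvp(), which accepts either blob layout and returns a pointer to the 16-byte wkvp field from struct ep11keyblob, or NULL if the blob cannot be decoded. A minimal usage sketch, assuming kernel context and the prototype from zcrypt_ep11misc.h; the helper name below is hypothetical.

#include <linux/types.h>
#include <linux/string.h>
#include "zcrypt_ep11misc.h"

/* Check whether two EP11 key blobs were wrapped by the same wrapping key. */
static bool example_blobs_share_wk(const u8 *blob1, size_t len1,
				   const u8 *blob2, size_t len2)
{
	const u8 *wkvp1 = ep11_kb_wkvp(blob1, len1);
	const u8 *wkvp2 = ep11_kb_wkvp(blob2, len2);

	/* NULL means the buffer is not a decodable EP11 key blob */
	if (!wkvp1 || !wkvp2)
		return false;

	return memcmp(wkvp1, wkvp2, 16) == 0;
}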
@@ -489,7 +594,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
- int api = 1, rc = -ENOMEM;
+ int api = EP11_API_V1, rc = -ENOMEM;
/* request cprb and payload */
req = alloc_cprb(sizeof(struct ep11_info_req_pl));
@@ -664,8 +769,9 @@ EXPORT_SYMBOL(ep11_get_domain_info);
*/
#define KEY_ATTR_DEFAULTS 0x00200c00
-int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize)
+static int _ep11_genaeskey(u16 card, u16 domain,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
{
struct keygen_req_pl {
struct pl_head head;
@@ -685,8 +791,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
u32 attr_bool_bits;
u32 attr_val_len_type;
u32 attr_val_len_value;
- u8 pin_tag;
- u8 pin_len;
+ /* followed by empty pin tag or empty pinblob tag */
} __packed * req_pl;
struct keygen_rep_pl {
struct pl_head head;
@@ -699,10 +804,11 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
u8 data[512];
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
+ size_t req_pl_size, pinblob_size = 0;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
- struct ep11keyblob *kb;
int api, rc = -ENOMEM;
+ u8 *p;
switch (keybitsize) {
case 128:
@@ -718,12 +824,22 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
}
/* request cprb and payload */
- req = alloc_cprb(sizeof(struct keygen_req_pl));
+ api = (!keygenflags || keygenflags & 0x00200000) ?
+ EP11_API_V4 : EP11_API_V1;
+ if (ap_is_se_guest()) {
+ /*
+ * genkey within SE environment requires API ordinal 6
+ * with empty pinblob
+ */
+ api = EP11_API_V6;
+ pinblob_size = EP11_PINBLOB_V1_BYTES;
+ }
+ req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size);
+ req = alloc_cprb(req_pl_size);
if (!req)
goto out;
req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req));
- api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
- prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+ prep_head(&req_pl->head, req_pl_size, api, 21); /* GenerateKey */
req_pl->var_tag = 0x04;
req_pl->var_len = sizeof(u32);
req_pl->keybytes_tag = 0x04;
@@ -739,7 +855,10 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
req_pl->attr_val_len_value = keybitsize / 8;
- req_pl->pin_tag = 0x04;
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
+ /* pin tag */
+ *p++ = 0x04;
+ *p++ = pinblob_size;
/* reply cprb and payload */
rep = alloc_cprb(sizeof(struct keygen_rep_pl));
@@ -754,7 +873,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
target.ap_id = card;
target.dom_id = domain;
prep_urb(urb, &target, 1,
- req, sizeof(*req) + sizeof(*req_pl),
+ req, sizeof(*req) + req_pl_size,
rep, sizeof(*rep) + sizeof(*rep_pl));
rc = zcrypt_send_ep11_cprb(urb);
@@ -780,14 +899,9 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
goto out;
}
- /* copy key blob and set header values */
+ /* copy key blob */
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
*keybufsize = rep_pl->data_len;
- kb = (struct ep11keyblob *)keybuf;
- kb->head.type = TOKTYPE_NON_CCA;
- kb->head.len = rep_pl->data_len;
- kb->head.version = TOKVER_EP11_AES;
- kb->head.keybitlen = keybitsize;
out:
kfree(req);
@@ -795,6 +909,43 @@ out:
kfree(urb);
return rc;
}
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize, u32 keybufver)
+{
+ struct ep11kblob_header *hdr;
+ size_t hdr_size, pl_size;
+ u8 *pl;
+ int rc;
+
+ switch (keybufver) {
+ case TOKVER_EP11_AES:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
+ &hdr, &hdr_size, &pl, &pl_size);
+ if (rc)
+ return rc;
+
+ rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
+ pl, &pl_size);
+ if (rc)
+ return rc;
+
+ *keybufsize = hdr_size + pl_size;
+
+ /* update header information */
+ hdr->type = TOKTYPE_NON_CCA;
+ hdr->len = *keybufsize;
+ hdr->version = keybufver;
+ hdr->bitlen = keybitsize;
+
+ return 0;
+}
EXPORT_SYMBOL(ep11_genaeskey);
static int ep11_cryptsingle(u16 card, u16 domain,
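Note: the new ep11_genaeskey() wrapper above splits the caller's buffer with ep11_kb_split(), generates the key into the payload part, then rewrites the header. A hypothetical caller sketch for the extended signature, assuming the caller passes its buffer size in *bloblen (it comes back as the generated blob length) and wants the headered layout; zcrypt_ep11misc.h pulls in the TOKVER_* values via its asm includes.

#include <linux/types.h>
#include "zcrypt_ep11misc.h"

static int example_gen_headered_aes_key(u16 card, u16 domain,
					u8 *blob, size_t *bloblen)
{
	/* 256-bit AES key, default keygenflags, header + payload blob format */
	return ep11_genaeskey(card, domain, 256, 0,
			      blob, bloblen, TOKVER_EP11_AES_WITH_HEADER);
}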
@@ -830,7 +981,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
size_t req_pl_size, rep_pl_size;
- int n, api = 1, rc = -ENOMEM;
+ int n, api = EP11_API_V1, rc = -ENOMEM;
u8 *p;
/* the simple asn1 coding used has length limits */
@@ -924,12 +1075,12 @@ out:
return rc;
}
-static int ep11_unwrapkey(u16 card, u16 domain,
- const u8 *kek, size_t keksize,
- const u8 *enckey, size_t enckeysize,
- u32 mech, const u8 *iv,
- u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize)
+static int _ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
{
struct uw_req_pl {
struct pl_head head;
@@ -949,7 +1100,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
* maybe followed by iv data
* followed by kek tag + kek blob
* followed by empty mac tag
- * followed by empty pin tag
+ * followed by empty pin tag or empty pinblob tag
 * followed by encrypted key tag + bytes
*/
} __packed * req_pl;
@@ -964,21 +1115,30 @@ static int ep11_unwrapkey(u16 card, u16 domain,
u8 data[512];
} __packed * rep_pl;
struct ep11_cprb *req = NULL, *rep = NULL;
+ size_t req_pl_size, pinblob_size = 0;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
- struct ep11keyblob *kb;
- size_t req_pl_size;
int api, rc = -ENOMEM;
u8 *p;
/* request cprb and payload */
+ api = (!keygenflags || keygenflags & 0x00200000) ?
+ EP11_API_V4 : EP11_API_V1;
+ if (ap_is_se_guest()) {
+ /*
+ * unwrap within SE environment requires API ordinal 6
+ * with empty pinblob
+ */
+ api = EP11_API_V6;
+ pinblob_size = EP11_PINBLOB_V1_BYTES;
+ }
req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
- + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+ + ASN1TAGLEN(keksize) + ASN1TAGLEN(0)
+ + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize);
req = alloc_cprb(req_pl_size);
if (!req)
goto out;
req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req));
- api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
req_pl->attr_tag = 0x04;
req_pl->attr_len = 7 * sizeof(u32);
@@ -1003,9 +1163,10 @@ static int ep11_unwrapkey(u16 card, u16 domain,
/* empty mac key tag */
*p++ = 0x04;
*p++ = 0;
- /* empty pin tag */
+ /* pin tag */
*p++ = 0x04;
- *p++ = 0;
+ *p++ = pinblob_size;
+ p += pinblob_size;
/* encrypted key value tag and bytes */
p += asn1tag_write(p, 0x04, enckey, enckeysize);
@@ -1048,14 +1209,9 @@ static int ep11_unwrapkey(u16 card, u16 domain,
goto out;
}
- /* copy key blob and set header values */
+ /* copy key blob */
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
*keybufsize = rep_pl->data_len;
- kb = (struct ep11keyblob *)keybuf;
- kb->head.type = TOKTYPE_NON_CCA;
- kb->head.len = rep_pl->data_len;
- kb->head.version = TOKVER_EP11_AES;
- kb->head.keybitlen = keybitsize;
out:
kfree(req);
@@ -1064,10 +1220,46 @@ out:
return rc;
}
-static int ep11_wrapkey(u16 card, u16 domain,
- const u8 *key, size_t keysize,
- u32 mech, const u8 *iv,
- u8 *databuf, size_t *datasize)
+static int ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize,
+ u8 keybufver)
+{
+ struct ep11kblob_header *hdr;
+ size_t hdr_size, pl_size;
+ u8 *pl;
+ int rc;
+
+ rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
+ &hdr, &hdr_size, &pl, &pl_size);
+ if (rc)
+ return rc;
+
+ rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
+ mech, iv, keybitsize, keygenflags,
+ pl, &pl_size);
+ if (rc)
+ return rc;
+
+ *keybufsize = hdr_size + pl_size;
+
+ /* update header information */
+ hdr = (struct ep11kblob_header *)keybuf;
+ hdr->type = TOKTYPE_NON_CCA;
+ hdr->len = *keybufsize;
+ hdr->version = keybufver;
+ hdr->bitlen = keybitsize;
+
+ return 0;
+}
+
+static int _ep11_wrapkey(u16 card, u16 domain,
+ const u8 *key, size_t keysize,
+ u32 mech, const u8 *iv,
+ u8 *databuf, size_t *datasize)
{
struct wk_req_pl {
struct pl_head head;
@@ -1097,20 +1289,10 @@ static int ep11_wrapkey(u16 card, u16 domain,
struct ep11_cprb *req = NULL, *rep = NULL;
struct ep11_target_dev target;
struct ep11_urb *urb = NULL;
- struct ep11keyblob *kb;
size_t req_pl_size;
int api, rc = -ENOMEM;
- bool has_header = false;
u8 *p;
- /* maybe the session field holds a header with key info */
- kb = (struct ep11keyblob *)key;
- if (kb->head.type == TOKTYPE_NON_CCA &&
- kb->head.version == TOKVER_EP11_AES) {
- has_header = true;
- keysize = min_t(size_t, kb->head.len, keysize);
- }
-
/* request cprb and payload */
req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ ASN1TAGLEN(keysize) + 4;
@@ -1120,7 +1302,8 @@ static int ep11_wrapkey(u16 card, u16 domain,
if (!mech || mech == 0x80060001)
req->flags |= 0x20; /* CPACF_WRAP needs special bit */
req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req));
- api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+ api = (!mech || mech == 0x80060001) ? /* CKM_IBM_CPACF_WRAP */
+ EP11_API_V4 : EP11_API_V1;
prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
req_pl->var_tag = 0x04;
req_pl->var_len = sizeof(u32);
@@ -1135,11 +1318,6 @@ static int ep11_wrapkey(u16 card, u16 domain,
}
/* key blob */
p += asn1tag_write(p, 0x04, key, keysize);
- /* maybe the key argument needs the head data cleaned out */
- if (has_header) {
- kb = (struct ep11keyblob *)(p - keysize);
- memset(&kb->head, 0, sizeof(kb->head));
- }
/* empty kek tag */
*p++ = 0x04;
*p++ = 0;
@@ -1198,10 +1376,10 @@ out:
}
int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+ u32 keytype)
{
int rc;
- struct ep11keyblob *kb;
u8 encbuf[64], *kek = NULL;
size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
@@ -1223,17 +1401,15 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
}
/* Step 1: generate AES 256 bit random kek key */
- rc = ep11_genaeskey(card, domain, 256,
- 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
- kek, &keklen);
+ rc = _ep11_genaeskey(card, domain, 256,
+ 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+ kek, &keklen);
if (rc) {
DEBUG_ERR(
"%s generate kek key failed, rc=%d\n",
__func__, rc);
goto out;
}
- kb = (struct ep11keyblob *)kek;
- memset(&kb->head, 0, sizeof(kb->head));
/* Step 2: encrypt clear key value with the kek key */
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
@@ -1248,7 +1424,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 3: import the encrypted key value as a new key */
rc = ep11_unwrapkey(card, domain, kek, keklen,
encbuf, encbuflen, 0, def_iv,
- keybitsize, 0, keybuf, keybufsize);
+ keybitsize, 0, keybuf, keybufsize, keytype);
if (rc) {
DEBUG_ERR(
"%s importing key value as new key failed,, rc=%d\n",
@@ -1262,11 +1438,12 @@ out:
}
EXPORT_SYMBOL(ep11_clr2keyblob);
-int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
+int ep11_kblob2protkey(u16 card, u16 dom,
+ const u8 *keyblob, size_t keybloblen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
- int rc = -EIO;
- u8 *wkbuf = NULL;
+ struct ep11kblob_header *hdr;
+ struct ep11keyblob *key;
size_t wkbuflen, keylen;
struct wk_info {
u16 version;
@@ -1277,31 +1454,17 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
u8 res2[8];
u8 pkey[];
} __packed * wki;
- const u8 *key;
- struct ep11kblob_header *hdr;
+ u8 *wkbuf = NULL;
+ int rc = -EIO;
- /* key with or without header ? */
- hdr = (struct ep11kblob_header *)keyblob;
- if (hdr->type == TOKTYPE_NON_CCA &&
- (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
- hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
- is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
- /* EP11 AES or ECC key with header */
- key = keyblob + sizeof(struct ep11kblob_header);
- keylen = hdr->len - sizeof(struct ep11kblob_header);
- } else if (hdr->type == TOKTYPE_NON_CCA &&
- hdr->version == TOKVER_EP11_AES &&
- is_ep11_keyblob(keyblob)) {
- /* EP11 AES key (old style) */
- key = keyblob;
- keylen = hdr->len;
- } else if (is_ep11_keyblob(keyblob)) {
- /* raw EP11 key blob */
- key = keyblob;
- keylen = keybloblen;
- } else {
+ if (ep11_kb_decode((u8 *)keyblob, keybloblen, &hdr, NULL, &key, &keylen))
return -EINVAL;
+
+ if (hdr->version == TOKVER_EP11_AES) {
+ /* wipe overlayed header */
+ memset(hdr, 0, sizeof(*hdr));
}
+ /* !!! hdr is no longer a valid header !!! */
/* alloc temp working buffer */
wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
@@ -1310,8 +1473,8 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
return -ENOMEM;
/* ep11 secure key -> protected key + info */
- rc = ep11_wrapkey(card, dom, key, keylen,
- 0, def_iv, wkbuf, &wkbuflen);
+ rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
+ 0, def_iv, wkbuf, &wkbuflen);
if (rc) {
DEBUG_ERR(
"%s rewrapping ep11 key to pkey failed, rc=%d\n",
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
index a3eddf51242d..9d17fd5228a7 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.h
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -12,7 +12,9 @@
#include <asm/zcrypt.h>
#include <asm/pkey.h>
-#define EP11_API_V 4 /* highest known and supported EP11 API version */
+#define EP11_API_V1 1 /* min EP11 API, default if no higher api required */
+#define EP11_API_V4 4 /* supported EP11 API for the ep11misc cprbs */
+#define EP11_API_V6 6 /* min EP11 API for some cprbs in SE environment */
#define EP11_STRUCT_MAGIC 0x1234
#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000
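Note: the split of the old EP11_API_V into V1/V4/V6 mirrors the selection logic added in zcrypt_ep11misc.c: V4 when the key is CPACF-exportable (or no flags are given), V1 otherwise, and V6 with an empty pinblob when running as a secure execution guest. A sketch restating that logic in one place; the helper, its bool parameter, and the literal 56 (EP11_PINBLOB_V1_BYTES, defined in the .c file) are illustrative, not part of the patch.

#include <linux/types.h>

static int example_select_ep11_api(u32 keygenflags, bool se_guest,
				   size_t *pinblob_size)
{
	int api = (!keygenflags || (keygenflags & EP11_BLOB_PKEY_EXTRACTABLE)) ?
		EP11_API_V4 : EP11_API_V1;

	*pinblob_size = 0;
	if (se_guest) {
		/* SE guests need API ordinal 6 plus an empty pinblob */
		api = EP11_API_V6;
		*pinblob_size = 56;	/* EP11_PINBLOB_V1_BYTES */
	}
	return api;
}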
@@ -29,14 +31,7 @@ struct ep11keyblob {
union {
u8 session[32];
/* only used for PKEY_TYPE_EP11: */
- struct {
- u8 type; /* 0x00 (TOKTYPE_NON_CCA) */
- u8 res0; /* unused */
- u16 len; /* total length in bytes of this blob */
- u8 version; /* 0x03 (TOKVER_EP11_AES) */
- u8 res1; /* unused */
- u16 keybitlen; /* clear key bit len, 0 for unknown */
- } head;
+ struct ep11kblob_header head;
};
u8 wkvp[16]; /* wrapping key verification pattern */
u64 attr; /* boolean key attributes */
@@ -56,6 +51,12 @@ static inline bool is_ep11_keyblob(const u8 *key)
}
/*
+ * For valid ep11 keyblobs, returns a reference to the wrapping key verification
+ * pattern. Otherwise NULL.
+ */
+const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);
+
+/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
* If checkcpacfexport is enabled, the key is also checked for the
* attributes needed to export this key for CPACF use.
@@ -114,13 +115,14 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
* Generate (random) EP11 AES secure key.
*/
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
- u8 *keybuf, size_t *keybufsize);
+ u8 *keybuf, size_t *keybufsize, u32 keybufver);
/*
* Generate EP11 AES secure key with given clear key value.
*/
int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
- const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
+ u32 keytype);
/*
* Build a list of ep11 apqns meeting the following constrains:
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 51f8f7a463f7..2e155de8abe5 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -28,15 +28,12 @@
/* >= CEX3A: 4096 bits */
#define CEX3A_MAX_MOD_SIZE 512
-/* CEX2A: max outputdatalength + type80_hdr */
-#define CEX2A_MAX_RESPONSE_SIZE 0x110
-
/* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */
#define CEX3A_MAX_RESPONSE_SIZE 0x210
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
- "Copyright IBM Corp. 2001, 2012");
+ "Copyright IBM Corp. 2001, 2023");
MODULE_LICENSE("GPL");
/*
@@ -366,20 +363,17 @@ static int convert_type80(struct zcrypt_queue *zq,
ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
return -EAGAIN;
}
- if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
- else
- BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
+ BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
data = reply->msg + t80h->len - outputdatalength;
if (copy_to_user(outputdata, data, outputdatalength))
return -EFAULT;
return 0;
}
-static int convert_response_cex2a(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
+static int convert_response(struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
unsigned char rtype = ((unsigned char *)reply->msg)[1];
@@ -414,9 +408,9 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_cex2a_receive(struct ap_queue *aq,
- struct ap_message *msg,
- struct ap_message *reply)
+static void zcrypt_msgtype50_receive(struct ap_queue *aq,
+ struct ap_message *msg,
+ struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
@@ -456,19 +450,18 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
* CEXxA device to the request distributor
* @mex: pointer to the modexpo request buffer
*/
-static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo *mex,
- struct ap_message *ap_msg)
+static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
{
struct completion work;
int rc;
- ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ?
- MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE;
+ ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE;
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
- ap_msg->receive = zcrypt_cex2a_receive;
+ ap_msg->receive = zcrypt_msgtype50_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
@@ -483,9 +476,9 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_cex2a(zq, ap_msg,
- mex->outputdata,
- mex->outputdatalength);
+ rc = convert_response(zq, ap_msg,
+ mex->outputdata,
+ mex->outputdatalength);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
@@ -507,19 +500,18 @@ out:
* CEXxA device to the request distributor
* @crt: pointer to the modexpoc_crt request buffer
*/
-static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo_crt *crt,
- struct ap_message *ap_msg)
+static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
{
struct completion work;
int rc;
- ap_msg->bufsize = (zq->zcard->user_space_type == ZCRYPT_CEX2A) ?
- MSGTYPE50_CRB2_MAX_MSG_SIZE : MSGTYPE50_CRB3_MAX_MSG_SIZE;
+ ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE;
ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL);
if (!ap_msg->msg)
return -ENOMEM;
- ap_msg->receive = zcrypt_cex2a_receive;
+ ap_msg->receive = zcrypt_msgtype50_receive;
ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg->private = &work;
@@ -534,9 +526,9 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_cex2a(zq, ap_msg,
- crt->outputdata,
- crt->outputdatalength);
+ rc = convert_response(zq, ap_msg,
+ crt->outputdata,
+ crt->outputdatalength);
} else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
@@ -555,8 +547,8 @@ out:
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {
- .rsa_modexpo = zcrypt_cex2a_modexpo,
- .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+ .rsa_modexpo = zcrypt_msgtype50_modexpo,
+ .rsa_modexpo_crt = zcrypt_msgtype50_modexpo_crt,
.owner = THIS_MODULE,
.name = MSGTYPE50_NAME,
.variant = MSGTYPE50_VARIANT_DEFAULT,
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index eb49f06bed29..323e93b90b12 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -15,7 +15,6 @@
#define MSGTYPE50_NAME "zcrypt_msgtype50"
#define MSGTYPE50_VARIANT_DEFAULT 0
-#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */
#define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index e668ff5eb384..3c53abbdc342 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2001, 2022
+ * Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -42,7 +42,7 @@ struct response_type {
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
- "Copyright IBM Corp. 2001, 2012");
+ "Copyright IBM Corp. 2001, 2023");
MODULE_LICENSE("GPL");
struct function_and_rules_block {
@@ -1348,14 +1348,6 @@ out:
/*
* The crypto operations for a CEXxC card.
*/
-static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
- .owner = THIS_MODULE,
- .name = MSGTYPE06_NAME,
- .variant = MSGTYPE06_VARIANT_NORNG,
- .rsa_modexpo = zcrypt_msgtype6_modexpo,
- .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
- .send_cprb = zcrypt_msgtype6_send_cprb,
-};
static struct zcrypt_ops zcrypt_msgtype6_ops = {
.owner = THIS_MODULE,
@@ -1378,14 +1370,12 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
void __init zcrypt_msgtype6_init(void)
{
- zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
}
void __exit zcrypt_msgtype6_exit(void)
{
- zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index 711252e52d8e..95a86e0dfd77 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -209,54 +209,6 @@ raid_attr_ro_state(level);
raid_attr_ro_fn(resync);
raid_attr_ro_state_fn(state);
-static void raid_component_release(struct device *dev)
-{
- struct raid_component *rc =
- container_of(dev, struct raid_component, dev);
- dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n");
- put_device(rc->dev.parent);
- kfree(rc);
-}
-
-int raid_component_add(struct raid_template *r,struct device *raid_dev,
- struct device *component_dev)
-{
- struct device *cdev =
- attribute_container_find_class_device(&r->raid_attrs.ac,
- raid_dev);
- struct raid_component *rc;
- struct raid_data *rd = dev_get_drvdata(cdev);
- int err;
-
- rc = kzalloc(sizeof(*rc), GFP_KERNEL);
- if (!rc)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&rc->node);
- device_initialize(&rc->dev);
- rc->dev.release = raid_component_release;
- rc->dev.parent = get_device(component_dev);
- rc->num = rd->component_count++;
-
- dev_set_name(&rc->dev, "component-%d", rc->num);
- list_add_tail(&rc->node, &rd->component_list);
- rc->dev.class = &raid_class.class;
- err = device_add(&rc->dev);
- if (err)
- goto err_out;
-
- return 0;
-
-err_out:
- put_device(&rc->dev);
- list_del(&rc->node);
- rd->component_count--;
- put_device(component_dev);
- kfree(rc);
- return err;
-}
-EXPORT_SYMBOL(raid_component_add);
-
struct raid_template *
raid_class_attach(struct raid_function_template *ft)
{
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index e429ad23c396..4db3ba62fcd3 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -303,12 +303,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
"Snic Tgt: device_add, with err = %d\n",
ret);
- put_device(&tgt->dev);
put_device(&snic->shost->shost_gendev);
spin_lock_irqsave(snic->shost->host_lock, flags);
list_del(&tgt->list);
spin_unlock_irqrestore(snic->shost->host_lock, flags);
- kfree(tgt);
+ put_device(&tgt->dev);
tgt = NULL;
return tgt;
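Note: the snic fix above swaps kfree() for put_device() on the error path after device_add() fails. The underlying driver-core rule: once device_initialize() has run, the object must be released through put_device() so the .release() callback frees it; kfree() would bypass the refcount and the release hook. A minimal sketch with illustrative names, not snic code.

#include <linux/device.h>
#include <linux/slab.h>

struct example_tgt {
	struct device dev;
};

static void example_tgt_release(struct device *dev)
{
	kfree(container_of(dev, struct example_tgt, dev));
}

static void example_tgt_init(struct example_tgt *tgt)
{
	device_initialize(&tgt->dev);
	tgt->dev.release = example_tgt_release;
}

static void example_tgt_destroy(struct example_tgt *tgt)
{
	/* drops the last reference; freeing happens in example_tgt_release() */
	put_device(&tgt->dev);
	/* calling kfree(tgt) here instead would skip the release callback */
}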
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index b3c226eb5292..58746e570d14 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -524,7 +524,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
* saved microcode information and put in the new.
*/
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
- strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
+ strscpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
sizeof(firmware->vtraps));
@@ -599,7 +599,7 @@ struct qe_firmware_info *qe_get_firmware_info(void)
/* Copy the data into qe_firmware_info*/
sprop = of_get_property(fw, "id", NULL);
if (sprop)
- strlcpy(qe_firmware_info.id, sprop,
+ strscpy(qe_firmware_info.id, sprop,
sizeof(qe_firmware_info.id));
of_property_read_u64(fw, "extended-modes",
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index cf78839b3f74..1cc2281cb370 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -3,6 +3,7 @@
#include <linux/acpi.h>
#include <linux/delay.h>
+#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
@@ -25,6 +26,23 @@ static int sdw_get_id(struct sdw_bus *bus)
return 0;
}
+static int sdw_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct sdw_bus *bus = h->host_data;
+
+ irq_set_chip_data(virq, bus);
+ irq_set_chip(virq, &bus->irq_chip);
+ irq_set_nested_thread(virq, 1);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sdw_domain_ops = {
+ .map = sdw_irq_map,
+};
+
/**
* sdw_bus_master_add() - add a bus Master instance
* @bus: bus instance
@@ -151,6 +169,14 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
bus->params.curr_bank = SDW_BANK0;
bus->params.next_bank = SDW_BANK1;
+ bus->irq_chip.name = dev_name(bus->dev);
+ bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
+ &sdw_domain_ops, bus);
+ if (!bus->domain) {
+ dev_err(bus->dev, "Failed to add IRQ domain\n");
+ return -EINVAL;
+ }
+
return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);
@@ -187,6 +213,9 @@ static int sdw_delete_slave(struct device *dev, void *data)
void sdw_bus_master_delete(struct sdw_bus *bus)
{
device_for_each_child(bus->dev, NULL, sdw_delete_slave);
+
+ irq_domain_remove(bus->domain);
+
sdw_master_device_del(bus);
sdw_bus_debugfs_exit(bus);
@@ -1725,6 +1754,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
struct device *dev = &slave->dev;
struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
+ if (slave->prop.use_domain_irq && slave->irq)
+ handle_nested_irq(slave->irq);
+
if (drv->ops && drv->ops->interrupt_callback) {
slave_intr.sdca_cascade = sdca_cascade;
slave_intr.control_port = clear;
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 1f43ee848eac..fafbc284e82d 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -122,6 +122,12 @@ static int sdw_drv_probe(struct device *dev)
if (drv->ops && drv->ops->read_prop)
drv->ops->read_prop(slave);
+ if (slave->prop.use_domain_irq) {
+ slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
+ if (!slave->irq)
+ dev_warn(dev, "Failed to map IRQ\n");
+ }
+
/* init the sysfs as we have properties now */
ret = sdw_slave_sysfs_init(slave);
if (ret < 0)
@@ -166,7 +172,13 @@ static int sdw_drv_remove(struct device *dev)
int ret = 0;
mutex_lock(&slave->sdw_dev_lock);
+
slave->probed = false;
+
+ if (slave->prop.use_domain_irq)
+ irq_dispose_mapping(irq_find_mapping(slave->bus->domain,
+ slave->dev_num));
+
mutex_unlock(&slave->sdw_dev_lock);
if (drv->remove)
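Note: with the new IRQ domain, the bus maps slave->irq in sdw_drv_probe() when prop.use_domain_irq is set and dispatches device alerts through handle_nested_irq(). A hedged sketch of how a peripheral driver might hook into this, assuming a threaded-only handler is sufficient; the driver, function, and IRQ names are illustrative, not taken from the patch.

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/soundwire/sdw.h>

static irqreturn_t example_sdw_irq_thread(int irq, void *data)
{
	struct sdw_slave *slave = data;

	dev_dbg(&slave->dev, "alert delivered via the bus IRQ domain\n");
	return IRQ_HANDLED;
}

static int example_sdw_probe(struct sdw_slave *slave,
			     const struct sdw_device_id *id)
{
	/* slave->irq was mapped by the bus because prop.use_domain_irq is set */
	return devm_request_threaded_irq(&slave->dev, slave->irq, NULL,
					 example_sdw_irq_thread, IRQF_ONESHOT,
					 "example-sdw", slave);
}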
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8962b2557615..2c21d5b96fdc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -281,6 +281,13 @@ config SPI_COLDFIRE_QSPI
This enables support for the Coldfire QSPI controller in master
mode.
+config SPI_CS42L43
+ tristate "Cirrus Logic CS42L43 SPI controller"
+ depends on MFD_CS42L43 && PINCTRL_CS42L43
+ help
+ This enables support for the SPI controller inside the Cirrus Logic
+ CS42L43 audio codec.
+
config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on ARCH_DAVINCI || ARCH_KEYSTONE || COMPILE_TEST
@@ -516,6 +523,32 @@ config SPI_LM70_LLP
which interfaces to an LM70 temperature sensor using
a parallel port.
+config SPI_LOONGSON_CORE
+ tristate
+ depends on LOONGARCH || COMPILE_TEST
+
+config SPI_LOONGSON_PCI
+ tristate "Loongson SPI Controller PCI Driver Support"
+ select SPI_LOONGSON_CORE
+ depends on PCI && (LOONGARCH || COMPILE_TEST)
+ help
+	  This bus driver supports the Loongson SPI hardware controller on
+	  Loongson platforms and uses the PCI framework to register SPI
+	  device resources.
+	  Say Y or M here if you want to use the SPI controller on a
+	  Loongson platform.
+
+config SPI_LOONGSON_PLATFORM
+ tristate "Loongson SPI Controller Platform Driver Support"
+ select SPI_LOONGSON_CORE
+ depends on OF && (LOONGARCH || COMPILE_TEST)
+ help
+	  This bus driver supports the Loongson SPI hardware controller on
+	  Loongson platforms and uses the device tree (DTS) framework to
+	  register SPI device resources.
+	  Say Y or M here if you want to use the SPI controller on a
+	  Loongson platform.
+
config SPI_LP8841_RTC
tristate "ICP DAS LP-8841 SPI Controller for RTC"
depends on MACH_PXA27X_DT || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 080c2c1b3ec1..6af54842b9fa 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_CS42L43) += spi-cs42l43.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
@@ -71,6 +72,9 @@ obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o
+obj-$(CONFIG_SPI_LOONGSON_PCI) += spi-loongson-pci.o
+obj-$(CONFIG_SPI_LOONGSON_PLATFORM) += spi-loongson-plat.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index fecead757a3c..5d9b246b6963 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -215,9 +215,9 @@ static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
}
}
-static int amd_spi_master_setup(struct spi_device *spi)
+static int amd_spi_host_setup(struct spi_device *spi)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(spi->master);
+ struct amd_spi *amd_spi = spi_controller_get_devdata(spi->controller);
amd_spi_clear_fifo_ptr(amd_spi);
@@ -272,7 +272,7 @@ static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
}
static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
- struct spi_master *master,
+ struct spi_controller *host,
struct spi_message *message)
{
struct spi_transfer *xfer = NULL;
@@ -353,15 +353,15 @@ fin_msg:
return -ENODEV;
}
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return message->status;
}
-static int amd_spi_master_transfer(struct spi_master *master,
+static int amd_spi_host_transfer(struct spi_controller *host,
struct spi_message *msg)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
+ struct amd_spi *amd_spi = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
amd_spi_select_chip(amd_spi, spi_get_chipselect(spi, 0));
@@ -370,7 +370,7 @@ static int amd_spi_master_transfer(struct spi_master *master,
* Extract spi_transfers from the spi message and
* program the controller.
*/
- return amd_spi_fifo_xfer(amd_spi, master, msg);
+ return amd_spi_fifo_xfer(amd_spi, host, msg);
}
static size_t amd_spi_max_transfer_size(struct spi_device *spi)
@@ -381,16 +381,16 @@ static size_t amd_spi_max_transfer_size(struct spi_device *spi)
static int amd_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct amd_spi *amd_spi;
int err;
- /* Allocate storage for spi_master and driver private data */
- master = devm_spi_alloc_master(dev, sizeof(struct amd_spi));
- if (!master)
- return dev_err_probe(dev, -ENOMEM, "Error allocating SPI master\n");
+ /* Allocate storage for host and driver private data */
+ host = devm_spi_alloc_host(dev, sizeof(struct amd_spi));
+ if (!host)
+ return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");
- amd_spi = spi_master_get_devdata(master);
+ amd_spi = spi_controller_get_devdata(host);
amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(amd_spi->io_remap_addr))
return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr),
@@ -398,22 +398,22 @@ static int amd_spi_probe(struct platform_device *pdev)
dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr);
- amd_spi->version = (enum amd_spi_versions) device_get_match_data(dev);
+ amd_spi->version = (uintptr_t) device_get_match_data(dev);
- /* Initialize the spi_master fields */
- master->bus_num = 0;
- master->num_chipselect = 4;
- master->mode_bits = 0;
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->max_speed_hz = AMD_SPI_MAX_HZ;
- master->min_speed_hz = AMD_SPI_MIN_HZ;
- master->setup = amd_spi_master_setup;
- master->transfer_one_message = amd_spi_master_transfer;
- master->max_transfer_size = amd_spi_max_transfer_size;
- master->max_message_size = amd_spi_max_transfer_size;
+ /* Initialize the spi_controller fields */
+ host->bus_num = 0;
+ host->num_chipselect = 4;
+ host->mode_bits = 0;
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->max_speed_hz = AMD_SPI_MAX_HZ;
+ host->min_speed_hz = AMD_SPI_MIN_HZ;
+ host->setup = amd_spi_host_setup;
+ host->transfer_one_message = amd_spi_host_transfer;
+ host->max_transfer_size = amd_spi_max_transfer_size;
+ host->max_message_size = amd_spi_max_transfer_size;
/* Register the controller with SPI framework */
- err = devm_spi_register_master(dev, master);
+ err = devm_spi_register_controller(dev, host);
if (err)
return dev_err_probe(dev, err, "error registering SPI controller\n");
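
The spi-amd.c hunks above follow the common conversion pattern: devm_spi_alloc_master() becomes devm_spi_alloc_host(), driver data is still fetched with spi_controller_get_devdata(), and registration goes through devm_spi_register_controller(); the match data is now cast through uintptr_t instead of directly to an enum. A minimal sketch of that probe shape, with hypothetical foo_* names standing in for any real driver:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spi/spi.h>

struct foo_spi {
	void __iomem *base;
	unsigned int version;
};

static int foo_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct foo_spi *foo;

	/* devm_spi_alloc_host() replaces devm_spi_alloc_master() */
	host = devm_spi_alloc_host(dev, sizeof(*foo));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n");

	foo = spi_controller_get_devdata(host);

	foo->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);

	/* match data carries an enum in a pointer: cast through uintptr_t */
	foo->version = (uintptr_t)device_get_match_data(dev);

	host->flags = SPI_CONTROLLER_HALF_DUPLEX;	/* was SPI_MASTER_HALF_DUPLEX */
	host->num_chipselect = 4;

	return devm_spi_register_controller(dev, host);
}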
diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c
index 3c4224c38399..fadf6667cd51 100644
--- a/drivers/spi/spi-amlogic-spifc-a1.c
+++ b/drivers/spi/spi-amlogic-spifc-a1.c
@@ -72,7 +72,7 @@
#define SPIFC_A1_USER_DBUF_ADDR_REG 0x248
-#define SPIFC_A1_BUFFER_SIZE 512
+#define SPIFC_A1_BUFFER_SIZE 512U
#define SPIFC_A1_MAX_HZ 200000000
#define SPIFC_A1_MIN_HZ 1000000
@@ -107,6 +107,7 @@ struct amlogic_spifc_a1 {
struct clk *clk;
struct device *dev;
void __iomem *base;
+ u32 curr_speed_hz;
};
static int amlogic_spifc_a1_request(struct amlogic_spifc_a1 *spifc, bool read)
@@ -235,66 +236,68 @@ static int amlogic_spifc_a1_write(struct amlogic_spifc_a1 *spifc,
return amlogic_spifc_a1_request(spifc, false);
}
-static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
+static int amlogic_spifc_a1_set_freq(struct amlogic_spifc_a1 *spifc, u32 freq)
{
- struct amlogic_spifc_a1 *spifc =
- spi_controller_get_devdata(mem->spi->controller);
- size_t off, nbytes = op->data.nbytes;
- u32 cmd_cfg, addr_cfg, dummy_cfg, dmode;
int ret;
- amlogic_spifc_a1_user_init(spifc);
-
- cmd_cfg = SPIFC_A1_USER_CMD(op);
- amlogic_spifc_a1_set_cmd(spifc, cmd_cfg);
+ if (freq == spifc->curr_speed_hz)
+ return 0;
- if (op->addr.nbytes) {
- addr_cfg = SPIFC_A1_USER_ADDR(op);
- amlogic_spifc_a1_set_addr(spifc, op->addr.val, addr_cfg);
- }
+ ret = clk_set_rate(spifc->clk, freq);
+ if (ret)
+ return ret;
- if (op->dummy.nbytes) {
- dummy_cfg = SPIFC_A1_USER_DUMMY(op);
- amlogic_spifc_a1_set_dummy(spifc, dummy_cfg);
- }
+ spifc->curr_speed_hz = freq;
+ return 0;
+}
- if (!op->data.nbytes)
- return amlogic_spifc_a1_request(spifc, false);
+static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct amlogic_spifc_a1 *spifc =
+ spi_controller_get_devdata(mem->spi->controller);
+ size_t data_size = op->data.nbytes;
+ int ret;
- dmode = ilog2(op->data.buswidth);
- off = 0;
+ ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz);
+ if (ret)
+ return ret;
- do {
- size_t block_size = min_t(size_t, nbytes, SPIFC_A1_BUFFER_SIZE);
+ amlogic_spifc_a1_user_init(spifc);
+ amlogic_spifc_a1_set_cmd(spifc, SPIFC_A1_USER_CMD(op));
- amlogic_spifc_a1_set_cmd(spifc, cmd_cfg);
+ if (op->addr.nbytes)
+ amlogic_spifc_a1_set_addr(spifc, op->addr.val,
+ SPIFC_A1_USER_ADDR(op));
- if (op->addr.nbytes)
- amlogic_spifc_a1_set_addr(spifc, op->addr.val + off,
- addr_cfg);
+ if (op->dummy.nbytes)
+ amlogic_spifc_a1_set_dummy(spifc, SPIFC_A1_USER_DUMMY(op));
- if (op->dummy.nbytes)
- amlogic_spifc_a1_set_dummy(spifc, dummy_cfg);
+ if (data_size) {
+ u32 mode = ilog2(op->data.buswidth);
writel(0, spifc->base + SPIFC_A1_USER_DBUF_ADDR_REG);
if (op->data.dir == SPI_MEM_DATA_IN)
- ret = amlogic_spifc_a1_read(spifc,
- op->data.buf.in + off,
- block_size, dmode);
+ ret = amlogic_spifc_a1_read(spifc, op->data.buf.in,
+ data_size, mode);
else
- ret = amlogic_spifc_a1_write(spifc,
- op->data.buf.out + off,
- block_size, dmode);
-
- nbytes -= block_size;
- off += block_size;
- } while (nbytes != 0 && !ret);
+ ret = amlogic_spifc_a1_write(spifc, op->data.buf.out,
+ data_size, mode);
+ } else {
+ ret = amlogic_spifc_a1_request(spifc, false);
+ }
return ret;
}
+static int amlogic_spifc_a1_adjust_op_size(struct spi_mem *mem,
+ struct spi_mem_op *op)
+{
+ op->data.nbytes = min(op->data.nbytes, SPIFC_A1_BUFFER_SIZE);
+ return 0;
+}
+
static void amlogic_spifc_a1_hw_init(struct amlogic_spifc_a1 *spifc)
{
u32 regv;
@@ -314,6 +317,7 @@ static void amlogic_spifc_a1_hw_init(struct amlogic_spifc_a1 *spifc)
static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = {
.exec_op = amlogic_spifc_a1_exec_op,
+ .adjust_op_size = amlogic_spifc_a1_adjust_op_size,
};
static int amlogic_spifc_a1_probe(struct platform_device *pdev)
@@ -322,7 +326,7 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev)
struct amlogic_spifc_a1 *spifc;
int ret;
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*spifc));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifc));
if (!ctrl)
return -ENOMEM;
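
The spifc-a1 rework above drops the driver's internal 512-byte chunking loop and instead reports the limit through an adjust_op_size callback, letting the spi-mem core split large operations; the 512 -> 512U change keeps min()'s type check happy against the unsigned op->data.nbytes. A cut-down sketch of that callback, with hypothetical foo_* names:

#include <linux/minmax.h>
#include <linux/spi/spi-mem.h>

#define FOO_BUFFER_SIZE		512U	/* unsigned, so min() sees matching types */

static int foo_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	/* clamp one op to the FIFO size; the core re-issues the remainder */
	op->data.nbytes = min(op->data.nbytes, FOO_BUFFER_SIZE);
	return 0;
}

static const struct spi_controller_mem_ops foo_mem_ops = {
	.adjust_op_size	= foo_adjust_op_size,
	/* .exec_op would go here, as in the driver above */
};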
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
index 9dcada8c4cb9..58b98cea31d9 100644
--- a/drivers/spi/spi-ar934x.c
+++ b/drivers/spi/spi-ar934x.c
@@ -14,7 +14,8 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "spi-ar934x"
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index a7fb7c94e70e..0103ac0158c0 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -17,8 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index e75b0d51f06a..21b0fa646c7d 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -295,7 +295,7 @@ static const struct aspeed_spi_data ast2400_spi_data;
static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
+ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
u32 addr_mode, addr_mode_backup;
u32 ctl_val;
@@ -374,7 +374,7 @@ static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static const char *aspeed_spi_get_name(struct spi_mem *mem)
{
- struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->master);
+ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = aspi->dev;
return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
@@ -553,7 +553,7 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip);
static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
- struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
struct spi_mem_op *op = &desc->info.op_tmpl;
u32 ctl_val;
@@ -620,7 +620,7 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offset, size_t len, void *buf)
{
- struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->master);
+ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
/* Switch to USER command mode if mapping window is too small */
@@ -669,7 +669,7 @@ static void aspeed_spi_chip_enable(struct aspeed_spi *aspi, unsigned int cs, boo
static int aspeed_spi_setup(struct spi_device *spi)
{
- struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
+ struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
const struct aspeed_spi_data *data = aspi->data;
unsigned int cs = spi_get_chipselect(spi, 0);
struct aspeed_spi_chip *chip = &aspi->chips[cs];
@@ -697,7 +697,7 @@ static int aspeed_spi_setup(struct spi_device *spi)
static void aspeed_spi_cleanup(struct spi_device *spi)
{
- struct aspeed_spi *aspi = spi_controller_get_devdata(spi->master);
+ struct aspeed_spi *aspi = spi_controller_get_devdata(spi->controller);
unsigned int cs = spi_get_chipselect(spi, 0);
aspeed_spi_chip_enable(aspi, cs, false);
@@ -726,7 +726,7 @@ static int aspeed_spi_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- ctlr = devm_spi_alloc_master(dev, sizeof(*aspi));
+ ctlr = devm_spi_alloc_host(dev, sizeof(*aspi));
if (!ctlr)
return -ENOMEM;
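
The aspeed hunks mostly swap an accessor: mem->spi->master (a deprecated alias) becomes mem->spi->controller, and the allocation moves to devm_spi_alloc_host(). A sketch of how a spi-mem op handler reaches its driver data after that change (foo_* names are illustrative only):

#include <linux/spi/spi-mem.h>

struct foo_spi {
	struct device *dev;
};

static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	/* spi->controller replaces the legacy spi->master alias */
	struct foo_spi *foo = spi_controller_get_devdata(mem->spi->controller);

	dev_dbg(foo->dev, "opcode 0x%02x on cs %u\n", op->cmd.opcode,
		spi_get_chipselect(mem->spi, 0));
	return 0;
}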
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index 7854d9790fe9..b11d0f993cc7 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -486,10 +485,7 @@ static int at91_usart_gpio_setup(struct platform_device *pdev)
cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
- if (IS_ERR(cs_gpios))
- return PTR_ERR(cs_gpios);
-
- return 0;
+ return PTR_ERR_OR_ZERO(cs_gpios);
}
static int at91_usart_spi_probe(struct platform_device *pdev)
@@ -527,7 +523,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
controller->dev.of_node = pdev->dev.parent->of_node;
controller->bits_per_word_mask = SPI_BPW_MASK(8);
controller->setup = at91_usart_spi_setup;
- controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
controller->transfer_one = at91_usart_spi_transfer_one;
controller->prepare_message = at91_usart_spi_prepare_message;
controller->unprepare_message = at91_usart_spi_unprepare_message;
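
The at91-usart change above is purely a simplification: the IS_ERR()/return-0 tail collapses into PTR_ERR_OR_ZERO(), which returns the encoded error for an ERR_PTR and 0 for anything else (including a NULL optional GPIO array). An equivalent sketch with a hypothetical foo_* helper:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_gpio_setup(struct device *dev)
{
	struct gpio_descs *cs_gpios;

	cs_gpios = devm_gpiod_get_array_optional(dev, "cs", GPIOD_OUT_LOW);

	/* error -> PTR_ERR(cs_gpios); valid pointer or NULL -> 0 */
	return PTR_ERR_OR_ZERO(cs_gpios);
}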
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index d3dd21386f12..1b6d977d111c 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -185,7 +185,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
host->use_gpio_descriptors = true;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- host->flags = SPI_MASTER_GPIO_SS;
+ host->flags = SPI_CONTROLLER_GPIO_SS;
host->num_chipselect = 3;
host->mem_ops = &ath79_mem_ops;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 152cd6773403..6aa8adbe4170 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1450,10 +1450,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs)
- return -ENXIO;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -1475,8 +1471,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
host->bus_num = pdev->id;
host->num_chipselect = 4;
host->setup = atmel_spi_setup;
- host->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
- SPI_MASTER_GPIO_SS);
+ host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX |
+ SPI_CONTROLLER_GPIO_SS);
host->transfer_one = atmel_spi_one_transfer;
host->set_cs = atmel_spi_set_cs;
host->cleanup = atmel_spi_cleanup;
@@ -1490,7 +1486,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
spin_lock_init(&as->lock);
as->pdev = pdev;
- as->regs = devm_ioremap_resource(&pdev->dev, regs);
+ as->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(as->regs)) {
ret = PTR_ERR(as->regs);
goto out_unmap_regs;
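
In spi-atmel.c the separate platform_get_resource() + devm_ioremap_resource() pair is replaced by devm_platform_get_and_ioremap_resource(), which maps index 0 in one call and still hands the struct resource back for callers that need it later. A minimal sketch (foo_* names are hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *regs;

	/* one call: look up IORESOURCE_MEM 0, request it and ioremap it */
	*base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	dev_dbg(&pdev->dev, "mapped %pR\n", regs);
	return 0;
}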
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index 0b57e6afce0f..1011b1a8f241 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -54,7 +54,7 @@ struct au1550_spi {
int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
irqreturn_t (*irq_callback)(struct au1550_spi *hw);
- struct completion master_done;
+ struct completion host_done;
unsigned int usedma;
u32 dma_tx_id;
@@ -66,7 +66,7 @@ struct au1550_spi {
unsigned int dma_rx_tmpbuf_size;
u32 dma_rx_tmpbuf_addr;
- struct spi_master *master;
+ struct spi_controller *host;
struct device *dev;
struct au1550_spi_info *pdata;
struct resource *ioarea;
@@ -159,7 +159,7 @@ static void au1550_spi_reset_fifos(struct au1550_spi *hw)
*/
static void au1550_spi_chipsel(struct spi_device *spi, int value)
{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
u32 cfg, stat;
@@ -219,7 +219,7 @@ static void au1550_spi_chipsel(struct spi_device *spi, int value)
static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
unsigned int bpw, hz;
u32 cfg, stat;
@@ -272,7 +272,7 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
* no reliable way how to recognize that spi transfer is done
* dma complete callbacks are called before real spi transfer is finished
* and if only tx dma channel is set up (and rx fifo overflow event masked)
- * spi master done event irq is not generated unless rx fifo is empty (emptied)
+ * spi host done event irq is not generated unless rx fifo is empty (emptied)
* so we need rx tmp buffer to use for rx dma if user does not provide one
*/
static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned int size)
@@ -303,7 +303,7 @@ static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw)
static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
dma_addr_t dma_tx_addr;
dma_addr_t dma_rx_addr;
u32 res;
@@ -387,7 +387,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
wmb(); /* drain writebuffer */
- wait_for_completion(&hw->master_done);
+ wait_for_completion(&hw->host_done);
au1xxx_dbdma_stop(hw->dma_tx_ch);
au1xxx_dbdma_stop(hw->dma_rx_ch);
@@ -449,7 +449,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
"dma transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
evnt, stat);
- complete(&hw->master_done);
+ complete(&hw->host_done);
return IRQ_HANDLED;
}
@@ -458,7 +458,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
au1550_spi_mask_ack_all(hw);
hw->rx_count = hw->len;
hw->tx_count = hw->len;
- complete(&hw->master_done);
+ complete(&hw->host_done);
}
return IRQ_HANDLED;
}
@@ -502,7 +502,7 @@ AU1550_SPI_TX_WORD(32, 0xffffff)
static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
{
u32 stat, mask;
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
@@ -537,7 +537,7 @@ static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
wmb(); /* drain writebuffer */
- wait_for_completion(&hw->master_done);
+ wait_for_completion(&hw->host_done);
return min(hw->rx_count, hw->tx_count);
}
@@ -568,7 +568,7 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
dev_err(hw->dev,
"pio transfer: unexpected SPI error (event=0x%x stat=0x%x)!\n",
evnt, stat);
- complete(&hw->master_done);
+ complete(&hw->host_done);
return IRQ_HANDLED;
}
@@ -605,11 +605,11 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
/*
* Restart the SPI transmission in case of a transmit underflow.
* This seems to work despite the notes in the Au1550 data book
- * of Figure 8-4 with flowchart for SPI master operation:
+ * of Figure 8-4 with flowchart for SPI host operation:
*
* """Note 1: An XFR Error Interrupt occurs, unless masked,
* for any of the following events: Tx FIFO Underflow,
- * Rx FIFO Overflow, or Multiple-master Error
+ * Rx FIFO Overflow, or Multiple-host Error
* Note 2: In case of a Tx Underflow Error, all zeroes are
* transmitted."""
*
@@ -627,14 +627,14 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
if (hw->rx_count >= hw->len) {
/* transfer completed successfully */
au1550_spi_mask_ack_all(hw);
- complete(&hw->master_done);
+ complete(&hw->host_done);
}
return IRQ_HANDLED;
}
static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ struct au1550_spi *hw = spi_controller_get_devdata(spi->controller);
return hw->txrx_bufs(spi, t);
}
@@ -723,24 +723,24 @@ static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
static int au1550_spi_probe(struct platform_device *pdev)
{
struct au1550_spi *hw;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *r;
int err = 0;
- master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
- if (master == NULL) {
- dev_err(&pdev->dev, "No memory for spi_master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(struct au1550_spi));
+ if (host == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_controller\n");
err = -ENOMEM;
goto err_nomem;
}
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
- hw->master = master;
+ hw->host = host;
hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
@@ -798,9 +798,9 @@ static int au1550_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
- init_completion(&hw->master_done);
+ init_completion(&hw->host_done);
- hw->bitbang.master = hw->master;
+ hw->bitbang.master = hw->host;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
@@ -858,8 +858,8 @@ static int au1550_spi_probe(struct platform_device *pdev)
goto err_no_irq;
}
- master->bus_num = pdev->id;
- master->num_chipselect = hw->pdata->num_chipselect;
+ host->bus_num = pdev->id;
+ host->num_chipselect = hw->pdata->num_chipselect;
/*
* precompute valid range for spi freq - from au1550 datasheet:
@@ -874,8 +874,8 @@ static int au1550_spi_probe(struct platform_device *pdev)
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
- master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
- master->min_speed_hz =
+ host->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ host->min_speed_hz =
hw->pdata->mainclk_hz / (max_div + 1) + 1;
}
@@ -883,13 +883,13 @@ static int au1550_spi_probe(struct platform_device *pdev)
err = spi_bitbang_start(&hw->bitbang);
if (err) {
- dev_err(&pdev->dev, "Failed to register SPI master\n");
+ dev_err(&pdev->dev, "Failed to register SPI host\n");
goto err_register;
}
dev_info(&pdev->dev,
- "spi master registered: bus_num=%d num_chipselect=%d\n",
- master->bus_num, master->num_chipselect);
+ "spi host registered: bus_num=%d num_chipselect=%d\n",
+ host->bus_num, host->num_chipselect);
return 0;
@@ -917,7 +917,7 @@ err_ioremap:
err_no_iores:
err_no_pdata:
- spi_master_put(hw->master);
+ spi_controller_put(hw->host);
err_nomem:
return err;
@@ -927,8 +927,8 @@ static void au1550_spi_remove(struct platform_device *pdev)
{
struct au1550_spi *hw = platform_get_drvdata(pdev);
- dev_info(&pdev->dev, "spi master remove: bus_num=%d\n",
- hw->master->bus_num);
+ dev_info(&pdev->dev, "spi host remove: bus_num=%d\n",
+ hw->host->bus_num);
spi_bitbang_stop(&hw->bitbang);
free_irq(hw->irq, hw);
@@ -941,7 +941,7 @@ static void au1550_spi_remove(struct platform_device *pdev)
au1xxx_dbdma_chan_free(hw->dma_tx_ch);
}
- spi_master_put(hw->master);
+ spi_controller_put(hw->host);
}
/* work with hotplug and coldplug */
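
spi-au1550.c keeps the non-devm allocation, so the rename also touches the error and remove paths: spi_alloc_master()/spi_master_put() become spi_alloc_host()/spi_controller_put(), and the reference must still be dropped by hand on failure. A sketch of that pairing with a hypothetical foo driver:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	int err;

	host = spi_alloc_host(&pdev->dev, 0);	/* was spi_alloc_master() */
	if (!host)
		return -ENOMEM;

	host->bus_num = pdev->id;

	err = spi_register_controller(host);
	if (err) {
		/* non-devm allocation: drop the reference ourselves */
		spi_controller_put(host);
		return err;
	}

	platform_set_drvdata(pdev, host);
	return 0;
}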
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 89661f3b0d44..0258c9a72fdc 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -356,8 +356,8 @@ static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
- struct spi_master *master = devid;
- struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ struct spi_controller *host = devid;
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
unsigned int disable_int = 0;
unsigned int pending;
@@ -396,7 +396,7 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
msg->status = 0;
msg->actual_length = msg->frame_length;
spi_engine->msg = NULL;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
disable_int |= SPI_ENGINE_INT_SYNC;
}
}
@@ -412,11 +412,11 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static int spi_engine_transfer_one_message(struct spi_master *master,
+static int spi_engine_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_engine_program p_dry, *p;
- struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
unsigned int int_enable = 0;
unsigned long flags;
size_t size;
@@ -464,42 +464,42 @@ static int spi_engine_transfer_one_message(struct spi_master *master,
static int spi_engine_probe(struct platform_device *pdev)
{
struct spi_engine *spi_engine;
- struct spi_master *master;
+ struct spi_controller *host;
unsigned int version;
int irq;
int ret;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ if (irq < 0)
+ return irq;
spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
if (!spi_engine)
return -ENOMEM;
- master = spi_alloc_master(&pdev->dev, 0);
- if (!master)
+ host = spi_alloc_host(&pdev->dev, 0);
+ if (!host)
return -ENOMEM;
- spi_master_set_devdata(master, spi_engine);
+ spi_controller_set_devdata(host, spi_engine);
spin_lock_init(&spi_engine->lock);
spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(spi_engine->clk)) {
ret = PTR_ERR(spi_engine->clk);
- goto err_put_master;
+ goto err_put_host;
}
spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
if (IS_ERR(spi_engine->ref_clk)) {
ret = PTR_ERR(spi_engine->ref_clk);
- goto err_put_master;
+ goto err_put_host;
}
ret = clk_prepare_enable(spi_engine->clk);
if (ret)
- goto err_put_master;
+ goto err_put_host;
ret = clk_prepare_enable(spi_engine->ref_clk);
if (ret)
@@ -525,46 +525,46 @@ static int spi_engine_probe(struct platform_device *pdev)
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
- ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
+ ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
if (ret)
goto err_ref_clk_disable;
- master->dev.of_node = pdev->dev.of_node;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
- master->transfer_one_message = spi_engine_transfer_one_message;
- master->num_chipselect = 8;
+ host->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
+ host->transfer_one_message = spi_engine_transfer_one_message;
+ host->num_chipselect = 8;
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret)
goto err_free_irq;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
return 0;
err_free_irq:
- free_irq(irq, master);
+ free_irq(irq, host);
err_ref_clk_disable:
clk_disable_unprepare(spi_engine->ref_clk);
err_clk_disable:
clk_disable_unprepare(spi_engine->clk);
-err_put_master:
- spi_master_put(master);
+err_put_host:
+ spi_controller_put(host);
return ret;
}
static void spi_engine_remove(struct platform_device *pdev)
{
- struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
- struct spi_engine *spi_engine = spi_master_get_devdata(master);
+ struct spi_controller *host = spi_controller_get(platform_get_drvdata(pdev));
+ struct spi_engine *spi_engine = spi_controller_get_devdata(host);
int irq = platform_get_irq(pdev, 0);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
- free_irq(irq, master);
+ free_irq(irq, host);
- spi_master_put(master);
+ spi_controller_put(host);
writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
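
Besides the rename, spi-axi-spi-engine.c fixes its IRQ lookup: platform_get_irq() no longer returns 0, and a negative return already carries the real reason (including -EPROBE_DEFER), so it is propagated instead of being flattened to -ENXIO. The resulting pattern, sketched with hypothetical foo_* names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* keep -EPROBE_DEFER, -ENXIO, ... intact */

	return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				dev_name(&pdev->dev), pdev);
}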
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index d91dfbe47aa5..ef08fcac2f6d 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -220,7 +220,7 @@ struct qspi_trans {
struct bcm_qspi {
struct platform_device *pdev;
- struct spi_master *master;
+ struct spi_controller *host;
struct clk *clk;
u32 base_clk;
u32 max_speed_hz;
@@ -732,7 +732,7 @@ static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi *qspi,
struct qspi_trans *qt)
{
if (qt->mspi_last_trans &&
- spi_transfer_is_last(qspi->master, qt->trans))
+ spi_transfer_is_last(qspi->host, qt->trans))
return true;
else
return false;
@@ -979,7 +979,7 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
MSPI_CDRAM_BITSE_BIT);
- /* set 3wrire halfduplex mode data from master to slave */
+ /* set 3-wire half-duplex mode data from host to target */
if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
mspi_cdram |= MSPI_CDRAM_OUTP;
@@ -1035,7 +1035,7 @@ done:
static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
const struct spi_mem_op *op)
{
- struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct bcm_qspi *qspi = spi_controller_get_devdata(spi->controller);
u32 addr = 0, len, rdlen, len_words, from = 0;
int ret = 0;
unsigned long timeo = msecs_to_jiffies(100);
@@ -1118,11 +1118,11 @@ static int bcm_qspi_bspi_exec_mem_op(struct spi_device *spi,
return ret;
}
-static int bcm_qspi_transfer_one(struct spi_master *master,
+static int bcm_qspi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *trans)
{
- struct bcm_qspi *qspi = spi_master_get_devdata(master);
+ struct bcm_qspi *qspi = spi_controller_get_devdata(host);
int slots;
unsigned long timeo = msecs_to_jiffies(100);
@@ -1150,8 +1150,8 @@ static int bcm_qspi_transfer_one(struct spi_master *master,
static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
const struct spi_mem_op *op)
{
- struct spi_master *master = spi->master;
- struct bcm_qspi *qspi = spi_master_get_devdata(master);
+ struct spi_controller *host = spi->controller;
+ struct bcm_qspi *qspi = spi_controller_get_devdata(host);
struct spi_transfer t[2];
u8 cmd[6] = { };
int ret, i;
@@ -1171,7 +1171,7 @@ static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
t[0].tx_nbits = op->cmd.buswidth;
/* lets mspi know that this is not last transfer */
qspi->trans_pos.mspi_last_trans = false;
- ret = bcm_qspi_transfer_one(master, spi, &t[0]);
+ ret = bcm_qspi_transfer_one(host, spi, &t[0]);
/* rx */
qspi->trans_pos.mspi_last_trans = true;
@@ -1181,7 +1181,7 @@ static int bcm_qspi_mspi_exec_mem_op(struct spi_device *spi,
t[1].len = op->data.nbytes;
t[1].rx_nbits = op->data.buswidth;
t[1].bits_per_word = spi->bits_per_word;
- ret = bcm_qspi_transfer_one(master, spi, &t[1]);
+ ret = bcm_qspi_transfer_one(host, spi, &t[1]);
}
return ret;
@@ -1191,7 +1191,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
- struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct bcm_qspi *qspi = spi_controller_get_devdata(spi->controller);
int ret = 0;
bool mspi_read = false;
u32 addr = 0, len;
@@ -1486,7 +1486,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
const struct bcm_qspi_data *data;
struct device *dev = &pdev->dev;
struct bcm_qspi *qspi;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *res;
int irq, ret = 0, num_ints = 0;
u32 val;
@@ -1504,13 +1504,13 @@ int bcm_qspi_probe(struct platform_device *pdev,
data = of_id->data;
- master = devm_spi_alloc_master(dev, sizeof(struct bcm_qspi));
- if (!master) {
- dev_err(dev, "error allocating spi_master\n");
+ host = devm_spi_alloc_host(dev, sizeof(struct bcm_qspi));
+ if (!host) {
+ dev_err(dev, "error allocating spi_controller\n");
return -ENOMEM;
}
- qspi = spi_master_get_devdata(master);
+ qspi = spi_controller_get_devdata(host);
qspi->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(qspi->clk))
@@ -1520,23 +1520,23 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->trans_pos.trans = NULL;
qspi->trans_pos.byte = 0;
qspi->trans_pos.mspi_last_trans = true;
- qspi->master = master;
+ qspi->host = host;
- master->bus_num = -1;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
+ host->bus_num = -1;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
SPI_3WIRE;
- master->setup = bcm_qspi_setup;
- master->transfer_one = bcm_qspi_transfer_one;
- master->mem_ops = &bcm_qspi_mem_ops;
- master->cleanup = bcm_qspi_cleanup;
- master->dev.of_node = dev->of_node;
- master->num_chipselect = NUM_CHIPSELECT;
- master->use_gpio_descriptors = true;
+ host->setup = bcm_qspi_setup;
+ host->transfer_one = bcm_qspi_transfer_one;
+ host->mem_ops = &bcm_qspi_mem_ops;
+ host->cleanup = bcm_qspi_cleanup;
+ host->dev.of_node = dev->of_node;
+ host->num_chipselect = NUM_CHIPSELECT;
+ host->use_gpio_descriptors = true;
qspi->big_endian = of_device_is_big_endian(dev->of_node);
if (!of_property_read_u32(dev->of_node, "num-cs", &val))
- master->num_chipselect = val;
+ host->num_chipselect = val;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hif_mspi");
if (!res)
@@ -1659,9 +1659,9 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->xfer_mode.addrlen = -1;
qspi->xfer_mode.hp = -1;
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret < 0) {
- dev_err(dev, "can't register master\n");
+ dev_err(dev, "can't register host\n");
goto qspi_reg_err;
}
@@ -1682,7 +1682,7 @@ void bcm_qspi_remove(struct platform_device *pdev)
{
struct bcm_qspi *qspi = platform_get_drvdata(pdev);
- spi_unregister_master(qspi->master);
+ spi_unregister_controller(qspi->host);
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
kfree(qspi->dev_ids);
@@ -1700,7 +1700,7 @@ static int __maybe_unused bcm_qspi_suspend(struct device *dev)
qspi->s3_strap_override_ctrl =
bcm_qspi_read(qspi, BSPI, BSPI_STRAP_OVERRIDE_CTRL);
- spi_master_suspend(qspi->master);
+ spi_controller_suspend(qspi->host);
clk_disable_unprepare(qspi->clk);
bcm_qspi_hw_uninit(qspi);
@@ -1721,7 +1721,7 @@ static int __maybe_unused bcm_qspi_resume(struct device *dev)
ret = clk_prepare_enable(qspi->clk);
if (!ret)
- spi_master_resume(qspi->master);
+ spi_controller_resume(qspi->host);
return ret;
}
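
The bcm-qspi system-sleep hooks switch to the controller-named helpers; suspend quiesces the SPI core before gating the clock, and resume re-enables the clock before waking it. A sketch of that ordering (the foo_* names and drvdata layout are assumptions):

#include <linux/clk.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

struct foo_qspi {
	struct spi_controller *host;
	struct clk *clk;
};

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_qspi *foo = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(foo->host);	/* was spi_master_suspend() */
	if (ret)
		return ret;

	clk_disable_unprepare(foo->clk);
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_qspi *foo = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(foo->clk);
	if (ret)
		return ret;

	return spi_controller_resume(foo->host);	/* was spi_master_resume() */
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);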
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 3b253da98c05..e7bb2714678a 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h> /* FIXME: using chip internals */
#include <linux/gpio/driver.h> /* FIXME: using chip internals */
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(polling_limit_us,
* These are counted as well in @count_transfer_polling and
* @count_transfer_irq
* @count_transfer_dma: count how often dma mode is used
- * @slv: SPI slave currently selected
+ * @target: SPI target currently selected
* (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
* @tx_dma_active: whether a TX DMA descriptor is in progress
* @rx_dma_active: whether a RX DMA descriptor is in progress
@@ -135,7 +135,7 @@ struct bcm2835_spi {
u64 count_transfer_irq_after_polling;
u64 count_transfer_dma;
- struct bcm2835_spidev *slv;
+ struct bcm2835_spidev *target;
unsigned int tx_dma_active;
unsigned int rx_dma_active;
struct dma_async_tx_descriptor *fill_tx_desc;
@@ -143,14 +143,14 @@ struct bcm2835_spi {
};
/**
- * struct bcm2835_spidev - BCM2835 SPI slave
+ * struct bcm2835_spidev - BCM2835 SPI target
* @prepare_cs: precalculated CS register value for ->prepare_message()
- * (uses slave-specific clock polarity and phase settings)
+ * (uses target-specific clock polarity and phase settings)
* @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
* (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
* @clear_rx_addr: bus address of @clear_rx_cs
* @clear_rx_cs: precalculated CS register value to clear RX FIFO
- * (uses slave-specific clock polarity and phase settings)
+ * (uses target-specific clock polarity and phase settings)
*/
struct bcm2835_spidev {
u32 prepare_cs;
@@ -434,7 +434,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
- * @ctlr: SPI master controller
+ * @ctlr: SPI host controller
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @cs: CS register
@@ -596,7 +596,7 @@ out:
/**
* bcm2835_spi_dma_rx_done() - callback for DMA RX channel
- * @data: SPI master controller
+ * @data: SPI host controller
*
* Used for bidirectional and RX-only transfers.
*/
@@ -624,7 +624,7 @@ static void bcm2835_spi_dma_rx_done(void *data)
/**
* bcm2835_spi_dma_tx_done() - callback for DMA TX channel
- * @data: SPI master controller
+ * @data: SPI host controller
*
* Used for TX-only transfers.
*/
@@ -635,7 +635,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
/* busy-wait for TX FIFO to empty */
while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
- bcm2835_wr(bs, BCM2835_SPI_CS, bs->slv->clear_rx_cs);
+ bcm2835_wr(bs, BCM2835_SPI_CS, bs->target->clear_rx_cs);
bs->tx_dma_active = false;
smp_wmb();
@@ -655,10 +655,10 @@ static void bcm2835_spi_dma_tx_done(void *data)
/**
* bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
- * @ctlr: SPI master controller
+ * @ctlr: SPI host controller
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
- * @slv: BCM2835 SPI slave
+ * @target: BCM2835 SPI target
* @is_tx: whether to submit DMA descriptor for TX or RX sglist
*
* Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
@@ -667,7 +667,7 @@ static void bcm2835_spi_dma_tx_done(void *data)
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
- struct bcm2835_spidev *slv,
+ struct bcm2835_spidev *target,
bool is_tx)
{
struct dma_chan *chan;
@@ -707,7 +707,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
} else if (!tfr->rx_buf) {
desc->callback = bcm2835_spi_dma_tx_done;
desc->callback_param = ctlr;
- bs->slv = slv;
+ bs->target = target;
}
/* submit it to DMA-engine */
@@ -718,9 +718,9 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
/**
* bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
- * @ctlr: SPI master controller
+ * @ctlr: SPI host controller
* @tfr: SPI transfer
- * @slv: BCM2835 SPI slave
+ * @target: BCM2835 SPI target
* @cs: CS register
*
* For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
@@ -732,7 +732,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
* clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
*
* The CS register value is precalculated in bcm2835_spi_setup(). Normally
- * this is called only once, on slave registration. A DMA descriptor to write
+ * this is called only once, on target registration. A DMA descriptor to write
* this value is preallocated in bcm2835_dma_init(). All that's left to do
* when performing a TX-only transfer is to submit this descriptor to the RX
* DMA channel. Latency is thereby minimized. The descriptor does not
@@ -765,7 +765,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
*/
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
struct spi_transfer *tfr,
- struct bcm2835_spidev *slv,
+ struct bcm2835_spidev *target,
u32 cs)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
@@ -783,7 +783,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
/* setup tx-DMA */
if (bs->tx_buf) {
- ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, true);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, true);
} else {
cookie = dmaengine_submit(bs->fill_tx_desc);
ret = dma_submit_error(cookie);
@@ -809,9 +809,9 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
* this saves 10us or more.
*/
if (bs->rx_buf) {
- ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, slv, false);
+ ret = bcm2835_spi_prepare_sg(ctlr, tfr, bs, target, false);
} else {
- cookie = dmaengine_submit(slv->clear_rx_desc);
+ cookie = dmaengine_submit(target->clear_rx_desc);
ret = dma_submit_error(cookie);
}
if (ret) {
@@ -903,15 +903,15 @@ static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
/* get tx/rx dma */
ctlr->dma_tx = dma_request_chan(dev, "tx");
if (IS_ERR(ctlr->dma_tx)) {
- dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
- ret = PTR_ERR(ctlr->dma_tx);
+ ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_tx),
+ "no tx-dma configuration found - not using dma mode\n");
ctlr->dma_tx = NULL;
goto err;
}
ctlr->dma_rx = dma_request_chan(dev, "rx");
if (IS_ERR(ctlr->dma_rx)) {
- dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
- ret = PTR_ERR(ctlr->dma_rx);
+ ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_rx),
+ "no rx-dma configuration found - not using dma mode\n");
ctlr->dma_rx = NULL;
goto err_release;
}
@@ -1050,10 +1050,10 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
struct spi_transfer *tfr)
{
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
- struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct bcm2835_spidev *target = spi_get_ctldata(spi);
unsigned long spi_hz, cdiv;
unsigned long hz_per_byte, byte_limit;
- u32 cs = slv->prepare_cs;
+ u32 cs = target->prepare_cs;
/* set clock */
spi_hz = tfr->speed_hz;
@@ -1101,7 +1101,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
* this 1 idle clock cycle pattern but runs the spi clock without gaps
*/
if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
- return bcm2835_spi_transfer_one_dma(ctlr, tfr, slv, cs);
+ return bcm2835_spi_transfer_one_dma(ctlr, tfr, target, cs);
/* run in interrupt-mode */
return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
@@ -1112,7 +1112,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
{
struct spi_device *spi = msg->spi;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
- struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct bcm2835_spidev *target = spi_get_ctldata(spi);
int ret;
if (ctlr->can_dma) {
@@ -1131,7 +1131,7 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
* Set up clock polarity before spi_transfer_one_message() asserts
* chip select to avoid a gratuitous clock signal edge.
*/
- bcm2835_wr(bs, BCM2835_SPI_CS, slv->prepare_cs);
+ bcm2835_wr(bs, BCM2835_SPI_CS, target->prepare_cs);
return 0;
}
@@ -1163,51 +1163,51 @@ static int chip_match_name(struct gpio_chip *chip, void *data)
static void bcm2835_spi_cleanup(struct spi_device *spi)
{
- struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct bcm2835_spidev *target = spi_get_ctldata(spi);
struct spi_controller *ctlr = spi->controller;
- if (slv->clear_rx_desc)
- dmaengine_desc_free(slv->clear_rx_desc);
+ if (target->clear_rx_desc)
+ dmaengine_desc_free(target->clear_rx_desc);
- if (slv->clear_rx_addr)
+ if (target->clear_rx_addr)
dma_unmap_single(ctlr->dma_rx->device->dev,
- slv->clear_rx_addr,
+ target->clear_rx_addr,
sizeof(u32),
DMA_TO_DEVICE);
- kfree(slv);
+ kfree(target);
}
static int bcm2835_spi_setup_dma(struct spi_controller *ctlr,
struct spi_device *spi,
struct bcm2835_spi *bs,
- struct bcm2835_spidev *slv)
+ struct bcm2835_spidev *target)
{
int ret;
if (!ctlr->dma_rx)
return 0;
- slv->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
- &slv->clear_rx_cs,
- sizeof(u32),
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctlr->dma_rx->device->dev, slv->clear_rx_addr)) {
+ target->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
+ &target->clear_rx_cs,
+ sizeof(u32),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctlr->dma_rx->device->dev, target->clear_rx_addr)) {
dev_err(&spi->dev, "cannot map clear_rx_cs\n");
- slv->clear_rx_addr = 0;
+ target->clear_rx_addr = 0;
return -ENOMEM;
}
- slv->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
- slv->clear_rx_addr,
- sizeof(u32), 0,
- DMA_MEM_TO_DEV, 0);
- if (!slv->clear_rx_desc) {
+ target->clear_rx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
+ target->clear_rx_addr,
+ sizeof(u32), 0,
+ DMA_MEM_TO_DEV, 0);
+ if (!target->clear_rx_desc) {
dev_err(&spi->dev, "cannot prepare clear_rx_desc\n");
return -ENOMEM;
}
- ret = dmaengine_desc_set_reuse(slv->clear_rx_desc);
+ ret = dmaengine_desc_set_reuse(target->clear_rx_desc);
if (ret) {
dev_err(&spi->dev, "cannot reuse clear_rx_desc\n");
return ret;
@@ -1220,26 +1220,26 @@ static int bcm2835_spi_setup(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
- struct bcm2835_spidev *slv = spi_get_ctldata(spi);
+ struct bcm2835_spidev *target = spi_get_ctldata(spi);
struct gpio_chip *chip;
int ret;
u32 cs;
- if (!slv) {
- slv = kzalloc(ALIGN(sizeof(*slv), dma_get_cache_alignment()),
+ if (!target) {
+ target = kzalloc(ALIGN(sizeof(*target), dma_get_cache_alignment()),
GFP_KERNEL);
- if (!slv)
+ if (!target)
return -ENOMEM;
- spi_set_ctldata(spi, slv);
+ spi_set_ctldata(spi, target);
- ret = bcm2835_spi_setup_dma(ctlr, spi, bs, slv);
+ ret = bcm2835_spi_setup_dma(ctlr, spi, bs, target);
if (ret)
goto err_cleanup;
}
/*
- * Precalculate SPI slave's CS register value for ->prepare_message():
+ * Precalculate SPI target's CS register value for ->prepare_message():
* The driver always uses software-controlled GPIO chip select, hence
* set the hardware-controlled native chip select to an invalid value
* to prevent it from interfering.
@@ -1249,18 +1249,18 @@ static int bcm2835_spi_setup(struct spi_device *spi)
cs |= BCM2835_SPI_CS_CPOL;
if (spi->mode & SPI_CPHA)
cs |= BCM2835_SPI_CS_CPHA;
- slv->prepare_cs = cs;
+ target->prepare_cs = cs;
/*
- * Precalculate SPI slave's CS register value to clear RX FIFO
+ * Precalculate SPI target's CS register value to clear RX FIFO
* in case of a TX-only DMA transfer.
*/
if (ctlr->dma_rx) {
- slv->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
+ target->clear_rx_cs = cs | BCM2835_SPI_CS_TA |
BCM2835_SPI_CS_DMAEN |
BCM2835_SPI_CS_CLEAR_RX;
dma_sync_single_for_device(ctlr->dma_rx->device->dev,
- slv->clear_rx_addr,
+ target->clear_rx_addr,
sizeof(u32),
DMA_TO_DEVICE);
}
@@ -1328,7 +1328,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
struct bcm2835_spi *bs;
int err;
- ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
if (!ctlr)
return -ENOMEM;
@@ -1360,10 +1360,12 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->max_speed_hz = clk_get_rate(bs->clk) / 2;
bs->irq = platform_get_irq(pdev, 0);
- if (bs->irq <= 0)
- return bs->irq ? bs->irq : -ENODEV;
+ if (bs->irq < 0)
+ return bs->irq;
- clk_prepare_enable(bs->clk);
+ err = clk_prepare_enable(bs->clk);
+ if (err)
+ return err;
bs->clk_hz = clk_get_rate(bs->clk);
err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
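
Two small robustness fixes sit in the bcm2835 hunks: the DMA channel requests switch to dev_err_probe(), which stays quiet on -EPROBE_DEFER and records the deferral reason, and clk_prepare_enable()'s return value is finally checked. A sketch of the dev_err_probe() shape (foo_* is hypothetical):

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/spi/spi.h>

static int foo_dma_init(struct spi_controller *ctlr, struct device *dev)
{
	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(ctlr->dma_tx)) {
		/* logs (unless deferring) and returns the error code */
		int ret = dev_err_probe(dev, PTR_ERR(ctlr->dma_tx),
					"no tx-dma configuration found\n");

		ctlr->dma_tx = NULL;
		return ret;
	}

	return 0;
}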
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 288f7b994b36..6d2a5d9f2498 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -20,9 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
@@ -233,8 +231,8 @@ static void bcm2835aux_spi_transfer_helper(struct bcm2835aux_spi *bs)
static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
/* IRQ may be shared, so return if our interrupts are disabled */
if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
@@ -253,17 +251,17 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
/* and if rx_len is 0 then disable interrupts and wake up completion */
if (!bs->rx_len) {
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
return IRQ_HANDLED;
}
-static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+static int __bcm2835aux_spi_transfer_one_irq(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
/* enable interrupts */
bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
@@ -274,11 +272,11 @@ static int __bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
return 1;
}
-static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
+static int bcm2835aux_spi_transfer_one_irq(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
/* update statistics */
bs->count_transfer_irq++;
@@ -296,14 +294,14 @@ static int bcm2835aux_spi_transfer_one_irq(struct spi_master *master,
}
/* now run the interrupt mode */
- return __bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+ return __bcm2835aux_spi_transfer_one_irq(host, spi, tfr);
}
-static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
+static int bcm2835aux_spi_transfer_one_poll(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
unsigned long timeout;
/* update statistics */
@@ -330,7 +328,7 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
bs->tx_len, bs->rx_len);
/* forward to interrupt handler */
bs->count_transfer_irq_after_poll++;
- return __bcm2835aux_spi_transfer_one_irq(master,
+ return __bcm2835aux_spi_transfer_one_irq(host,
spi, tfr);
}
}
@@ -339,11 +337,11 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
return 0;
}
-static int bcm2835aux_spi_transfer_one(struct spi_master *master,
+static int bcm2835aux_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
unsigned long spi_hz, clk_hz, speed;
unsigned long hz_per_byte, byte_limit;
@@ -394,17 +392,17 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
/* run in polling mode for short transfers */
if (tfr->len < byte_limit)
- return bcm2835aux_spi_transfer_one_poll(master, spi, tfr);
+ return bcm2835aux_spi_transfer_one_poll(host, spi, tfr);
/* run in interrupt mode for all others */
- return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
+ return bcm2835aux_spi_transfer_one_irq(host, spi, tfr);
}
-static int bcm2835aux_spi_prepare_message(struct spi_master *master,
+static int bcm2835aux_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
@@ -424,20 +422,20 @@ static int bcm2835aux_spi_prepare_message(struct spi_master *master,
return 0;
}
-static int bcm2835aux_spi_unprepare_message(struct spi_master *master,
+static int bcm2835aux_spi_unprepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bcm2835aux_spi_reset_hw(bs);
return 0;
}
-static void bcm2835aux_spi_handle_err(struct spi_master *master,
+static void bcm2835aux_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bcm2835aux_spi_reset_hw(bs);
}
@@ -475,18 +473,18 @@ static int bcm2835aux_spi_setup(struct spi_device *spi)
static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct bcm2835aux_spi *bs;
unsigned long clk_hz;
int err;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*bs));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
- master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
- master->bits_per_word_mask = SPI_BPW_MASK(8);
+ platform_set_drvdata(pdev, host);
+ host->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
/* even though the driver never officially supported native CS
* allow a single native CS for legacy DT support purposes when
* no cs-gpio is configured.
@@ -498,16 +496,16 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
* * cs_delay_usec: cs is always deasserted one SCK cycle after
* a spi_transfer
*/
- master->num_chipselect = 1;
- master->setup = bcm2835aux_spi_setup;
- master->transfer_one = bcm2835aux_spi_transfer_one;
- master->handle_err = bcm2835aux_spi_handle_err;
- master->prepare_message = bcm2835aux_spi_prepare_message;
- master->unprepare_message = bcm2835aux_spi_unprepare_message;
- master->dev.of_node = pdev->dev.of_node;
- master->use_gpio_descriptors = true;
+ host->num_chipselect = 1;
+ host->setup = bcm2835aux_spi_setup;
+ host->transfer_one = bcm2835aux_spi_transfer_one;
+ host->handle_err = bcm2835aux_spi_handle_err;
+ host->prepare_message = bcm2835aux_spi_prepare_message;
+ host->unprepare_message = bcm2835aux_spi_unprepare_message;
+ host->dev.of_node = pdev->dev.of_node;
+ host->use_gpio_descriptors = true;
- bs = spi_master_get_devdata(master);
+ bs = spi_controller_get_devdata(host);
/* the main area */
bs->regs = devm_platform_ioremap_resource(pdev, 0);
@@ -522,8 +520,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
}
bs->irq = platform_get_irq(pdev, 0);
- if (bs->irq <= 0)
- return bs->irq ? bs->irq : -ENODEV;
+ if (bs->irq < 0)
+ return bs->irq;
/* this also enables the HW block */
err = clk_prepare_enable(bs->clk);
@@ -546,15 +544,15 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, bs->irq,
bcm2835aux_spi_interrupt,
IRQF_SHARED,
- dev_name(&pdev->dev), master);
+ dev_name(&pdev->dev), host);
if (err) {
dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
goto out_clk_disable;
}
- err = spi_register_master(master);
+ err = spi_register_controller(host);
if (err) {
- dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ dev_err(&pdev->dev, "could not register SPI host: %d\n", err);
goto out_clk_disable;
}
@@ -569,12 +567,12 @@ out_clk_disable:
static void bcm2835aux_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct bcm2835aux_spi *bs = spi_controller_get_devdata(host);
bcm2835aux_debugfs_remove(bs);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
bcm2835aux_spi_reset_hw(bs);
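
bcm2835aux now registers its shared interrupt with the spi_controller as the cookie, so the handler recovers the driver state via spi_controller_get_devdata() and completes the transfer with spi_finalize_current_transfer(). A stripped-down sketch of that handler shape (the foo_spi layout is an assumption, and the real handler also checks whether the shared line actually fired for it):

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

struct foo_spi {
	int rx_len;
};

static irqreturn_t foo_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct foo_spi *bs = spi_controller_get_devdata(host);

	/* ... drain the FIFOs here, updating bs->rx_len ... */

	if (!bs->rx_len) {
		/* transfer done: wake the core's transfer_one() wait */
		spi_finalize_current_transfer(host);
	}

	return IRQ_HANDLED;
}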
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 9e218e143263..1ca857c2a4aa 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -149,7 +149,7 @@ static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
return sprintf(buf, "%d\n", bs->wait_mode);
}
@@ -158,7 +158,7 @@ static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr
const char *buf, size_t count)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
u32 val;
if (kstrtou32(buf, 10, &val))
@@ -185,7 +185,7 @@ static ssize_t xfer_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
return sprintf(buf, "%d\n", bs->xfer_mode);
}
@@ -194,7 +194,7 @@ static ssize_t xfer_mode_store(struct device *dev, struct device_attribute *attr
const char *buf, size_t count)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(ctrl);
u32 val;
if (kstrtou32(buf, 10, &val))
@@ -262,12 +262,12 @@ static int bcm63xx_hsspi_wait_cmd(struct bcm63xx_hsspi *bs)
return rc;
}
-static bool bcm63xx_prepare_prepend_transfer(struct spi_master *master,
+static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host,
struct spi_message *msg,
struct spi_transfer *t_prepend)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
bool tx_only = false;
struct spi_transfer *t;
@@ -348,7 +348,7 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_master *master,
static int bcm63xx_hsspi_do_prepend_txrx(struct spi_device *spi,
struct spi_transfer *t)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
unsigned int chip_select = spi_get_chipselect(spi, 0);
u16 opcode = 0, val;
const u8 *tx = t->tx_buf;
@@ -467,7 +467,7 @@ static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
unsigned int chip_select = spi_get_chipselect(spi, 0);
u16 opcode = 0, val;
int pending = t->len;
@@ -541,7 +541,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
static int bcm63xx_hsspi_setup(struct spi_device *spi)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = __raw_readl(bs->regs +
@@ -579,7 +579,7 @@ static int bcm63xx_hsspi_setup(struct spi_device *spi)
static int bcm63xx_hsspi_do_dummy_cs_txrx(struct spi_device *spi,
struct spi_message *msg)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
int status = -EINVAL;
int dummy_cs;
bool keep_cs = false;
@@ -653,10 +653,10 @@ static int bcm63xx_hsspi_do_dummy_cs_txrx(struct spi_device *spi,
return status;
}
-static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+static int bcm63xx_hsspi_transfer_one(struct spi_controller *host,
struct spi_message *msg)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
int status = -EINVAL;
bool prependable = false;
@@ -665,7 +665,7 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
mutex_lock(&bs->msg_mutex);
if (bs->xfer_mode != HSSPI_XFER_MODE_DUMMYCS)
- prependable = bcm63xx_prepare_prepend_transfer(master, msg, &t_prepend);
+ prependable = bcm63xx_prepare_prepend_transfer(host, msg, &t_prepend);
if (prependable) {
status = bcm63xx_hsspi_do_prepend_txrx(spi, &t_prepend);
@@ -681,7 +681,7 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
mutex_unlock(&bs->msg_mutex);
msg->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return 0;
}
@@ -723,7 +723,7 @@ static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
static int bcm63xx_hsspi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct bcm63xx_hsspi *bs;
void __iomem *regs;
struct device *dev = &pdev->dev;
@@ -779,13 +779,13 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
}
}
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master) {
+ host = spi_alloc_host(&pdev->dev, sizeof(*bs));
+ if (!host) {
ret = -ENOMEM;
goto out_disable_pll_clk;
}
- bs = spi_master_get_devdata(master);
+ bs = spi_controller_get_devdata(host);
bs->pdev = pdev;
bs->clk = clk;
bs->pll_clk = pll_clk;
@@ -796,17 +796,17 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
bs->prepend_buf = devm_kzalloc(dev, HSSPI_BUFFER_LEN, GFP_KERNEL);
if (!bs->prepend_buf) {
ret = -ENOMEM;
- goto out_put_master;
+ goto out_put_host;
}
mutex_init(&bs->bus_mutex);
mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
- master->mem_ops = &bcm63xx_hsspi_mem_ops;
- master->dev.of_node = dev->of_node;
+ host->mem_ops = &bcm63xx_hsspi_mem_ops;
+ host->dev.of_node = dev->of_node;
if (!dev->of_node)
- master->bus_num = HSSPI_BUS_NUM;
+ host->bus_num = HSSPI_BUS_NUM;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > 8) {
@@ -814,18 +814,18 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
num_cs);
num_cs = HSSPI_SPI_MAX_CS;
}
- master->num_chipselect = num_cs;
- master->setup = bcm63xx_hsspi_setup;
- master->transfer_one_message = bcm63xx_hsspi_transfer_one;
- master->max_transfer_size = bcm63xx_hsspi_max_message_size;
- master->max_message_size = bcm63xx_hsspi_max_message_size;
+ host->num_chipselect = num_cs;
+ host->setup = bcm63xx_hsspi_setup;
+ host->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ host->max_transfer_size = bcm63xx_hsspi_max_message_size;
+ host->max_message_size = bcm63xx_hsspi_max_message_size;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_RX_DUAL | SPI_TX_DUAL;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->auto_runtime_pm = true;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->auto_runtime_pm = true;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
/* Initialize the hardware */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
@@ -844,7 +844,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
pdev->name, bs);
if (ret)
- goto out_put_master;
+ goto out_put_host;
}
pm_runtime_enable(&pdev->dev);
@@ -856,7 +856,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
}
/* register and we are done */
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
goto out_sysgroup_disable;
@@ -868,8 +868,8 @@ out_sysgroup_disable:
sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
out_pm_disable:
pm_runtime_disable(&pdev->dev);
-out_put_master:
- spi_master_put(master);
+out_put_host:
+ spi_controller_put(host);
out_disable_pll_clk:
clk_disable_unprepare(pll_clk);
out_disable_clk:
@@ -880,8 +880,8 @@ out_disable_clk:
static void bcm63xx_hsspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
/* reset the hardware and block queue progress */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
@@ -893,10 +893,10 @@ static void bcm63xx_hsspi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int bcm63xx_hsspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
- spi_master_suspend(master);
+ spi_controller_suspend(host);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
@@ -905,8 +905,8 @@ static int bcm63xx_hsspi_suspend(struct device *dev)
static int bcm63xx_hsspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(bs->clk);
@@ -921,7 +921,7 @@ static int bcm63xx_hsspi_resume(struct device *dev)
}
}
- spi_master_resume(master);
+ spi_controller_resume(host);
return 0;
}
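For reference, the spi-bcm63xx-hsspi.c hunks above, and most of the SPI hunks that follow, are a mechanical move from the deprecated spi_master wrappers to the spi_controller API; the behaviour is unchanged. A condensed sketch of the probe-side pattern, using a hypothetical foo_spi driver purely for illustration:

	struct spi_controller *host;
	struct foo_spi *bs;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*bs));		/* was spi_alloc_master() */
	if (!host)
		return -ENOMEM;

	bs = spi_controller_get_devdata(host);			/* was spi_master_get_devdata() */
	host->transfer_one_message = foo_spi_transfer_one;	/* field names stay the same */

	ret = devm_spi_register_controller(&pdev->dev, host);	/* was devm_spi_register_master() */
	if (ret)
		spi_controller_put(host);			/* was spi_master_put() */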
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 07b5b71b2352..aac41bd05f98 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -186,7 +186,7 @@ static const unsigned int bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(spi->controller);
u8 clk_cfg, reg;
int i;
@@ -217,7 +217,7 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
unsigned int num_transfers)
{
- struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(spi->controller);
u16 msg_ctl;
u16 cmd;
unsigned int i, timeout = 0, prepend_len = 0, len = 0;
@@ -312,10 +312,10 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
return 0;
}
-static int bcm63xx_spi_transfer_one(struct spi_master *master,
+static int bcm63xx_spi_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
struct spi_transfer *t, *first = NULL;
struct spi_device *spi = m->spi;
int status = 0;
@@ -385,18 +385,18 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master,
}
exit:
m->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return 0;
}
-/* This driver supports single master mode only. Hence
+/* This driver supports single host mode only. Hence
* CMD_DONE is the only interrupt we care about
*/
static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = (struct spi_master *)dev_id;
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = (struct spi_controller *)dev_id;
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
u8 intr;
/* Read interrupts and clear them immediately */
@@ -413,7 +413,7 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
static size_t bcm63xx_spi_max_length(struct spi_device *spi)
{
- struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(spi->controller);
return bs->fifo_size;
}
@@ -479,7 +479,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
const unsigned long *bcm63xx_spireg;
struct device *dev = &pdev->dev;
int irq, bus_num;
- struct spi_master *master;
+ struct spi_controller *host;
struct clk *clk;
struct bcm63xx_spi *bs;
int ret;
@@ -525,16 +525,16 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(reset))
return PTR_ERR(reset);
- master = spi_alloc_master(dev, sizeof(*bs));
- if (!master) {
+ host = spi_alloc_host(dev, sizeof(*bs));
+ if (!host) {
dev_err(dev, "out of memory\n");
return -ENOMEM;
}
- bs = spi_master_get_devdata(master);
+ bs = spi_controller_get_devdata(host);
init_completion(&bs->done);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
bs->pdev = pdev;
bs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
@@ -549,21 +549,21 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
bs->fifo_size = bs->reg_offsets[SPI_MSG_DATA_SIZE];
ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
- pdev->name, master);
+ pdev->name, host);
if (ret) {
dev_err(dev, "unable to request irq\n");
goto out_err;
}
- master->dev.of_node = dev->of_node;
- master->bus_num = bus_num;
- master->num_chipselect = num_cs;
- master->transfer_one_message = bcm63xx_spi_transfer_one;
- master->mode_bits = MODEBITS;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->max_transfer_size = bcm63xx_spi_max_length;
- master->max_message_size = bcm63xx_spi_max_length;
- master->auto_runtime_pm = true;
+ host->dev.of_node = dev->of_node;
+ host->bus_num = bus_num;
+ host->num_chipselect = num_cs;
+ host->transfer_one_message = bcm63xx_spi_transfer_one;
+ host->mode_bits = MODEBITS;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->max_transfer_size = bcm63xx_spi_max_length;
+ host->max_message_size = bcm63xx_spi_max_length;
+ host->auto_runtime_pm = true;
bs->msg_type_shift = bs->reg_offsets[SPI_MSG_TYPE_SHIFT];
bs->msg_ctl_width = bs->reg_offsets[SPI_MSG_CTL_WIDTH];
bs->tx_io = (u8 *)(bs->regs + bs->reg_offsets[SPI_MSG_DATA]);
@@ -585,7 +585,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* register and we are done */
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret) {
dev_err(dev, "spi register failed\n");
goto out_pm_disable;
@@ -601,14 +601,14 @@ out_pm_disable:
out_clk_disable:
clk_disable_unprepare(clk);
out_err:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static void bcm63xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
/* reset spi block */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
@@ -619,10 +619,10 @@ static void bcm63xx_spi_remove(struct platform_device *pdev)
static int bcm63xx_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
- spi_master_suspend(master);
+ spi_controller_suspend(host);
clk_disable_unprepare(bs->clk);
@@ -631,15 +631,15 @@ static int bcm63xx_spi_suspend(struct device *dev)
static int bcm63xx_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct bcm63xx_spi *bs = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(bs->clk);
if (ret)
return ret;
- spi_master_resume(master);
+ spi_controller_resume(host);
return 0;
}
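The system-sleep hooks in these drivers follow the same conversion; a minimal sketch of the suspend side, again with an illustrative foo_spi name:

	static int foo_spi_suspend(struct device *dev)
	{
		struct spi_controller *host = dev_get_drvdata(dev);
		struct foo_spi *bs = spi_controller_get_devdata(host);

		spi_controller_suspend(host);	/* was spi_master_suspend() */
		clk_disable_unprepare(bs->clk);

		return 0;
	}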
diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c
index ca1b4741e9f4..9f64afd8164e 100644
--- a/drivers/spi/spi-bcmbca-hsspi.c
+++ b/drivers/spi/spi-bcmbca-hsspi.c
@@ -127,7 +127,7 @@ static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct bcmbca_hsspi *bs = spi_master_get_devdata(ctrl);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(ctrl);
return sprintf(buf, "%d\n", bs->wait_mode);
}
@@ -136,7 +136,7 @@ static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr
const char *buf, size_t count)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct bcmbca_hsspi *bs = spi_master_get_devdata(ctrl);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(ctrl);
u32 val;
if (kstrtou32(buf, 10, &val))
@@ -250,7 +250,7 @@ static int bcmbca_hsspi_wait_cmd(struct bcmbca_hsspi *bs, unsigned int cs)
static int bcmbca_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t,
struct spi_message *msg)
{
- struct bcmbca_hsspi *bs = spi_master_get_devdata(spi->master);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(spi->controller);
unsigned int chip_select = spi_get_chipselect(spi, 0);
u16 opcode = 0, val;
int pending = t->len;
@@ -328,7 +328,7 @@ static int bcmbca_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t,
static int bcmbca_hsspi_setup(struct spi_device *spi)
{
- struct bcmbca_hsspi *bs = spi_master_get_devdata(spi->master);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(spi->controller);
u32 reg;
reg = __raw_readl(bs->regs +
@@ -366,10 +366,10 @@ static int bcmbca_hsspi_setup(struct spi_device *spi)
return 0;
}
-static int bcmbca_hsspi_transfer_one(struct spi_master *master,
+static int bcmbca_hsspi_transfer_one(struct spi_controller *host,
struct spi_message *msg)
{
- struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
struct spi_transfer *t;
struct spi_device *spi = msg->spi;
int status = -EINVAL;
@@ -409,7 +409,7 @@ static int bcmbca_hsspi_transfer_one(struct spi_master *master,
bcmbca_hsspi_set_cs(bs, spi_get_chipselect(spi, 0), false);
msg->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return 0;
}
@@ -431,7 +431,7 @@ static irqreturn_t bcmbca_hsspi_interrupt(int irq, void *dev_id)
static int bcmbca_hsspi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct bcmbca_hsspi *bs;
struct resource *res_mem;
void __iomem *spim_ctrl;
@@ -487,13 +487,13 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
}
}
- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
- if (!master) {
+ host = spi_alloc_host(&pdev->dev, sizeof(*bs));
+ if (!host) {
ret = -ENOMEM;
goto out_disable_pll_clk;
}
- bs = spi_master_get_devdata(master);
+ bs = spi_controller_get_devdata(host);
bs->pdev = pdev;
bs->clk = clk;
bs->pll_clk = pll_clk;
@@ -507,9 +507,9 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
- master->dev.of_node = dev->of_node;
+ host->dev.of_node = dev->of_node;
if (!dev->of_node)
- master->bus_num = HSSPI_BUS_NUM;
+ host->bus_num = HSSPI_BUS_NUM;
of_property_read_u32(dev->of_node, "num-cs", &num_cs);
if (num_cs > 8) {
@@ -517,15 +517,15 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
num_cs);
num_cs = HSSPI_SPI_MAX_CS;
}
- master->num_chipselect = num_cs;
- master->setup = bcmbca_hsspi_setup;
- master->transfer_one_message = bcmbca_hsspi_transfer_one;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
- SPI_RX_DUAL | SPI_TX_DUAL;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->auto_runtime_pm = true;
+ host->num_chipselect = num_cs;
+ host->setup = bcmbca_hsspi_setup;
+ host->transfer_one_message = bcmbca_hsspi_transfer_one;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_RX_DUAL | SPI_TX_DUAL;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->auto_runtime_pm = true;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
/* Initialize the hardware */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
@@ -543,7 +543,7 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, bcmbca_hsspi_interrupt, IRQF_SHARED,
pdev->name, bs);
if (ret)
- goto out_put_master;
+ goto out_put_host;
}
pm_runtime_enable(&pdev->dev);
@@ -555,7 +555,7 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
}
/* register and we are done */
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
goto out_sysgroup_disable;
@@ -567,8 +567,8 @@ out_sysgroup_disable:
sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
out_pm_disable:
pm_runtime_disable(&pdev->dev);
-out_put_master:
- spi_master_put(master);
+out_put_host:
+ spi_controller_put(host);
out_disable_pll_clk:
clk_disable_unprepare(pll_clk);
out_disable_clk:
@@ -578,8 +578,8 @@ out_disable_clk:
static void bcmbca_hsspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
/* reset the hardware and block queue progress */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
@@ -591,10 +591,10 @@ static void bcmbca_hsspi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int bcmbca_hsspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
- spi_master_suspend(master);
+ spi_controller_suspend(host);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
@@ -603,8 +603,8 @@ static int bcmbca_hsspi_suspend(struct device *dev)
static int bcmbca_hsspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(bs->clk);
@@ -619,7 +619,7 @@ static int bcmbca_hsspi_resume(struct device *dev)
}
}
- spi_master_resume(master);
+ spi_controller_resume(host);
return 0;
}
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 2dcbe166df63..0cab48b7875b 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -57,7 +57,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
for (word <<= (32 - bits); likely(bits); bits--) {
/* setup MSB (to slave) on trailing edge */
- if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((flags & SPI_CONTROLLER_NO_TX) == 0) {
if ((word & (1 << 31)) != oldbit) {
setmosi(spi, word & (1 << 31));
oldbit = word & (1 << 31);
@@ -70,7 +70,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
/* sample MSB (from slave) on leading edge */
word <<= 1;
- if ((flags & SPI_MASTER_NO_RX) == 0)
+ if ((flags & SPI_CONTROLLER_NO_RX) == 0)
word |= getmiso(spi);
setsck(spi, cpol);
}
@@ -90,7 +90,7 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
/* setup MSB (to slave) on leading edge */
setsck(spi, !cpol);
- if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((flags & SPI_CONTROLLER_NO_TX) == 0) {
if ((word & (1 << 31)) != oldbit) {
setmosi(spi, word & (1 << 31));
oldbit = word & (1 << 31);
@@ -103,7 +103,7 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
/* sample MSB (from slave) on trailing edge */
word <<= 1;
- if ((flags & SPI_MASTER_NO_RX) == 0)
+ if ((flags & SPI_CONTROLLER_NO_RX) == 0)
word |= getmiso(spi);
}
return word;
@@ -122,7 +122,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
for (; likely(bits); bits--) {
/* setup LSB (to slave) on trailing edge */
- if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((flags & SPI_CONTROLLER_NO_TX) == 0) {
if ((word & 1) != oldbit) {
setmosi(spi, word & 1);
oldbit = word & 1;
@@ -135,7 +135,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
/* sample LSB (from slave) on leading edge */
word >>= 1;
- if ((flags & SPI_MASTER_NO_RX) == 0)
+ if ((flags & SPI_CONTROLLER_NO_RX) == 0)
word |= getmiso(spi) << rxbit;
setsck(spi, cpol);
}
@@ -156,7 +156,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* setup LSB (to slave) on leading edge */
setsck(spi, !cpol);
- if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((flags & SPI_CONTROLLER_NO_TX) == 0) {
if ((word & 1) != oldbit) {
setmosi(spi, word & 1);
oldbit = word & 1;
@@ -169,7 +169,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* sample LSB (from slave) on trailing edge */
word >>= 1;
- if ((flags & SPI_MASTER_NO_RX) == 0)
+ if ((flags & SPI_CONTROLLER_NO_RX) == 0)
word |= getmiso(spi) << rxbit;
}
return word;
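The SPI_MASTER_NO_TX/NO_RX flags renamed in this header gate the MOSI and MISO legs of a half-duplex word. Condensing the loop bodies above into a sketch (setmosi() and getmiso() are the helpers the including driver already provides):

	/* spi-bitbang.c derives the flag from the transfer direction: */
	flags = t->tx_buf ? SPI_CONTROLLER_NO_RX : SPI_CONTROLLER_NO_TX;

	if ((flags & SPI_CONTROLLER_NO_TX) == 0)
		setmosi(spi, word & (1 << 31));	/* drive MOSI only on the TX leg */
	if ((flags & SPI_CONTROLLER_NO_RX) == 0)
		word |= getmiso(spi);		/* sample MISO only on the RX leg */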
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 27d0087f8688..ecd44016c197 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -248,7 +248,7 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
if (spi->mode & SPI_3WIRE) {
unsigned flags;
- flags = t->tx_buf ? SPI_MASTER_NO_RX : SPI_MASTER_NO_TX;
+ flags = t->tx_buf ? SPI_CONTROLLER_NO_RX : SPI_CONTROLLER_NO_TX;
return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, flags);
}
return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t, 0);
@@ -349,11 +349,11 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
/*
* We only need the chipselect callback if we are actually using it.
* If we just use GPIO descriptors, it is surplus. If the
- * SPI_MASTER_GPIO_SS flag is set, we always need to call the
+ * SPI_CONTROLLER_GPIO_SS flag is set, we always need to call the
* driver-specific chipselect routine.
*/
custom_cs = (!master->use_gpio_descriptors ||
- (master->flags & SPI_MASTER_GPIO_SS));
+ (master->flags & SPI_CONTROLLER_GPIO_SS));
if (custom_cs && !bitbang->chipselect)
return -EINVAL;
@@ -371,7 +371,7 @@ int spi_bitbang_init(struct spi_bitbang *bitbang)
master->transfer_one = spi_bitbang_transfer_one;
/*
* When using GPIO descriptors, the ->set_cs() callback doesn't even
- * get called unless SPI_MASTER_GPIO_SS is set.
+ * get called unless SPI_CONTROLLER_GPIO_SS is set.
*/
if (custom_cs)
master->set_cs = spi_bitbang_set_cs;
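The flag rename does not change the chipselect policy: with GPIO descriptors the core normally handles CS itself, and the driver's ->chipselect()/->set_cs() callbacks are only used when SPI_CONTROLLER_GPIO_SS is set. A sketch of a hypothetical bitbang driver that still needs its internal CS logic alongside GPIO CS lines:

	master->use_gpio_descriptors = true;
	master->flags |= SPI_CONTROLLER_GPIO_SS;	/* core keeps calling ->set_cs() */
	bitbang->chipselect = foo_spi_chipselect;	/* required once the flag is set */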
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index cceae816cebc..289b4454242a 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -178,7 +178,7 @@ static void butterfly_attach(struct parport *p)
struct pardevice *pd;
int status;
struct butterfly *pp;
- struct spi_master *master;
+ struct spi_controller *host;
struct device *dev = p->physport->dev;
struct pardev_cb butterfly_cb;
@@ -189,12 +189,12 @@ static void butterfly_attach(struct parport *p)
* and no way to be selective about what it binds to.
*/
- master = spi_alloc_master(dev, sizeof(*pp));
- if (!master) {
+ host = spi_alloc_host(dev, sizeof(*pp));
+ if (!host) {
status = -ENOMEM;
goto done;
}
- pp = spi_master_get_devdata(master);
+ pp = spi_controller_get_devdata(host);
/*
* SPI and bitbang hookup
@@ -202,10 +202,10 @@ static void butterfly_attach(struct parport *p)
* use default setup(), cleanup(), and transfer() methods; and
* only bother implementing mode 0. Start it later.
*/
- master->bus_num = 42;
- master->num_chipselect = 2;
+ host->bus_num = 42;
+ host->num_chipselect = 2;
- pp->bitbang.master = master;
+ pp->bitbang.master = host;
pp->bitbang.chipselect = butterfly_chipselect;
pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
@@ -280,7 +280,7 @@ clean2:
clean1:
parport_unregister_device(pd);
clean0:
- spi_master_put(pp->bitbang.master);
+ spi_controller_put(host);
done:
pr_debug("%s: butterfly probe, fail %d\n", p->name, status);
}
@@ -308,7 +308,7 @@ static void butterfly_detach(struct parport *p)
parport_release(pp->pd);
parport_unregister_device(pp->pd);
- spi_master_put(pp->bitbang.master);
+ spi_controller_put(pp->bitbang.master);
}
static struct parport_driver butterfly_driver = {
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index abf10f92415d..b50db71ac4cc 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -47,6 +46,12 @@
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
+enum {
+ CLK_QSPI_APB = 0,
+ CLK_QSPI_AHB,
+ CLK_QSPI_NUM,
+};
+
struct cqspi_st;
struct cqspi_flash_pdata {
@@ -62,8 +67,9 @@ struct cqspi_flash_pdata {
struct cqspi_st {
struct platform_device *pdev;
- struct spi_master *master;
+ struct spi_controller *host;
struct clk *clk;
+ struct clk *clks[CLK_QSPI_NUM];
unsigned int sclk;
void __iomem *iobase;
@@ -92,6 +98,8 @@ struct cqspi_st {
bool wr_completion;
bool slow_sram;
bool apb_ahb_hazard;
+
+ bool is_jh7110; /* Flag for StarFive JH7110 SoC */
};
struct cqspi_driver_platdata {
@@ -100,6 +108,8 @@ struct cqspi_driver_platdata {
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
+ int (*jh7110_clk_init)(struct platform_device *pdev,
+ struct cqspi_st *cqspi);
};
/* Operation timeout value */
@@ -1369,7 +1379,7 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct cqspi_flash_pdata *f_pdata;
f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
@@ -1575,7 +1585,7 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
static const char *cqspi_get_name(struct spi_mem *mem)
{
- struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &cqspi->pdev->dev;
return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
@@ -1630,31 +1640,77 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
return 0;
}
+static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
+{
+ static struct clk_bulk_data qspiclk[] = {
+ { .id = "apb" },
+ { .id = "ahb" },
+ };
+
+ int ret = 0;
+
+ ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
+ return ret;
+ }
+
+ cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
+ cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;
+
+ ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
+ goto disable_apb_clk;
+ }
+
+ cqspi->is_jh7110 = true;
+
+ return 0;
+
+disable_apb_clk:
+ clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
+
+ return ret;
+}
+
+static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
+{
+ clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
+ clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
+}
static int cqspi_probe(struct platform_device *pdev)
{
const struct cqspi_driver_platdata *ddata;
struct reset_control *rstc, *rstc_ocp, *rstc_ref;
struct device *dev = &pdev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *res_ahb;
struct cqspi_st *cqspi;
int ret;
int irq;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*cqspi));
- if (!master) {
- dev_err(&pdev->dev, "spi_alloc_master failed\n");
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
+ if (!host) {
+ dev_err(&pdev->dev, "devm_spi_alloc_host failed\n");
return -ENOMEM;
}
- master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
- master->mem_ops = &cqspi_mem_ops;
- master->mem_caps = &cqspi_mem_caps;
- master->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
+ host->mem_ops = &cqspi_mem_ops;
+ host->mem_caps = &cqspi_mem_caps;
+ host->dev.of_node = pdev->dev.of_node;
- cqspi = spi_master_get_devdata(master);
+ cqspi = spi_controller_get_devdata(host);
cqspi->pdev = pdev;
- cqspi->master = master;
+ cqspi->host = host;
+ cqspi->is_jh7110 = false;
platform_set_drvdata(pdev, cqspi);
/* Obtain configuration from OF. */
@@ -1741,7 +1797,7 @@ static int cqspi_probe(struct platform_device *pdev)
reset_control_deassert(rstc_ocp);
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
- master->max_speed_hz = cqspi->master_ref_clk_hz;
+ host->max_speed_hz = cqspi->master_ref_clk_hz;
/* write completion is supported by default */
cqspi->wr_completion = true;
@@ -1752,7 +1808,7 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
- master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+ host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
cqspi->use_direct_mode_wr = true;
@@ -1766,6 +1822,12 @@ static int cqspi_probe(struct platform_device *pdev)
if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
cqspi->apb_ahb_hazard = true;
+ if (ddata->jh7110_clk_init) {
+ ret = cqspi_jh7110_clk_init(pdev, cqspi);
+ if (ret)
+ goto probe_clk_failed;
+ }
+
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0")) {
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1786,7 +1848,7 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
- master->num_chipselect = cqspi->num_chipselect;
+ host->num_chipselect = cqspi->num_chipselect;
ret = cqspi_setup_flash(cqspi);
if (ret) {
@@ -1800,7 +1862,7 @@ static int cqspi_probe(struct platform_device *pdev)
goto probe_setup_failed;
}
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
goto probe_setup_failed;
@@ -1822,7 +1884,7 @@ static void cqspi_remove(struct platform_device *pdev)
{
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
- spi_unregister_master(cqspi->master);
+ spi_unregister_controller(cqspi->host);
cqspi_controller_enable(cqspi, 0);
if (cqspi->rx_chan)
@@ -1830,6 +1892,9 @@ static void cqspi_remove(struct platform_device *pdev)
clk_disable_unprepare(cqspi->clk);
+ if (cqspi->is_jh7110)
+ cqspi_jh7110_disable_clk(pdev, cqspi);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
}
@@ -1837,10 +1902,10 @@ static void cqspi_remove(struct platform_device *pdev)
static int cqspi_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
cqspi_controller_enable(cqspi, 0);
clk_disable_unprepare(cqspi->clk);
@@ -1851,7 +1916,7 @@ static int cqspi_suspend(struct device *dev)
static int cqspi_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
@@ -1860,7 +1925,7 @@ static int cqspi_resume(struct device *dev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);
@@ -1897,6 +1962,7 @@ static const struct cqspi_driver_platdata versal_ospi = {
static const struct cqspi_driver_platdata jh7110_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
+ .jh7110_clk_init = cqspi_jh7110_clk_init,
};
static const struct cqspi_driver_platdata pensando_cdns_qspi = {
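The JH7110 support above pulls its two extra clocks with the clk_bulk helpers and enables them one at a time so each failure gets its own message. For context, the same idiom can be written more compactly when per-clock error reporting is not needed; a sketch, not what the patch does:

	static struct clk_bulk_data qspiclk[] = {
		{ .id = "apb" },
		{ .id = "ahb" },
	};

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
	if (ret)
		return ret;

	/* prepares and enables both clocks, rolling back on failure */
	ret = clk_bulk_prepare_enable(ARRAY_SIZE(qspiclk), qspiclk);
	if (ret)
		return ret;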
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
index ce4a3145f065..b7e04b03be58 100644
--- a/drivers/spi/spi-cadence-xspi.c
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -11,7 +11,6 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -420,7 +419,7 @@ static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct cdns_xspi_dev *cdns_xspi =
- spi_master_get_devdata(mem->spi->master);
+ spi_controller_get_devdata(mem->spi->controller);
int ret = 0;
ret = cdns_xspi_mem_op(cdns_xspi, mem, op);
@@ -431,7 +430,7 @@ static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct cdns_xspi_dev *cdns_xspi =
- spi_master_get_devdata(mem->spi->master);
+ spi_controller_get_devdata(mem->spi->controller);
op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize);
@@ -528,26 +527,26 @@ static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
static int cdns_xspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct spi_master *master = NULL;
+ struct spi_controller *host = NULL;
struct cdns_xspi_dev *cdns_xspi = NULL;
struct resource *res;
int ret;
- master = devm_spi_alloc_master(dev, sizeof(*cdns_xspi));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(*cdns_xspi));
+ if (!host)
return -ENOMEM;
- master->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
+ host->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
SPI_MODE_0 | SPI_MODE_3;
- master->mem_ops = &cadence_xspi_mem_ops;
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = -1;
+ host->mem_ops = &cadence_xspi_mem_ops;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = -1;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- cdns_xspi = spi_master_get_devdata(master);
+ cdns_xspi = spi_controller_get_devdata(host);
cdns_xspi->pdev = pdev;
cdns_xspi->dev = &pdev->dev;
cdns_xspi->cur_cs = 0;
@@ -597,15 +596,15 @@ static int cdns_xspi_probe(struct platform_device *pdev)
return ret;
}
- master->num_chipselect = 1 << cdns_xspi->hw_num_banks;
+ host->num_chipselect = 1 << cdns_xspi->hw_num_banks;
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret) {
- dev_err(dev, "Failed to register SPI master\n");
+ dev_err(dev, "Failed to register SPI host\n");
return ret;
}
- dev_info(dev, "Successfully registered SPI master\n");
+ dev_info(dev, "Successfully registered SPI host\n");
return 0;
}
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index de8fe3c5becb..12c940ba074a 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Cadence SPI controller driver (master and slave mode)
+ * Cadence SPI controller driver (host and target mode)
*
* Copyright (C) 2008 - 2014 Xilinx, Inc.
*
@@ -59,10 +59,10 @@
CDNS_SPI_CR_BAUD_DIV_4)
/*
- * SPI Configuration Register - Baud rate and slave select
+ * SPI Configuration Register - Baud rate and target select
*
* These are the values used in the calculation of baud rate divisor and
- * setting the slave select.
+ * setting the target select.
*/
#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
@@ -141,20 +141,20 @@ static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
/**
* cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
* @xspi: Pointer to the cdns_spi structure
- * @is_slave: Flag to indicate slave or master mode
- * * On reset the SPI controller is configured to slave or master mode.
- * In master mode baud rate divisor is set to 4, threshold value for TX FIFO
+ * @is_target: Flag to indicate target or host mode
+ * * On reset the SPI controller is configured to target or host mode.
+ * In host mode baud rate divisor is set to 4, threshold value for TX FIFO
* not full interrupt is set to 1 and size of the word to be transferred as 8 bit.
*
* This function initializes the SPI controller to disable and clear all the
- * interrupts, enable manual slave select and manual start, deselect all the
+ * interrupts, enable manual target select and manual start, deselect all the
* chip select lines, and enable the SPI controller.
*/
-static void cdns_spi_init_hw(struct cdns_spi *xspi, bool is_slave)
+static void cdns_spi_init_hw(struct cdns_spi *xspi, bool is_target)
{
u32 ctrl_reg = 0;
- if (!is_slave)
+ if (!is_target)
ctrl_reg |= CDNS_SPI_CR_DEFAULT;
if (xspi->is_decoded_cs)
@@ -185,10 +185,10 @@ static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
if (is_high) {
- /* Deselect the slave */
+ /* Deselect the target */
ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
- /* Select the slave */
+ /* Select the target */
ctrl_reg &= ~CDNS_SPI_CR_SSCTRL;
if (!(xspi->is_decoded_cs))
ctrl_reg |= ((~(CDNS_SPI_SS0 << spi_get_chipselect(spi, 0))) <<
@@ -227,7 +227,7 @@ static void cdns_spi_config_clock_mode(struct spi_device *spi)
/*
* Just writing the CR register does not seem to apply the clock
* setting changes. This is problematic when changing the clock
- * polarity as it will cause the SPI slave to see spurious clock
+ * polarity as it will cause the SPI target to see spurious clock
* transitions. To workaround the issue toggle the ER register.
*/
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
@@ -317,12 +317,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
xspi->rx_bytes -= nrx;
while (ntx || nrx) {
- /* When xspi in busy condition, bytes may send failed,
- * then spi control did't work thoroughly, add one byte delay
- */
- if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
- udelay(10);
-
if (ntx) {
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -392,6 +386,11 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
if (xspi->tx_bytes) {
cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);
} else {
+ /* Fixed delay due to controller limitation with
+ * RX_NEMPTY incorrect status
+ * Xilinx AR:65885 contains more details
+ */
+ udelay(10);
cdns_spi_process_fifo(xspi, 0, trans_cnt);
cdns_spi_write(xspi, CDNS_SPI_IDR,
CDNS_SPI_IXR_DEFAULT);
@@ -406,7 +405,7 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
static int cdns_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
- if (!spi_controller_is_slave(ctlr))
+ if (!spi_controller_is_target(ctlr))
cdns_spi_config_clock_mode(msg->spi);
return 0;
}
@@ -418,9 +417,9 @@ static int cdns_prepare_message(struct spi_controller *ctlr,
* @transfer: Pointer to the spi_transfer structure which provides
* information about next transfer parameters
*
- * This function in master mode fills the TX FIFO, starts the SPI transfer and
+ * This function in host mode fills the TX FIFO, starts the SPI transfer and
* returns a positive transfer count so that core will wait for completion.
- * This function in slave mode fills the TX FIFO and wait for transfer trigger.
+ * This function in target mode fills the TX FIFO and waits for the transfer trigger.
*
* Return: Number of bytes transferred in the last transfer
*/
@@ -435,16 +434,22 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
xspi->tx_bytes = transfer->len;
xspi->rx_bytes = transfer->len;
- if (!spi_controller_is_slave(ctlr)) {
+ if (!spi_controller_is_target(ctlr)) {
cdns_spi_setup_transfer(spi, transfer);
} else {
/* Set TX empty threshold to half of FIFO depth
- * only if TX bytes are more than half FIFO depth.
+ * only if TX bytes are more than FIFO depth.
*/
if (xspi->tx_bytes > xspi->tx_fifo_depth)
cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
}
+ /* When xspi is in a busy condition, bytes may fail to send and the
+ * SPI transfer may not complete cleanly, so add a one-byte delay
+ */
+ if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
+ udelay(10);
+
cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
spi_transfer_delay_exec(transfer);
@@ -457,7 +462,7 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
* @ctlr: Pointer to the spi_controller structure which provides
* information about the controller.
*
- * This function enables SPI master controller.
+ * This function enables SPI host controller.
*
* Return: 0 always
*/
@@ -475,7 +480,7 @@ static int cdns_prepare_transfer_hardware(struct spi_controller *ctlr)
* @ctlr: Pointer to the spi_controller structure which provides
* information about the controller.
*
- * This function disables the SPI master controller when no slave selected.
+ * This function disables the SPI host controller when no target is selected.
* This function flush out if any pending data in FIFO.
*
* Return: 0 always
@@ -486,15 +491,15 @@ static int cdns_unprepare_transfer_hardware(struct spi_controller *ctlr)
u32 ctrl_reg;
unsigned int cnt = xspi->tx_fifo_depth;
- if (spi_controller_is_slave(ctlr)) {
+ if (spi_controller_is_target(ctlr)) {
while (cnt--)
cdns_spi_read(xspi, CDNS_SPI_RXD);
}
- /* Disable the SPI if slave is deselected */
+ /* Disable the SPI if target is deselected */
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT;
- if (ctrl_reg == CDNS_SPI_NOSS || spi_controller_is_slave(ctlr))
+ if (ctrl_reg == CDNS_SPI_NOSS || spi_controller_is_target(ctlr))
cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
/* Reset to default */
@@ -521,14 +526,14 @@ static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi)
}
/**
- * cdns_slave_abort - Abort slave transfer
+ * cdns_target_abort - Abort target transfer
* @ctlr: Pointer to the spi_controller structure
*
- * This function abort slave transfer if there any transfer timeout.
+ * This function aborts a target transfer if there is any transfer timeout.
*
* Return: 0 always
*/
-static int cdns_slave_abort(struct spi_controller *ctlr)
+static int cdns_target_abort(struct spi_controller *ctlr)
{
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
u32 intr_status;
@@ -555,13 +560,13 @@ static int cdns_spi_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct cdns_spi *xspi;
u32 num_cs;
- bool slave;
+ bool target;
- slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
- if (slave)
- ctlr = spi_alloc_slave(&pdev->dev, sizeof(*xspi));
+ target = of_property_read_bool(pdev->dev.of_node, "spi-slave");
+ if (target)
+ ctlr = spi_alloc_target(&pdev->dev, sizeof(*xspi));
else
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*xspi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*xspi));
if (!ctlr)
return -ENOMEM;
@@ -589,7 +594,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto remove_ctlr;
}
- if (!spi_controller_is_slave(ctlr)) {
+ if (!spi_controller_is_target(ctlr)) {
xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
if (IS_ERR(xspi->ref_clk)) {
dev_err(&pdev->dev, "ref_clk clock not found.\n");
@@ -624,11 +629,11 @@ static int cdns_spi_probe(struct platform_device *pdev)
cdns_spi_detect_fifo_depth(xspi);
/* SPI controller initializations */
- cdns_spi_init_hw(xspi, spi_controller_is_slave(ctlr));
+ cdns_spi_init_hw(xspi, spi_controller_is_target(ctlr));
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- ret = -ENXIO;
+ if (irq < 0) {
+ ret = irq;
goto clk_dis_all;
}
@@ -648,7 +653,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
- if (!spi_controller_is_slave(ctlr)) {
+ if (!spi_controller_is_target(ctlr)) {
ctlr->mode_bits |= SPI_CS_HIGH;
ctlr->set_cs = cdns_spi_chipselect;
ctlr->auto_runtime_pm = true;
@@ -660,7 +665,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
pm_runtime_put_autosuspend(&pdev->dev);
} else {
ctlr->mode_bits |= SPI_NO_CS;
- ctlr->slave_abort = cdns_slave_abort;
+ ctlr->target_abort = cdns_target_abort;
}
ret = spi_register_controller(ctlr);
if (ret) {
@@ -671,7 +676,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
return ret;
clk_dis_all:
- if (!spi_controller_is_slave(ctlr)) {
+ if (!spi_controller_is_target(ctlr)) {
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(xspi->ref_clk);
@@ -690,8 +695,6 @@ remove_ctlr:
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees all resources allocated to
* the device.
- *
- * Return: 0 on success and error value on error
*/
static void cdns_spi_remove(struct platform_device *pdev)
{
@@ -737,7 +740,7 @@ static int __maybe_unused cdns_spi_resume(struct device *dev)
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_controller_get_devdata(ctlr);
- cdns_spi_init_hw(xspi, spi_controller_is_slave(ctlr));
+ cdns_spi_init_hw(xspi, spi_controller_is_target(ctlr));
return spi_controller_resume(ctlr);
}
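Beyond the host/target wording, the Cadence driver keeps reading the existing "spi-slave" devicetree property (the binding is unchanged) and maps it onto the target-mode allocation and callbacks; a condensed sketch of the probe decisions above:

	target = of_property_read_bool(pdev->dev.of_node, "spi-slave");
	ctlr = target ? spi_alloc_target(&pdev->dev, sizeof(*xspi))
		      : spi_alloc_host(&pdev->dev, sizeof(*xspi));

	if (spi_controller_is_target(ctlr))
		ctlr->target_abort = cdns_target_abort;	/* was ->slave_abort */

The interrupt check also tightens from irq <= 0 to irq < 0, matching platform_get_irq(), which reports failure with a negative errno.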
diff --git a/drivers/spi/spi-cavium-octeon.c b/drivers/spi/spi-cavium-octeon.c
index 58060be33106..4511c3b31223 100644
--- a/drivers/spi/spi-cavium-octeon.c
+++ b/drivers/spi/spi-cavium-octeon.c
@@ -19,15 +19,15 @@
static int octeon_spi_probe(struct platform_device *pdev)
{
void __iomem *reg_base;
- struct spi_master *master;
+ struct spi_controller *host;
struct octeon_spi *p;
int err = -ENOENT;
- master = spi_alloc_master(&pdev->dev, sizeof(struct octeon_spi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(struct octeon_spi));
+ if (!host)
return -ENOMEM;
- p = spi_master_get_devdata(master);
- platform_set_drvdata(pdev, master);
+ p = spi_controller_get_devdata(host);
+ platform_set_drvdata(pdev, host);
reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base)) {
@@ -43,21 +43,21 @@ static int octeon_spi_probe(struct platform_device *pdev)
p->regs.tx = 0x10;
p->regs.data = 0x80;
- master->num_chipselect = 4;
- master->mode_bits = SPI_CPHA |
+ host->num_chipselect = 4;
+ host->mode_bits = SPI_CPHA |
SPI_CPOL |
SPI_CS_HIGH |
SPI_LSB_FIRST |
SPI_3WIRE;
- master->transfer_one_message = octeon_spi_transfer_one_message;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ host->transfer_one_message = octeon_spi_transfer_one_message;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
- master->dev.of_node = pdev->dev.of_node;
- err = devm_spi_register_master(&pdev->dev, master);
+ host->dev.of_node = pdev->dev.of_node;
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
- dev_err(&pdev->dev, "register master failed: %d\n", err);
+ dev_err(&pdev->dev, "register host failed: %d\n", err);
goto fail;
}
@@ -65,14 +65,14 @@ static int octeon_spi_probe(struct platform_device *pdev)
return 0;
fail:
- spi_master_put(master);
+ spi_controller_put(host);
return err;
}
static void octeon_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct octeon_spi *p = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct octeon_spi *p = spi_controller_get_devdata(host);
/* Clear the CSENA* and put everything in a known state. */
writeq(0, p->register_base + OCTEON_SPI_CFG(p));
diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c
index 60c0d6934654..f7c378a5f1bc 100644
--- a/drivers/spi/spi-cavium-thunderx.c
+++ b/drivers/spi/spi-cavium-thunderx.c
@@ -20,15 +20,15 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct octeon_spi *p;
int ret;
- master = spi_alloc_master(dev, sizeof(struct octeon_spi));
- if (!master)
+ host = spi_alloc_host(dev, sizeof(struct octeon_spi));
+ if (!host)
return -ENOMEM;
- p = spi_master_get_devdata(master);
+ p = spi_controller_get_devdata(host);
ret = pcim_enable_device(pdev);
if (ret)
@@ -64,18 +64,18 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
p->sys_freq = SYS_FREQ_DEFAULT;
dev_info(dev, "Set system clock to %u\n", p->sys_freq);
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->num_chipselect = 4;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->num_chipselect = 4;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH |
SPI_LSB_FIRST | SPI_3WIRE;
- master->transfer_one_message = octeon_spi_transfer_one_message;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
- master->dev.of_node = pdev->dev.of_node;
+ host->transfer_one_message = octeon_spi_transfer_one_message;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ host->dev.of_node = pdev->dev.of_node;
- pci_set_drvdata(pdev, master);
+ pci_set_drvdata(pdev, host);
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
goto error;
@@ -84,16 +84,16 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
error:
clk_disable_unprepare(p->clk);
pci_release_regions(pdev);
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static void thunderx_spi_remove(struct pci_dev *pdev)
{
- struct spi_master *master = pci_get_drvdata(pdev);
+ struct spi_controller *host = pci_get_drvdata(pdev);
struct octeon_spi *p;
- p = spi_master_get_devdata(master);
+ p = spi_controller_get_devdata(host);
if (!p)
return;
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index c005ed26a3e1..5552ccd716fc 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -33,10 +33,10 @@ struct spi_clps711x_data {
int len;
};
-static int spi_clps711x_prepare_message(struct spi_master *master,
+static int spi_clps711x_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_clps711x_data *hw = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
/* Setup mode for transfer */
@@ -45,11 +45,11 @@ static int spi_clps711x_prepare_message(struct spi_master *master,
SYSCON3_ADCCKNSEN : 0);
}
-static int spi_clps711x_transfer_one(struct spi_master *master,
+static int spi_clps711x_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_clps711x_data *hw = spi_controller_get_devdata(host);
u8 data;
clk_set_rate(hw->spi_clk, xfer->speed_hz ? : spi->max_speed_hz);
@@ -68,8 +68,8 @@ static int spi_clps711x_transfer_one(struct spi_master *master,
static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct spi_clps711x_data *hw = spi_controller_get_devdata(host);
u8 data;
/* Handle RX */
@@ -83,7 +83,7 @@ static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
hw->syncio);
} else
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
@@ -92,26 +92,26 @@ static int spi_clps711x_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_clps711x_data *hw;
- struct spi_master *master;
+ struct spi_controller *host;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- master = spi_alloc_master(&pdev->dev, sizeof(*hw));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*hw));
+ if (!host)
return -ENOMEM;
- master->use_gpio_descriptors = true;
- master->bus_num = -1;
- master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
- master->dev.of_node = pdev->dev.of_node;
- master->prepare_message = spi_clps711x_prepare_message;
- master->transfer_one = spi_clps711x_transfer_one;
+ host->use_gpio_descriptors = true;
+ host->bus_num = -1;
+ host->mode_bits = SPI_CPHA | SPI_CS_HIGH;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
+ host->dev.of_node = pdev->dev.of_node;
+ host->prepare_message = spi_clps711x_prepare_message;
+ host->transfer_one = spi_clps711x_transfer_one;
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
hw->spi_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(hw->spi_clk)) {
@@ -138,16 +138,16 @@ static int spi_clps711x_probe(struct platform_device *pdev)
readl(hw->syncio);
ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
- dev_name(&pdev->dev), master);
+ dev_name(&pdev->dev), host);
if (ret)
goto err_out;
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (!ret)
return 0;
err_out:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index b1bd8a6b5bf9..f0b630fe16c3 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -286,7 +286,7 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(spi->controller);
bool cs_high = spi->mode & SPI_CS_HIGH;
if (enable)
@@ -295,11 +295,11 @@ static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
mcfqspi_cs_deselect(mcfqspi, spi_get_chipselect(spi, 0), cs_high);
}
-static int mcfqspi_transfer_one(struct spi_master *master,
+static int mcfqspi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
u16 qmr = MCFQSPI_QMR_MSTR;
qmr |= t->bits_per_word << 10;
@@ -323,7 +323,7 @@ static int mcfqspi_transfer_one(struct spi_master *master,
static int mcfqspi_setup(struct spi_device *spi)
{
- mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
+ mcfqspi_cs_deselect(spi_controller_get_devdata(spi->controller),
spi_get_chipselect(spi, 0), spi->mode & SPI_CS_HIGH);
dev_dbg(&spi->dev,
@@ -337,7 +337,7 @@ static int mcfqspi_setup(struct spi_device *spi)
static int mcfqspi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct mcfqspi *mcfqspi;
struct mcfqspi_platform_data *pdata;
int status;
@@ -353,13 +353,13 @@ static int mcfqspi_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
- if (master == NULL) {
- dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*mcfqspi));
+ if (host == NULL) {
+ dev_dbg(&pdev->dev, "spi_alloc_host failed\n");
return -ENOMEM;
}
- mcfqspi = spi_master_get_devdata(master);
+ mcfqspi = spi_controller_get_devdata(host);
mcfqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcfqspi->iobase)) {
@@ -381,53 +381,50 @@ static int mcfqspi_probe(struct platform_device *pdev)
goto fail0;
}
- mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
+ mcfqspi->clk = devm_clk_get_enabled(&pdev->dev, "qspi_clk");
if (IS_ERR(mcfqspi->clk)) {
dev_dbg(&pdev->dev, "clk_get failed\n");
status = PTR_ERR(mcfqspi->clk);
goto fail0;
}
- clk_prepare_enable(mcfqspi->clk);
- master->bus_num = pdata->bus_num;
- master->num_chipselect = pdata->num_chipselect;
+ host->bus_num = pdata->bus_num;
+ host->num_chipselect = pdata->num_chipselect;
mcfqspi->cs_control = pdata->cs_control;
status = mcfqspi_cs_setup(mcfqspi);
if (status) {
dev_dbg(&pdev->dev, "error initializing cs_control\n");
- goto fail1;
+ goto fail0;
}
init_waitqueue_head(&mcfqspi->waitq);
- master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
- master->setup = mcfqspi_setup;
- master->set_cs = mcfqspi_set_cs;
- master->transfer_one = mcfqspi_transfer_one;
- master->auto_runtime_pm = true;
+ host->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ host->setup = mcfqspi_setup;
+ host->set_cs = mcfqspi_set_cs;
+ host->transfer_one = mcfqspi_transfer_one;
+ host->auto_runtime_pm = true;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
pm_runtime_enable(&pdev->dev);
- status = devm_spi_register_master(&pdev->dev, master);
+ status = devm_spi_register_controller(&pdev->dev, host);
if (status) {
- dev_dbg(&pdev->dev, "spi_register_master failed\n");
- goto fail2;
+ dev_dbg(&pdev->dev, "devm_spi_register_controller failed\n");
+ goto fail1;
}
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
return 0;
-fail2:
+fail1:
pm_runtime_disable(&pdev->dev);
mcfqspi_cs_teardown(mcfqspi);
-fail1:
- clk_disable_unprepare(mcfqspi->clk);
fail0:
- spi_master_put(master);
+ spi_controller_put(host);
dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
@@ -436,8 +433,8 @@ fail0:
static void mcfqspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
pm_runtime_disable(&pdev->dev);
/* disable the hardware (set the baud rate to 0) */
@@ -450,11 +447,11 @@ static void mcfqspi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mcfqspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -465,20 +462,20 @@ static int mcfqspi_suspend(struct device *dev)
static int mcfqspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
clk_enable(mcfqspi->clk);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif
#ifdef CONFIG_PM
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
clk_disable(mcfqspi->clk);
@@ -487,8 +484,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_controller_get_devdata(host);
clk_enable(mcfqspi->clk);
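The coldfire-qspi hunks above are representative of the whole series: spi_alloc_master()/spi_master_get_devdata()/devm_spi_register_master() become spi_alloc_host()/spi_controller_get_devdata()/devm_spi_register_controller(), and the devm_clk_get() + clk_prepare_enable() pair collapses into devm_clk_get_enabled(), which lets the clk_disable_unprepare() error label disappear. A minimal probe sketch of that end state follows; the foo_* names are placeholders and not code from the patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct foo_spi {
	struct clk *clk;
};

static int foo_spi_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *t)
{
	return 0;	/* hardware access elided in this sketch */
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct foo_spi *foo;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*foo));
	if (!host)
		return -ENOMEM;

	foo = spi_controller_get_devdata(host);

	/* clock is acquired and enabled in one step and released
	 * automatically on detach, so no unwind label is needed for it */
	foo->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(foo->clk)) {
		ret = PTR_ERR(foo->clk);
		goto err_put;
	}

	host->transfer_one = foo_spi_transfer_one;
	host->mode_bits = SPI_CPOL | SPI_CPHA;
	platform_set_drvdata(pdev, host);

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret)
		goto err_put;

	return 0;

err_put:
	spi_controller_put(host);
	return ret;
}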
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
new file mode 100644
index 000000000000..453a9b37ce78
--- /dev/null
+++ b/drivers/spi/spi-cs42l43.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// CS42L43 SPI Controller Driver
+//
+// Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+// Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mfd/cs42l43.h>
+#include <linux/mfd/cs42l43-regs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#define CS42L43_FIFO_SIZE 16
+#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
+#define CS42L43_SPI_MAX_LENGTH 65532
+
+enum cs42l43_spi_cmd {
+ CS42L43_WRITE,
+ CS42L43_READ
+};
+
+struct cs42l43_spi {
+ struct device *dev;
+ struct regmap *regmap;
+ struct spi_controller *ctlr;
+};
+
+static const unsigned int cs42l43_clock_divs[] = {
+ 2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+};
+
+static int cs42l43_spi_tx(struct regmap *regmap, const u8 *buf, unsigned int len)
+{
+ const u8 *end = buf + len;
+ u32 val = 0;
+ int ret;
+
+ while (buf < end) {
+ const u8 *block = min(buf + CS42L43_FIFO_SIZE, end);
+
+ while (buf < block) {
+ const u8 *word = min(buf + sizeof(u32), block);
+ int pad = (buf + sizeof(u32)) - word;
+
+ while (buf < word) {
+ val >>= BITS_PER_BYTE;
+ val |= FIELD_PREP(GENMASK(31, 24), *buf);
+
+ buf++;
+ }
+
+ val >>= pad * BITS_PER_BYTE;
+
+ regmap_write(regmap, CS42L43_TX_DATA, val);
+ }
+
+ regmap_write(regmap, CS42L43_TRAN_CONFIG8, CS42L43_SPI_TX_DONE_MASK);
+
+ ret = regmap_read_poll_timeout(regmap, CS42L43_TRAN_STATUS1,
+ val, (val & CS42L43_SPI_TX_REQUEST_MASK),
+ 1000, 5000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs42l43_spi_rx(struct regmap *regmap, u8 *buf, unsigned int len)
+{
+ u8 *end = buf + len;
+ u32 val;
+ int ret;
+
+ while (buf < end) {
+ u8 *block = min(buf + CS42L43_FIFO_SIZE, end);
+
+ ret = regmap_read_poll_timeout(regmap, CS42L43_TRAN_STATUS1,
+ val, (val & CS42L43_SPI_RX_REQUEST_MASK),
+ 1000, 5000);
+ if (ret)
+ return ret;
+
+ while (buf < block) {
+ u8 *word = min(buf + sizeof(u32), block);
+
+ ret = regmap_read(regmap, CS42L43_RX_DATA, &val);
+ if (ret)
+ return ret;
+
+ while (buf < word) {
+ *buf = FIELD_GET(GENMASK(7, 0), val);
+
+ val >>= BITS_PER_BYTE;
+ buf++;
+ }
+ }
+
+ regmap_write(regmap, CS42L43_TRAN_CONFIG8, CS42L43_SPI_RX_DONE_MASK);
+ }
+
+ return 0;
+}
+
+static int cs42l43_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
+ int i, ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cs42l43_clock_divs); i++) {
+ if (CS42L43_SPI_ROOT_HZ / cs42l43_clock_divs[i] <= tfr->speed_hz)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(cs42l43_clock_divs))
+ return -EINVAL;
+
+ regmap_write(priv->regmap, CS42L43_SPI_CLK_CONFIG1, i);
+
+ if (tfr->tx_buf) {
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG3, CS42L43_WRITE);
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG4, tfr->len - 1);
+ } else if (tfr->rx_buf) {
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG3, CS42L43_READ);
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG5, tfr->len - 1);
+ }
+
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG1, CS42L43_SPI_START_MASK);
+
+ if (tfr->tx_buf)
+ ret = cs42l43_spi_tx(priv->regmap, (const u8 *)tfr->tx_buf, tfr->len);
+ else if (tfr->rx_buf)
+ ret = cs42l43_spi_rx(priv->regmap, (u8 *)tfr->rx_buf, tfr->len);
+
+ return ret;
+}
+
+static void cs42l43_set_cs(struct spi_device *spi, bool is_high)
+{
+ struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
+
+ if (spi_get_chipselect(spi, 0) == 0)
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
+}
+
+static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
+{
+ struct cs42l43_spi *priv = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = msg->spi;
+ unsigned int spi_config1 = 0;
+
+ /* select another internal CS, which doesn't exist, so CS 0 is not used */
+ if (spi_get_csgpiod(spi, 0))
+ spi_config1 |= 1 << CS42L43_SPI_SS_SEL_SHIFT;
+ if (spi->mode & SPI_CPOL)
+ spi_config1 |= CS42L43_SPI_CPOL_MASK;
+ if (spi->mode & SPI_CPHA)
+ spi_config1 |= CS42L43_SPI_CPHA_MASK;
+ if (spi->mode & SPI_3WIRE)
+ spi_config1 |= CS42L43_SPI_THREE_WIRE_MASK;
+
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG1, spi_config1);
+
+ return 0;
+}
+
+static int cs42l43_prepare_transfer_hardware(struct spi_controller *ctlr)
+{
+ struct cs42l43_spi *priv = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = regmap_write(priv->regmap, CS42L43_BLOCK_EN2, CS42L43_SPI_MSTR_EN_MASK);
+ if (ret)
+ dev_err(priv->dev, "Failed to enable SPI controller: %d\n", ret);
+
+ return ret;
+}
+
+static int cs42l43_unprepare_transfer_hardware(struct spi_controller *ctlr)
+{
+ struct cs42l43_spi *priv = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = regmap_write(priv->regmap, CS42L43_BLOCK_EN2, 0);
+ if (ret)
+ dev_err(priv->dev, "Failed to disable SPI controller: %d\n", ret);
+
+ return ret;
+}
+
+static size_t cs42l43_spi_max_length(struct spi_device *spi)
+{
+ return CS42L43_SPI_MAX_LENGTH;
+}
+
+static int cs42l43_spi_probe(struct platform_device *pdev)
+{
+ struct cs42l43 *cs42l43 = dev_get_drvdata(pdev->dev.parent);
+ struct cs42l43_spi *priv;
+ struct fwnode_handle *fwnode = dev_fwnode(cs42l43->dev);
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv->ctlr));
+ if (!priv->ctlr)
+ return -ENOMEM;
+
+ spi_controller_set_devdata(priv->ctlr, priv);
+
+ priv->dev = &pdev->dev;
+ priv->regmap = cs42l43->regmap;
+
+ priv->ctlr->prepare_message = cs42l43_prepare_message;
+ priv->ctlr->prepare_transfer_hardware = cs42l43_prepare_transfer_hardware;
+ priv->ctlr->unprepare_transfer_hardware = cs42l43_unprepare_transfer_hardware;
+ priv->ctlr->transfer_one = cs42l43_transfer_one;
+ priv->ctlr->set_cs = cs42l43_set_cs;
+ priv->ctlr->max_transfer_size = cs42l43_spi_max_length;
+
+ if (is_of_node(fwnode))
+ fwnode = fwnode_get_named_child_node(fwnode, "spi");
+
+ device_set_node(&priv->ctlr->dev, fwnode);
+
+ priv->ctlr->mode_bits = SPI_3WIRE | SPI_MODE_X_MASK;
+ priv->ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ priv->ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(32);
+ priv->ctlr->min_speed_hz = CS42L43_SPI_ROOT_HZ /
+ cs42l43_clock_divs[ARRAY_SIZE(cs42l43_clock_divs) - 1];
+ priv->ctlr->max_speed_hz = CS42L43_SPI_ROOT_HZ / cs42l43_clock_divs[0];
+ priv->ctlr->use_gpio_descriptors = true;
+ priv->ctlr->auto_runtime_pm = true;
+
+ devm_pm_runtime_enable(priv->dev);
+ pm_runtime_idle(priv->dev);
+
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
+ regmap_write(priv->regmap, CS42L43_TRAN_CONFIG7, CS42L43_FIFO_SIZE - 1);
+
+ // Disable Watchdog timer and enable stall
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG3, 0);
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG4, CS42L43_SPI_STALL_ENA_MASK);
+
+ ret = devm_spi_register_controller(priv->dev, priv->ctlr);
+ if (ret) {
+ pm_runtime_disable(priv->dev);
+ dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct platform_device_id cs42l43_spi_id_table[] = {
+ { "cs42l43-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cs42l43_spi_id_table);
+
+static struct platform_driver cs42l43_spi_driver = {
+ .driver = {
+ .name = "cs42l43-spi",
+ },
+ .probe = cs42l43_spi_probe,
+ .id_table = cs42l43_spi_id_table,
+};
+module_platform_driver(cs42l43_spi_driver);
+
+MODULE_DESCRIPTION("CS42L43 SPI Driver");
+MODULE_AUTHOR("Lucas Tanure <tanureal@opensource.cirrus.com>");
+MODULE_AUTHOR("Maciej Strozek <mstrozek@opensource.cirrus.com>");
+MODULE_LICENSE("GPL");
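The least obvious part of the new cs42l43 driver is the byte packing in cs42l43_spi_tx(): each byte enters at bits 31:24, earlier bytes shift down, and a final right shift by the pad count places the first byte of a short word in the least significant byte before the 32-bit value goes to CS42L43_TX_DATA. A standalone sketch of just that arithmetic, assuming nothing beyond hosted C (pack_fifo_word() and the sample bytes are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Pack up to four bytes into one 32-bit FIFO word the way the driver's
 * inner loop does, so buf[0] always ends up in the least significant
 * byte regardless of how many bytes the final word carries. */
static uint32_t pack_fifo_word(const uint8_t *buf, unsigned int len)
{
	uint32_t val = 0;
	unsigned int pad = 4 - len;	/* len is 1..4 */
	unsigned int i;

	for (i = 0; i < len; i++) {
		val >>= 8;
		val |= (uint32_t)buf[i] << 24;
	}

	return val >> (pad * 8);
}

int main(void)
{
	const uint8_t msg[] = { 0x11, 0x22, 0x33 };

	/* prints 0x00332211: first byte in the LSB, one pad byte dropped */
	printf("0x%08x\n", pack_fifo_word(msg, 3));
	return 0;
}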
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index b04811c911e2..c457b550d3ad 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -15,7 +15,6 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
@@ -202,7 +201,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
u8 chip_sel = spi_get_chipselect(spi, 0);
u16 spidat1 = CS_DEFAULT;
- dspi = spi_master_get_devdata(spi->master);
+ dspi = spi_controller_get_devdata(spi->controller);
/* program delay transfers if tx_delay is non zero */
if (spicfg && spicfg->wdelay)
@@ -272,7 +271,7 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
u32 hz = 0, spifmt = 0;
int prescale;
- dspi = spi_master_get_devdata(spi->master);
+ dspi = spi_controller_get_devdata(spi->controller);
spicfg = spi->controller_data;
if (!spicfg)
spicfg = &davinci_spi_default_cfg;
@@ -380,7 +379,7 @@ static int davinci_spi_of_setup(struct spi_device *spi)
{
struct davinci_spi_config *spicfg = spi->controller_data;
struct device_node *np = spi->dev.of_node;
- struct davinci_spi *dspi = spi_master_get_devdata(spi->master);
+ struct davinci_spi *dspi = spi_controller_get_devdata(spi->controller);
u32 prop;
if (spicfg == NULL && np) {
@@ -412,7 +411,7 @@ static int davinci_spi_setup(struct spi_device *spi)
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
- dspi = spi_master_get_devdata(spi->master);
+ dspi = spi_controller_get_devdata(spi->controller);
if (!(spi->mode & SPI_NO_CS)) {
if (np && spi_get_csgpiod(spi, 0))
@@ -442,7 +441,7 @@ static void davinci_spi_cleanup(struct spi_device *spi)
kfree(spicfg);
}
-static bool davinci_spi_can_dma(struct spi_master *master,
+static bool davinci_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -572,7 +571,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct davinci_spi_config *spicfg;
struct davinci_spi_platform_data *pdata;
- dspi = spi_master_get_devdata(spi->master);
+ dspi = spi_controller_get_devdata(spi->controller);
pdata = &dspi->pdata;
spicfg = (struct davinci_spi_config *)spi->controller_data;
if (!spicfg)
@@ -593,7 +592,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
reinit_completion(&dspi->done);
- if (!davinci_spi_can_dma(spi->master, spi, t)) {
+ if (!davinci_spi_can_dma(spi->controller, spi, t)) {
if (spicfg->io_type != SPI_IO_TYPE_POLL)
set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
/* start the transfer */
@@ -674,7 +673,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
}
clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
- if (davinci_spi_can_dma(spi->master, spi, t))
+ if (davinci_spi_can_dma(spi->controller, spi, t))
clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
@@ -856,22 +855,22 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
*/
static int davinci_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
struct resource *r;
int ret = 0;
u32 spipc0;
- master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
- if (master == NULL) {
+ host = spi_alloc_host(&pdev->dev, sizeof(struct davinci_spi));
+ if (host == NULL) {
ret = -ENOMEM;
goto err;
}
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- dspi = spi_master_get_devdata(master);
+ dspi = spi_controller_get_devdata(host);
if (dev_get_platdata(&pdev->dev)) {
pdata = dev_get_platdata(&pdev->dev);
@@ -880,7 +879,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
/* update dspi pdata with that from the DT */
ret = spi_davinci_get_pdata(pdev, dspi);
if (ret < 0)
- goto free_master;
+ goto free_host;
}
/* pdata in dspi is now updated and point pdata to that */
@@ -892,57 +891,48 @@ static int davinci_spi_probe(struct platform_device *pdev)
GFP_KERNEL);
if (dspi->bytes_per_word == NULL) {
ret = -ENOMEM;
- goto free_master;
+ goto free_host;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- ret = -ENOENT;
- goto free_master;
- }
-
- dspi->pbase = r->start;
-
- dspi->base = devm_ioremap_resource(&pdev->dev, r);
+ dspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(dspi->base)) {
ret = PTR_ERR(dspi->base);
- goto free_master;
+ goto free_host;
}
+ dspi->pbase = r->start;
init_completion(&dspi->done);
ret = platform_get_irq(pdev, 0);
- if (ret == 0)
- ret = -EINVAL;
if (ret < 0)
- goto free_master;
+ goto free_host;
dspi->irq = ret;
ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
if (ret)
- goto free_master;
+ goto free_host;
- dspi->bitbang.master = master;
+ dspi->bitbang.master = host;
dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
ret = -ENODEV;
- goto free_master;
+ goto free_host;
}
ret = clk_prepare_enable(dspi->clk);
if (ret)
- goto free_master;
-
- master->use_gpio_descriptors = true;
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
- master->num_chipselect = pdata->num_chipselect;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
- master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
- master->setup = davinci_spi_setup;
- master->cleanup = davinci_spi_cleanup;
- master->can_dma = davinci_spi_can_dma;
+ goto free_host;
+
+ host->use_gpio_descriptors = true;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->num_chipselect = pdata->num_chipselect;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
+ host->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_GPIO_SS;
+ host->setup = davinci_spi_setup;
+ host->cleanup = davinci_spi_cleanup;
+ host->can_dma = davinci_spi_can_dma;
dspi->bitbang.chipselect = davinci_spi_chipselect;
dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
@@ -983,7 +973,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
- /* master mode default */
+ /* host mode default */
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
@@ -1003,8 +993,8 @@ free_dma:
}
free_clk:
clk_disable_unprepare(dspi->clk);
-free_master:
- spi_master_put(master);
+free_host:
+ spi_controller_put(host);
err:
return ret;
}
@@ -1021,10 +1011,10 @@ err:
static void davinci_spi_remove(struct platform_device *pdev)
{
struct davinci_spi *dspi;
- struct spi_master *master;
+ struct spi_controller *host;
- master = platform_get_drvdata(pdev);
- dspi = spi_master_get_devdata(master);
+ host = platform_get_drvdata(pdev);
+ dspi = spi_controller_get_devdata(host);
spi_bitbang_stop(&dspi->bitbang);
@@ -1035,7 +1025,7 @@ static void davinci_spi_remove(struct platform_device *pdev)
dma_release_channel(dspi->dma_tx);
}
- spi_master_put(master);
+ spi_controller_put(host);
}
static struct platform_driver davinci_spi_driver = {
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
index 6bd93c47853c..d319dc357fef 100644
--- a/drivers/spi/spi-dln2.c
+++ b/drivers/spi/spi-dln2.c
@@ -79,7 +79,7 @@
struct dln2_spi {
struct platform_device *pdev;
- struct spi_master *master;
+ struct spi_controller *host;
u8 port;
/*
@@ -176,7 +176,7 @@ static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
{
- u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
+ u8 cs_mask = GENMASK(dln2->host->num_chipselect - 1, 0);
return dln2_spi_cs_enable(dln2, cs_mask, enable);
}
@@ -589,11 +589,11 @@ static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
return 0;
}
-static int dln2_spi_prepare_message(struct spi_master *master,
+static int dln2_spi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
int ret;
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
struct spi_device *spi = message->spi;
if (dln2->cs != spi_get_chipselect(spi, 0)) {
@@ -650,11 +650,11 @@ static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
return dln2_spi_enable(dln2, true);
}
-static int dln2_spi_transfer_one(struct spi_master *master,
+static int dln2_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
int status;
u8 attr = 0;
@@ -666,7 +666,7 @@ static int dln2_spi_transfer_one(struct spi_master *master,
return status;
}
- if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
+ if (!xfer->cs_change && !spi_transfer_is_last(host, xfer))
attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
@@ -679,29 +679,29 @@ static int dln2_spi_transfer_one(struct spi_master *master,
static int dln2_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct dln2_spi *dln2;
struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*dln2));
+ if (!host)
return -ENOMEM;
- device_set_node(&master->dev, dev_fwnode(dev));
+ device_set_node(&host->dev, dev_fwnode(dev));
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- dln2 = spi_master_get_devdata(master);
+ dln2 = spi_controller_get_devdata(host);
dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
if (!dln2->buf) {
ret = -ENOMEM;
- goto exit_free_master;
+ goto exit_free_host;
}
- dln2->master = master;
+ dln2->host = host;
dln2->pdev = pdev;
dln2->port = pdata->port;
/* cs/mode can never be 0xff, so the first transfer will set them */
@@ -712,47 +712,47 @@ static int dln2_spi_probe(struct platform_device *pdev)
ret = dln2_spi_enable(dln2, false);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to disable SPI module\n");
- goto exit_free_master;
+ goto exit_free_host;
}
- ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
+ ret = dln2_spi_get_cs_num(dln2, &host->num_chipselect);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get number of CS pins\n");
- goto exit_free_master;
+ goto exit_free_host;
}
ret = dln2_spi_get_speed_range(dln2,
- &master->min_speed_hz,
- &master->max_speed_hz);
+ &host->min_speed_hz,
+ &host->max_speed_hz);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
- goto exit_free_master;
+ goto exit_free_host;
}
ret = dln2_spi_get_supported_frame_sizes(dln2,
- &master->bits_per_word_mask);
+ &host->bits_per_word_mask);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
- goto exit_free_master;
+ goto exit_free_host;
}
ret = dln2_spi_cs_enable_all(dln2, true);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable CS pins\n");
- goto exit_free_master;
+ goto exit_free_host;
}
- master->bus_num = -1;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->prepare_message = dln2_spi_prepare_message;
- master->transfer_one = dln2_spi_transfer_one;
- master->auto_runtime_pm = true;
+ host->bus_num = -1;
+ host->mode_bits = SPI_CPOL | SPI_CPHA;
+ host->prepare_message = dln2_spi_prepare_message;
+ host->transfer_one = dln2_spi_transfer_one;
+ host->auto_runtime_pm = true;
/* enable SPI module, we're good to go */
ret = dln2_spi_enable(dln2, true);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to enable SPI module\n");
- goto exit_free_master;
+ goto exit_free_host;
}
pm_runtime_set_autosuspend_delay(&pdev->dev,
@@ -761,9 +761,9 @@ static int dln2_spi_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register master\n");
+ dev_err(&pdev->dev, "Failed to register host\n");
goto exit_register;
}
@@ -775,16 +775,16 @@ exit_register:
if (dln2_spi_enable(dln2, false) < 0)
dev_err(&pdev->dev, "Failed to disable SPI module\n");
-exit_free_master:
- spi_master_put(master);
+exit_free_host:
+ spi_controller_put(host);
return ret;
}
static void dln2_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
pm_runtime_disable(&pdev->dev);
@@ -796,10 +796,10 @@ static void dln2_spi_remove(struct platform_device *pdev)
static int dln2_spi_suspend(struct device *dev)
{
int ret;
- struct spi_master *master = dev_get_drvdata(dev);
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret < 0)
return ret;
@@ -824,8 +824,8 @@ static int dln2_spi_suspend(struct device *dev)
static int dln2_spi_resume(struct device *dev)
{
int ret;
- struct spi_master *master = dev_get_drvdata(dev);
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
if (!pm_runtime_suspended(dev)) {
ret = dln2_spi_cs_enable_all(dln2, true);
@@ -837,23 +837,23 @@ static int dln2_spi_resume(struct device *dev)
return ret;
}
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int dln2_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
return dln2_spi_enable(dln2, false);
}
static int dln2_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_controller_get_devdata(host);
return dln2_spi_enable(dln2, true);
}
@@ -875,7 +875,7 @@ static struct platform_driver spi_dln2_driver = {
};
module_platform_driver(spi_dln2_driver);
-MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI host interface");
MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dln2-spi");
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index a8ba41ad4541..0274c9295514 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -61,7 +61,7 @@ static void dw_spi_debugfs_init(struct dw_spi *dws)
{
char name[32];
- snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
+ snprintf(name, 32, "dw_spi%d", dws->host->bus_num);
dws->debugfs = debugfs_create_dir(name, NULL);
dws->regset.regs = dw_spi_dbgfs_regs;
@@ -183,25 +183,25 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
irq_status = dw_readl(dws, DW_SPI_ISR);
if (irq_status & DW_SPI_INT_RXOI) {
- dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
+ dev_err(&dws->host->dev, "RX FIFO overflow detected\n");
ret = -EIO;
}
if (irq_status & DW_SPI_INT_RXUI) {
- dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
+ dev_err(&dws->host->dev, "RX FIFO underflow detected\n");
ret = -EIO;
}
if (irq_status & DW_SPI_INT_TXOI) {
- dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
+ dev_err(&dws->host->dev, "TX FIFO overflow detected\n");
ret = -EIO;
}
/* Generically handle the erroneous situation */
if (ret) {
dw_spi_reset_chip(dws);
- if (dws->master->cur_msg)
- dws->master->cur_msg->status = ret;
+ if (dws->host->cur_msg)
+ dws->host->cur_msg->status = ret;
}
return ret;
@@ -213,7 +213,7 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
if (dw_spi_check_status(dws, false)) {
- spi_finalize_current_transfer(dws->master);
+ spi_finalize_current_transfer(dws->host);
return IRQ_HANDLED;
}
@@ -227,7 +227,7 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
dw_reader(dws);
if (!dws->rx_len) {
dw_spi_mask_intr(dws, 0xff);
- spi_finalize_current_transfer(dws->master);
+ spi_finalize_current_transfer(dws->host);
} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
}
@@ -248,14 +248,14 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
- struct spi_controller *master = dev_id;
- struct dw_spi *dws = spi_controller_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct dw_spi *dws = spi_controller_get_devdata(host);
u16 irq_status = dw_readl(dws, DW_SPI_ISR) & DW_SPI_INT_MASK;
if (!irq_status)
return IRQ_NONE;
- if (!master->cur_msg) {
+ if (!host->cur_msg) {
dw_spi_mask_intr(dws, 0xff);
return IRQ_HANDLED;
}
@@ -408,11 +408,11 @@ static int dw_spi_poll_transfer(struct dw_spi *dws,
return 0;
}
-static int dw_spi_transfer_one(struct spi_controller *master,
+static int dw_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *transfer)
{
- struct dw_spi *dws = spi_controller_get_devdata(master);
+ struct dw_spi *dws = spi_controller_get_devdata(host);
struct dw_spi_cfg cfg = {
.tmode = DW_SPI_CTRLR0_TMOD_TR,
.dfs = transfer->bits_per_word,
@@ -440,8 +440,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
transfer->effective_speed_hz = dws->current_freq;
/* Check if current transfer is a DMA transaction */
- if (master->can_dma && master->can_dma(master, spi, transfer))
- dws->dma_mapped = master->cur_msg_mapped;
+ if (host->can_dma && host->can_dma(host, spi, transfer))
+ dws->dma_mapped = host->cur_msg_mapped;
/* For poll mode just disable all interrupts */
dw_spi_mask_intr(dws, 0xff);
@@ -464,10 +464,10 @@ static int dw_spi_transfer_one(struct spi_controller *master,
return 1;
}
-static void dw_spi_handle_err(struct spi_controller *master,
+static void dw_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
- struct dw_spi *dws = spi_controller_get_devdata(master);
+ struct dw_spi *dws = spi_controller_get_devdata(host);
if (dws->dma_mapped)
dws->dma_ops->dma_stop(dws);
@@ -576,7 +576,7 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
while (len) {
entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
if (!entries) {
- dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
+ dev_err(&dws->host->dev, "CS de-assertion on Tx\n");
return -EIO;
}
room = min(dws->fifo_len - entries, len);
@@ -596,7 +596,7 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
if (!entries) {
sts = readl_relaxed(dws->regs + DW_SPI_RISR);
if (sts & DW_SPI_INT_RXOI) {
- dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
+ dev_err(&dws->host->dev, "FIFO overflow on Rx\n");
return -EIO;
}
continue;
@@ -637,7 +637,7 @@ static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
spi_delay_exec(&delay, NULL);
if (retry < 0) {
- dev_err(&dws->master->dev, "Mem op hanged up\n");
+ dev_err(&dws->host->dev, "Mem op hanged up\n");
return -EIO;
}
@@ -884,56 +884,56 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws)
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
- struct spi_controller *master;
+ struct spi_controller *host;
int ret;
if (!dws)
return -EINVAL;
- master = spi_alloc_master(dev, 0);
- if (!master)
+ host = spi_alloc_host(dev, 0);
+ if (!host)
return -ENOMEM;
- device_set_node(&master->dev, dev_fwnode(dev));
+ device_set_node(&host->dev, dev_fwnode(dev));
- dws->master = master;
+ dws->host = host;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
- spi_controller_set_devdata(master, dws);
+ spi_controller_set_devdata(host, dws);
/* Basic HW init */
dw_spi_hw_init(dev, dws);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
- master);
+ host);
if (ret < 0 && ret != -ENOTCONN) {
dev_err(dev, "can not get IRQ\n");
- goto err_free_master;
+ goto err_free_host;
}
dw_spi_init_mem_ops(dws);
- master->use_gpio_descriptors = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+ host->use_gpio_descriptors = true;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
if (dws->caps & DW_SPI_CAP_DFS32)
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
- master->bus_num = dws->bus_num;
- master->num_chipselect = dws->num_cs;
- master->setup = dw_spi_setup;
- master->cleanup = dw_spi_cleanup;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ host->bus_num = dws->bus_num;
+ host->num_chipselect = dws->num_cs;
+ host->setup = dw_spi_setup;
+ host->cleanup = dw_spi_cleanup;
if (dws->set_cs)
- master->set_cs = dws->set_cs;
+ host->set_cs = dws->set_cs;
else
- master->set_cs = dw_spi_set_cs;
- master->transfer_one = dw_spi_transfer_one;
- master->handle_err = dw_spi_handle_err;
+ host->set_cs = dw_spi_set_cs;
+ host->transfer_one = dw_spi_transfer_one;
+ host->handle_err = dw_spi_handle_err;
if (dws->mem_ops.exec_op)
- master->mem_ops = &dws->mem_ops;
- master->max_speed_hz = dws->max_freq;
- master->flags = SPI_MASTER_GPIO_SS;
- master->auto_runtime_pm = true;
+ host->mem_ops = &dws->mem_ops;
+ host->max_speed_hz = dws->max_freq;
+ host->flags = SPI_CONTROLLER_GPIO_SS;
+ host->auto_runtime_pm = true;
/* Get default rx sample delay */
device_property_read_u32(dev, "rx-sample-delay-ns",
@@ -946,14 +946,14 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
} else if (ret) {
dev_warn(dev, "DMA init failed\n");
} else {
- master->can_dma = dws->dma_ops->can_dma;
- master->flags |= SPI_CONTROLLER_MUST_TX;
+ host->can_dma = dws->dma_ops->can_dma;
+ host->flags |= SPI_CONTROLLER_MUST_TX;
}
}
- ret = spi_register_controller(master);
+ ret = spi_register_controller(host);
if (ret) {
- dev_err_probe(dev, ret, "problem registering spi master\n");
+ dev_err_probe(dev, ret, "problem registering spi host\n");
goto err_dma_exit;
}
@@ -965,9 +965,9 @@ err_dma_exit:
dws->dma_ops->dma_exit(dws);
dw_spi_enable_chip(dws, 0);
err_free_irq:
- free_irq(dws->irq, master);
-err_free_master:
- spi_controller_put(master);
+ free_irq(dws->irq, host);
+err_free_host:
+ spi_controller_put(host);
return ret;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_add_host, SPI_DW_CORE);
@@ -976,14 +976,14 @@ void dw_spi_remove_host(struct dw_spi *dws)
{
dw_spi_debugfs_remove(dws);
- spi_unregister_controller(dws->master);
+ spi_unregister_controller(dws->host);
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
dw_spi_shutdown_chip(dws);
- free_irq(dws->irq, dws->master);
+ free_irq(dws->irq, dws->host);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_remove_host, SPI_DW_CORE);
@@ -991,7 +991,7 @@ int dw_spi_suspend_host(struct dw_spi *dws)
{
int ret;
- ret = spi_controller_suspend(dws->master);
+ ret = spi_controller_suspend(dws->host);
if (ret)
return ret;
@@ -1002,8 +1002,8 @@ EXPORT_SYMBOL_NS_GPL(dw_spi_suspend_host, SPI_DW_CORE);
int dw_spi_resume_host(struct dw_spi *dws)
{
- dw_spi_hw_init(&dws->master->dev, dws);
- return spi_controller_resume(dws->master);
+ dw_spi_hw_init(&dws->host->dev, dws);
+ return spi_controller_resume(dws->host);
}
EXPORT_SYMBOL_NS_GPL(dw_spi_resume_host, SPI_DW_CORE);
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index df819652901a..0ecbb6c36e23 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -139,8 +139,8 @@ static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
if (!dws->txchan)
goto free_rxchan;
- dws->master->dma_rx = dws->rxchan;
- dws->master->dma_tx = dws->txchan;
+ dws->host->dma_rx = dws->rxchan;
+ dws->host->dma_tx = dws->txchan;
init_completion(&dws->dma_completion);
@@ -183,8 +183,8 @@ static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
goto free_rxchan;
}
- dws->master->dma_rx = dws->rxchan;
- dws->master->dma_tx = dws->txchan;
+ dws->host->dma_rx = dws->rxchan;
+ dws->host->dma_tx = dws->txchan;
init_completion(&dws->dma_completion);
@@ -242,10 +242,10 @@ static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
}
}
-static bool dw_spi_can_dma(struct spi_controller *master,
+static bool dw_spi_can_dma(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *xfer)
{
- struct dw_spi *dws = spi_controller_get_devdata(master);
+ struct dw_spi *dws = spi_controller_get_devdata(host);
enum dma_slave_buswidth dma_bus_width;
if (xfer->len <= dws->fifo_len)
@@ -271,7 +271,7 @@ static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
msecs_to_jiffies(ms));
if (ms == 0) {
- dev_err(&dws->master->cur_msg->spi->dev,
+ dev_err(&dws->host->cur_msg->spi->dev,
"DMA transaction timed out\n");
return -ETIMEDOUT;
}
@@ -299,7 +299,7 @@ static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
spi_delay_exec(&delay, xfer);
if (retry < 0) {
- dev_err(&dws->master->dev, "Tx hanged up\n");
+ dev_err(&dws->host->dev, "Tx hanged up\n");
return -EIO;
}
@@ -400,7 +400,7 @@ static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
spi_delay_exec(&delay, NULL);
if (retry < 0) {
- dev_err(&dws->master->dev, "Rx hanged up\n");
+ dev_err(&dws->host->dev, "Rx hanged up\n");
return -EIO;
}
@@ -656,13 +656,13 @@ static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
if (ret)
return ret;
- if (dws->master->cur_msg->status == -EINPROGRESS) {
+ if (dws->host->cur_msg->status == -EINPROGRESS) {
ret = dw_spi_dma_wait_tx_done(dws, xfer);
if (ret)
return ret;
}
- if (xfer->rx_buf && dws->master->cur_msg->status == -EINPROGRESS)
+ if (xfer->rx_buf && dws->host->cur_msg->status == -EINPROGRESS)
ret = dw_spi_dma_wait_rx_done(dws);
return ret;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index a963bc96c223..805264c9c65c 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -76,7 +76,7 @@ struct dw_spi_mscc {
*/
static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
{
- struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
u32 cs = spi_get_chipselect(spi, 0);
@@ -149,7 +149,7 @@ static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
*/
static void dw_spi_sparx5_set_cs(struct spi_device *spi, bool enable)
{
- struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
struct dw_spi_mscc *dwsmscc = dwsmmio->priv;
u8 cs = spi_get_chipselect(spi, 0);
@@ -277,7 +277,7 @@ static void dw_spi_elba_override_cs(struct regmap *syscon, int cs, int enable)
static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable)
{
- struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct dw_spi_mmio *dwsmmio = container_of(dws, struct dw_spi_mmio, dws);
struct regmap *syscon = dwsmmio->priv;
u8 cs;
@@ -369,7 +369,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->max_freq = clk_get_rate(dwsmmio->clk);
- device_property_read_u32(&pdev->dev, "reg-io-width", &dws->reg_io_width);
+ if (device_property_read_u32(&pdev->dev, "reg-io-width",
+ &dws->reg_io_width))
+ dws->reg_io_width = 4;
num_cs = 4;
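The spi-dw-mmio change above makes the optional "reg-io-width" property handling explicit: device_property_read_u32() returns non-zero when the property is absent or unreadable, and in that case the register access width now falls back to 4 bytes right at the read. The idiom in isolation, as a fragment:

	u32 reg_io_width;

	/* non-zero return means missing or bad property: take the default */
	if (device_property_read_u32(dev, "reg-io-width", &reg_io_width))
		reg_io_width = 4;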
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 3962e6dcf880..6cafeee8ee2a 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -142,14 +142,14 @@ struct dw_spi_dma_ops {
int (*dma_init)(struct device *dev, struct dw_spi *dws);
void (*dma_exit)(struct dw_spi *dws);
int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
- bool (*can_dma)(struct spi_controller *master, struct spi_device *spi,
+ bool (*can_dma)(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer);
int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer);
void (*dma_stop)(struct dw_spi *dws);
};
struct dw_spi {
- struct spi_controller *master;
+ struct spi_controller *host;
u32 ip; /* Synopsys DW SSI IP-core ID */
u32 ver; /* Synopsys component version */
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 1615fd22f9a2..a1d60e51c053 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -104,15 +104,15 @@ struct ep93xx_spi {
/**
* ep93xx_spi_calc_divisors() - calculates SPI clock divisors
- * @master: SPI master
+ * @host: SPI host
* @rate: desired SPI output clock rate
* @div_cpsr: pointer to return the cpsr (pre-scaler) divider
* @div_scr: pointer to return the scr divider
*/
-static int ep93xx_spi_calc_divisors(struct spi_master *master,
+static int ep93xx_spi_calc_divisors(struct spi_controller *host,
u32 rate, u8 *div_cpsr, u8 *div_scr)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
@@ -120,7 +120,7 @@ static int ep93xx_spi_calc_divisors(struct spi_master *master,
* Make sure that max value is between values supported by the
* controller.
*/
- rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
+ rate = clamp(rate, host->min_speed_hz, host->max_speed_hz);
/*
* Calculate divisors so that we can get speed according the
@@ -143,18 +143,18 @@ static int ep93xx_spi_calc_divisors(struct spi_master *master,
return -EINVAL;
}
-static int ep93xx_spi_chip_setup(struct spi_master *master,
+static int ep93xx_spi_chip_setup(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u8 dss = bits_per_word_to_dss(xfer->bits_per_word);
u8 div_cpsr = 0;
u8 div_scr = 0;
u16 cr0;
int err;
- err = ep93xx_spi_calc_divisors(master, xfer->speed_hz,
+ err = ep93xx_spi_calc_divisors(host, xfer->speed_hz,
&div_cpsr, &div_scr);
if (err)
return err;
@@ -166,9 +166,9 @@ static int ep93xx_spi_chip_setup(struct spi_master *master,
cr0 |= SSPCR0_SPH;
cr0 |= dss;
- dev_dbg(&master->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
+ dev_dbg(&host->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
spi->mode, div_cpsr, div_scr, dss);
- dev_dbg(&master->dev, "setup: cr0 %#x\n", cr0);
+ dev_dbg(&host->dev, "setup: cr0 %#x\n", cr0);
writel(div_cpsr, espi->mmio + SSPCPSR);
writel(cr0, espi->mmio + SSPCR0);
@@ -176,10 +176,10 @@ static int ep93xx_spi_chip_setup(struct spi_master *master,
return 0;
}
-static void ep93xx_do_write(struct spi_master *master)
+static void ep93xx_do_write(struct spi_controller *host)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *xfer = master->cur_msg->state;
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
+ struct spi_transfer *xfer = host->cur_msg->state;
u32 val = 0;
if (xfer->bits_per_word > 8) {
@@ -194,10 +194,10 @@ static void ep93xx_do_write(struct spi_master *master)
writel(val, espi->mmio + SSPDR);
}
-static void ep93xx_do_read(struct spi_master *master)
+static void ep93xx_do_read(struct spi_controller *host)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *xfer = master->cur_msg->state;
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
+ struct spi_transfer *xfer = host->cur_msg->state;
u32 val;
val = readl(espi->mmio + SSPDR);
@@ -214,7 +214,7 @@ static void ep93xx_do_read(struct spi_master *master)
/**
* ep93xx_spi_read_write() - perform next RX/TX transfer
- * @master: SPI master
+ * @host: SPI host
*
* This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
* called several times, the whole transfer will be completed. Returns
@@ -223,20 +223,20 @@ static void ep93xx_do_read(struct spi_master *master)
* When this function is finished, RX FIFO should be empty and TX FIFO should be
* full.
*/
-static int ep93xx_spi_read_write(struct spi_master *master)
+static int ep93xx_spi_read_write(struct spi_controller *host)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *xfer = master->cur_msg->state;
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
+ struct spi_transfer *xfer = host->cur_msg->state;
/* read as long as RX FIFO has frames in it */
while ((readl(espi->mmio + SSPSR) & SSPSR_RNE)) {
- ep93xx_do_read(master);
+ ep93xx_do_read(host);
espi->fifo_level--;
}
/* write as long as TX FIFO has room */
while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < xfer->len) {
- ep93xx_do_write(master);
+ ep93xx_do_write(host);
espi->fifo_level++;
}
@@ -261,7 +261,7 @@ ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
/**
* ep93xx_spi_dma_prepare() - prepares a DMA transfer
- * @master: SPI master
+ * @host: SPI host
* @dir: DMA transfer direction
*
* Function configures the DMA, maps the buffer and prepares the DMA
@@ -269,11 +269,11 @@ ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
* in case of failure.
*/
static struct dma_async_tx_descriptor *
-ep93xx_spi_dma_prepare(struct spi_master *master,
+ep93xx_spi_dma_prepare(struct spi_controller *host,
enum dma_data_direction dir)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *xfer = master->cur_msg->state;
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
+ struct spi_transfer *xfer = host->cur_msg->state;
struct dma_async_tx_descriptor *txd;
enum dma_slave_buswidth buswidth;
struct dma_slave_config conf;
@@ -348,7 +348,7 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
}
if (WARN_ON(len)) {
- dev_warn(&master->dev, "len = %zu expected 0!\n", len);
+ dev_warn(&host->dev, "len = %zu expected 0!\n", len);
return ERR_PTR(-EINVAL);
}
@@ -367,16 +367,16 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
/**
* ep93xx_spi_dma_finish() - finishes with a DMA transfer
- * @master: SPI master
+ * @host: SPI host
* @dir: DMA transfer direction
*
* Function finishes with the DMA transfer. After this, the DMA buffer is
* unmapped.
*/
-static void ep93xx_spi_dma_finish(struct spi_master *master,
+static void ep93xx_spi_dma_finish(struct spi_controller *host,
enum dma_data_direction dir)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct dma_chan *chan;
struct sg_table *sgt;
@@ -393,35 +393,35 @@ static void ep93xx_spi_dma_finish(struct spi_master *master,
static void ep93xx_spi_dma_callback(void *callback_param)
{
- struct spi_master *master = callback_param;
+ struct spi_controller *host = callback_param;
- ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
- ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
+ ep93xx_spi_dma_finish(host, DMA_TO_DEVICE);
+ ep93xx_spi_dma_finish(host, DMA_FROM_DEVICE);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
-static int ep93xx_spi_dma_transfer(struct spi_master *master)
+static int ep93xx_spi_dma_transfer(struct spi_controller *host)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
struct dma_async_tx_descriptor *rxd, *txd;
- rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
+ rxd = ep93xx_spi_dma_prepare(host, DMA_FROM_DEVICE);
if (IS_ERR(rxd)) {
- dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+ dev_err(&host->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
return PTR_ERR(rxd);
}
- txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
+ txd = ep93xx_spi_dma_prepare(host, DMA_TO_DEVICE);
if (IS_ERR(txd)) {
- ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
- dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
+ ep93xx_spi_dma_finish(host, DMA_FROM_DEVICE);
+ dev_err(&host->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
return PTR_ERR(txd);
}
/* We are ready when RX is done */
rxd->callback = ep93xx_spi_dma_callback;
- rxd->callback_param = master;
+ rxd->callback_param = host;
/* Now submit both descriptors and start DMA */
dmaengine_submit(rxd);
@@ -436,8 +436,8 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master)
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
/*
@@ -447,15 +447,15 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
if (readl(espi->mmio + SSPIIR) & SSPIIR_RORIS) {
/* clear the overrun interrupt */
writel(0, espi->mmio + SSPICR);
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"receive overrun, aborting the message\n");
- master->cur_msg->status = -EIO;
+ host->cur_msg->status = -EIO;
} else {
/*
* Interrupt is either RX (RIS) or TX (TIS). For both cases we
* simply execute next data transfer.
*/
- if (ep93xx_spi_read_write(master)) {
+ if (ep93xx_spi_read_write(host)) {
/*
* In normal case, there still is some processing left
* for current transfer. Let's wait for the next
@@ -474,26 +474,26 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
val &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
writel(val, espi->mmio + SSPCR1);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
-static int ep93xx_spi_transfer_one(struct spi_master *master,
+static int ep93xx_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
int ret;
- ret = ep93xx_spi_chip_setup(master, spi, xfer);
+ ret = ep93xx_spi_chip_setup(host, spi, xfer);
if (ret) {
- dev_err(&master->dev, "failed to setup chip for transfer\n");
+ dev_err(&host->dev, "failed to setup chip for transfer\n");
return ret;
}
- master->cur_msg->state = xfer;
+ host->cur_msg->state = xfer;
espi->rx = 0;
espi->tx = 0;
@@ -503,10 +503,10 @@ static int ep93xx_spi_transfer_one(struct spi_master *master,
* So in these cases we will be using PIO and don't bother for DMA.
*/
if (espi->dma_rx && xfer->len > SPI_FIFO_SIZE)
- return ep93xx_spi_dma_transfer(master);
+ return ep93xx_spi_dma_transfer(host);
/* Using PIO so prime the TX FIFO and enable interrupts */
- ep93xx_spi_read_write(master);
+ ep93xx_spi_read_write(host);
val = readl(espi->mmio + SSPCR1);
val |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
@@ -516,10 +516,10 @@ static int ep93xx_spi_transfer_one(struct spi_master *master,
return 1;
}
-static int ep93xx_spi_prepare_message(struct spi_master *master,
+static int ep93xx_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
unsigned long timeout;
/*
@@ -528,7 +528,7 @@ static int ep93xx_spi_prepare_message(struct spi_master *master,
timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
while (readl(espi->mmio + SSPSR) & SSPSR_RNE) {
if (time_after(jiffies, timeout)) {
- dev_warn(&master->dev,
+ dev_warn(&host->dev,
"timeout while flushing RX FIFO\n");
return -ETIMEDOUT;
}
@@ -544,9 +544,9 @@ static int ep93xx_spi_prepare_message(struct spi_master *master,
return 0;
}
-static int ep93xx_spi_prepare_hardware(struct spi_master *master)
+static int ep93xx_spi_prepare_hardware(struct spi_controller *host)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
int ret;
@@ -561,9 +561,9 @@ static int ep93xx_spi_prepare_hardware(struct spi_master *master)
return 0;
}
-static int ep93xx_spi_unprepare_hardware(struct spi_master *master)
+static int ep93xx_spi_unprepare_hardware(struct spi_controller *host)
{
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
u32 val;
val = readl(espi->mmio + SSPCR1);
@@ -646,7 +646,7 @@ static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
static int ep93xx_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct ep93xx_spi_info *info;
struct ep93xx_spi *espi;
struct resource *res;
@@ -661,63 +661,56 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -EBUSY;
+ return irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "unable to get iomem resource\n");
- return -ENODEV;
- }
-
- master = spi_alloc_master(&pdev->dev, sizeof(*espi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*espi));
+ if (!host)
return -ENOMEM;
- master->use_gpio_descriptors = true;
- master->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
- master->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
- master->prepare_message = ep93xx_spi_prepare_message;
- master->transfer_one = ep93xx_spi_transfer_one;
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ host->use_gpio_descriptors = true;
+ host->prepare_transfer_hardware = ep93xx_spi_prepare_hardware;
+ host->unprepare_transfer_hardware = ep93xx_spi_unprepare_hardware;
+ host->prepare_message = ep93xx_spi_prepare_message;
+ host->transfer_one = ep93xx_spi_transfer_one;
+ host->bus_num = pdev->id;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
/*
* The SPI core will count the number of GPIO descriptors to figure
* out the number of chip selects available on the platform.
*/
- master->num_chipselect = 0;
+ host->num_chipselect = 0;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- espi = spi_master_get_devdata(master);
+ espi = spi_controller_get_devdata(host);
espi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(espi->clk)) {
dev_err(&pdev->dev, "unable to get spi clock\n");
error = PTR_ERR(espi->clk);
- goto fail_release_master;
+ goto fail_release_host;
}
/*
* Calculate maximum and minimum supported clock rates
* for the controller.
*/
- master->max_speed_hz = clk_get_rate(espi->clk) / 2;
- master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
+ host->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ host->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
- espi->sspdr_phys = res->start + SSPDR;
-
- espi->mmio = devm_ioremap_resource(&pdev->dev, res);
+ espi->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(espi->mmio)) {
error = PTR_ERR(espi->mmio);
- goto fail_release_master;
+ goto fail_release_host;
}
+ espi->sspdr_phys = res->start + SSPDR;
error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
- 0, "ep93xx-spi", master);
+ 0, "ep93xx-spi", host);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto fail_release_master;
+ goto fail_release_host;
}
if (info->use_dma && ep93xx_spi_setup_dma(espi))
@@ -726,9 +719,9 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
/* make sure that the hardware is disabled */
writel(0, espi->mmio + SSPCR1);
- error = devm_spi_register_master(&pdev->dev, master);
+ error = devm_spi_register_controller(&pdev->dev, host);
if (error) {
- dev_err(&pdev->dev, "failed to register SPI master\n");
+ dev_err(&pdev->dev, "failed to register SPI host\n");
goto fail_free_dma;
}
@@ -739,16 +732,16 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
fail_free_dma:
ep93xx_spi_release_dma(espi);
-fail_release_master:
- spi_master_put(master);
+fail_release_host:
+ spi_controller_put(host);
return error;
}
static void ep93xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct ep93xx_spi *espi = spi_controller_get_devdata(host);
ep93xx_spi_release_dma(espi);
}
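Both the davinci and ep93xx probes above replace the platform_get_resource() + devm_ioremap_resource() pair with devm_platform_get_and_ioremap_resource(), which maps memory resource 0 and still hands back the struct resource for drivers that need the physical base (for example to compute a DMA slave address). A reduced probe fragment; foo and FOO_DATA_REG are illustrative placeholders:

	struct resource *res;

	/* maps memory resource 0 and also returns it via res */
	foo->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);

	/* physical address of a data register, derived from the resource */
	foo->phys_data = res->start + FOO_DATA_REG;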
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 4c103dff0d44..84279058f0f1 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -91,14 +91,14 @@
struct falcon_sflash {
u32 sfcmd; /* for caching of opcode, direction, ... */
- struct spi_master *master;
+ struct spi_controller *host;
};
int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t,
unsigned long flags)
{
struct device *dev = &spi->dev;
- struct falcon_sflash *priv = spi_master_get_devdata(spi->master);
+ struct falcon_sflash *priv = spi_controller_get_devdata(spi->controller);
const u8 *txp = t->tx_buf;
u8 *rxp = t->rx_buf;
unsigned int bytelen = ((8 * t->len + 7) / 8);
@@ -351,10 +351,10 @@ static int falcon_sflash_setup(struct spi_device *spi)
return 0;
}
-static int falcon_sflash_xfer_one(struct spi_master *master,
+static int falcon_sflash_xfer_one(struct spi_controller *host,
struct spi_message *m)
{
- struct falcon_sflash *priv = spi_master_get_devdata(master);
+ struct falcon_sflash *priv = spi_controller_get_devdata(host);
struct spi_transfer *t;
unsigned long spi_flags;
unsigned long flags;
@@ -382,7 +382,7 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
}
m->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return 0;
}
@@ -390,25 +390,25 @@ static int falcon_sflash_xfer_one(struct spi_master *master,
static int falcon_sflash_probe(struct platform_device *pdev)
{
struct falcon_sflash *priv;
- struct spi_master *master;
+ struct spi_controller *host;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*priv));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*priv));
+ if (!host)
return -ENOMEM;
- priv = spi_master_get_devdata(master);
- priv->master = master;
+ priv = spi_controller_get_devdata(host);
+ priv->host = host;
- master->mode_bits = SPI_MODE_3;
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->setup = falcon_sflash_setup;
- master->transfer_one_message = falcon_sflash_xfer_one;
- master->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_MODE_3;
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->setup = falcon_sflash_setup;
+ host->transfer_one_message = falcon_sflash_xfer_one;
+ host->dev.of_node = pdev->dev.of_node;
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index ba3b17d7c9ec..fc9e33be1e0e 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -542,7 +542,7 @@ static int fsi_spi_probe(struct device *dev)
if (of_property_read_u32(np, "reg", &base))
continue;
- ctlr = spi_alloc_master(dev, sizeof(*ctx));
+ ctlr = spi_alloc_host(dev, sizeof(*ctx));
if (!ctlr) {
of_node_put(np);
break;
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
index 38452089e8f3..47c7a5c6257f 100644
--- a/drivers/spi/spi-fsl-cpm.c
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -56,12 +56,12 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
QE_CR_PROTOCOL_UNSPECIFIED, 0);
} else {
if (mspi->flags & SPI_CPM1) {
- out_be32(&mspi->pram->rstate, 0);
- out_be16(&mspi->pram->rbptr,
- in_be16(&mspi->pram->rbase));
- out_be32(&mspi->pram->tstate, 0);
- out_be16(&mspi->pram->tbptr,
- in_be16(&mspi->pram->tbase));
+ iowrite32be(0, &mspi->pram->rstate);
+ iowrite16be(ioread16be(&mspi->pram->rbase),
+ &mspi->pram->rbptr);
+ iowrite32be(0, &mspi->pram->tstate);
+ iowrite16be(ioread16be(&mspi->pram->tbase),
+ &mspi->pram->tbptr);
} else {
cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
}
@@ -75,24 +75,24 @@ static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
unsigned int xfer_ofs;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
if (mspi->rx_dma == mspi->dma_dummy_rx)
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ iowrite32be(mspi->rx_dma, &rx_bd->cbd_bufaddr);
else
- out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
- out_be16(&rx_bd->cbd_datlen, 0);
- out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
+ iowrite32be(mspi->rx_dma + xfer_ofs, &rx_bd->cbd_bufaddr);
+ iowrite16be(0, &rx_bd->cbd_datlen);
+ iowrite16be(BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP, &rx_bd->cbd_sc);
if (mspi->tx_dma == mspi->dma_dummy_tx)
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ iowrite32be(mspi->tx_dma, &tx_bd->cbd_bufaddr);
else
- out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
- out_be16(&tx_bd->cbd_datlen, xfer_len);
- out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
- BD_SC_LAST);
+ iowrite32be(mspi->tx_dma + xfer_ofs, &tx_bd->cbd_bufaddr);
+ iowrite16be(xfer_len, &tx_bd->cbd_datlen);
+ iowrite16be(BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | BD_SC_LAST,
+ &tx_bd->cbd_sc);
/* start transfer */
mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
@@ -102,7 +102,7 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
struct spi_transfer *t, bool is_dma_mapped)
{
struct device *dev = mspi->dev;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
if (is_dma_mapped) {
mspi->map_tx_dma = 0;
@@ -123,7 +123,7 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
}
if (t->bits_per_word == 16 && t->tx_buf) {
const u16 *src = t->tx_buf;
- u16 *dst;
+ __le16 *dst;
int i;
dst = kmalloc(t->len, GFP_KERNEL);
@@ -202,12 +202,12 @@ EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
u16 len;
- struct fsl_spi_reg *reg_base = mspi->reg_base;
+ struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
- in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+ ioread16be(&mspi->rx_bd->cbd_datlen), mspi->count);
- len = in_be16(&mspi->rx_bd->cbd_datlen);
+ len = ioread16be(&mspi->rx_bd->cbd_datlen);
if (len > mspi->count) {
WARN_ON(1);
len = mspi->count;
@@ -328,7 +328,7 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
}
if (mspi->flags & SPI_CPM1) {
- void *pram;
+ void __iomem *pram;
pram = devm_platform_ioremap_resource(to_platform_device(dev),
1);
@@ -374,21 +374,21 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
/* Initialize parameter ram. */
- out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
- out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
- out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
- out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
- out_be16(&mspi->pram->mrblr, SPI_MRBLR);
- out_be32(&mspi->pram->rstate, 0);
- out_be32(&mspi->pram->rdp, 0);
- out_be16(&mspi->pram->rbptr, 0);
- out_be16(&mspi->pram->rbc, 0);
- out_be32(&mspi->pram->rxtmp, 0);
- out_be32(&mspi->pram->tstate, 0);
- out_be32(&mspi->pram->tdp, 0);
- out_be16(&mspi->pram->tbptr, 0);
- out_be16(&mspi->pram->tbc, 0);
- out_be32(&mspi->pram->txtmp, 0);
+ iowrite16be(cpm_muram_offset(mspi->tx_bd), &mspi->pram->tbase);
+ iowrite16be(cpm_muram_offset(mspi->rx_bd), &mspi->pram->rbase);
+ iowrite8(CPMFCR_EB | CPMFCR_GBL, &mspi->pram->tfcr);
+ iowrite8(CPMFCR_EB | CPMFCR_GBL, &mspi->pram->rfcr);
+ iowrite16be(SPI_MRBLR, &mspi->pram->mrblr);
+ iowrite32be(0, &mspi->pram->rstate);
+ iowrite32be(0, &mspi->pram->rdp);
+ iowrite16be(0, &mspi->pram->rbptr);
+ iowrite16be(0, &mspi->pram->rbc);
+ iowrite32be(0, &mspi->pram->rxtmp);
+ iowrite32be(0, &mspi->pram->tstate);
+ iowrite32be(0, &mspi->pram->tdp);
+ iowrite16be(0, &mspi->pram->tbptr);
+ iowrite16be(0, &mspi->pram->tbc);
+ iowrite32be(0, &mspi->pram->txtmp);
return 0;
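Besides the host rename, the spi-fsl-cpm changes above swap the PowerPC-only out_be16()/out_be32()/in_be16() accessors for the generic iowrite16be()/iowrite32be()/ioread16be() helpers, which reverse the argument order (value first, address second) and expect __iomem-annotated pointers. A small sketch of the before/after semantics, using a made-up parameter-RAM layout:

#include <linux/io.h>
#include <linux/types.h>

/* Made-up parameter-RAM layout, for illustration only. */
struct foo_pram {
	__be32 rstate;
	__be16 rbase;
	__be16 rbptr;
};

static void foo_pram_reinit(struct foo_pram __iomem *pram)
{
	/* old: out_be32(&pram->rstate, 0); */
	iowrite32be(0, &pram->rstate);

	/* old: out_be16(&pram->rbptr, in_be16(&pram->rbase)); */
	iowrite16be(ioread16be(&pram->rbase), &pram->rbptr);
}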
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 674cfe05f411..8318249f8a1f 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -13,7 +13,8 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -22,7 +23,7 @@
#define DRIVER_NAME "fsl-dspi"
#define SPI_MCR 0x00
-#define SPI_MCR_MASTER BIT(31)
+#define SPI_MCR_HOST BIT(31)
#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
@@ -339,7 +340,7 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
- if (spi_controller_is_slave(dspi->ctlr))
+ if (spi_controller_is_target(dspi->ctlr))
return data;
if (dspi->len > 0)
@@ -429,7 +430,7 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
- if (spi_controller_is_slave(dspi->ctlr)) {
+ if (spi_controller_is_target(dspi->ctlr)) {
wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
return 0;
}
@@ -502,15 +503,14 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
dma->chan_rx = dma_request_chan(dev, "rx");
if (IS_ERR(dma->chan_rx)) {
- dev_err(dev, "rx dma channel not available\n");
- ret = PTR_ERR(dma->chan_rx);
- return ret;
+ return dev_err_probe(dev, PTR_ERR(dma->chan_rx),
+ "rx dma channel not available\n");
}
dma->chan_tx = dma_request_chan(dev, "tx");
if (IS_ERR(dma->chan_tx)) {
- dev_err(dev, "tx dma channel not available\n");
ret = PTR_ERR(dma->chan_tx);
+ dev_err_probe(dev, ret, "tx dma channel not available\n");
goto err_tx_channel;
}
@@ -1061,7 +1061,7 @@ static int dspi_setup(struct spi_device *spi)
if (spi->mode & SPI_CPHA)
chip->ctar_val |= SPI_CTAR_CPHA;
- if (!spi_controller_is_slave(dspi->ctlr)) {
+ if (!spi_controller_is_target(dspi->ctlr)) {
chip->ctar_val |= SPI_CTAR_PCSSCK(pcssck) |
SPI_CTAR_CSSCK(cssck) |
SPI_CTAR_PASC(pasc) |
@@ -1216,8 +1216,8 @@ static int dspi_init(struct fsl_dspi *dspi)
if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
mcr |= SPI_MCR_XSPI;
- if (!spi_controller_is_slave(dspi->ctlr))
- mcr |= SPI_MCR_MASTER;
+ if (!spi_controller_is_target(dspi->ctlr))
+ mcr |= SPI_MCR_HOST;
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
@@ -1240,13 +1240,13 @@ static int dspi_init(struct fsl_dspi *dspi)
return 0;
}
-static int dspi_slave_abort(struct spi_master *master)
+static int dspi_target_abort(struct spi_controller *host)
{
- struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct fsl_dspi *dspi = spi_controller_get_devdata(host);
/*
* Terminate all pending DMA transactions for the SPI working
- * in SLAVE mode.
+ * in TARGET mode.
*/
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
dmaengine_terminate_sync(dspi->dma->chan_rx);
@@ -1277,7 +1277,7 @@ static int dspi_probe(struct platform_device *pdev)
if (!dspi)
return -ENOMEM;
- ctlr = spi_alloc_master(&pdev->dev, 0);
+ ctlr = spi_alloc_host(&pdev->dev, 0);
if (!ctlr)
return -ENOMEM;
@@ -1292,7 +1292,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
- ctlr->slave_abort = dspi_slave_abort;
+ ctlr->target_abort = dspi_target_abort;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
ctlr->use_gpio_descriptors = true;
@@ -1317,7 +1317,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->bus_num = bus_num;
if (of_property_read_bool(np, "spi-slave"))
- ctlr->slave = true;
+ ctlr->target = true;
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
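The dspi changes above also follow the broader slave -> target renaming: the controller flag, the abort hook and the mode helper all move to target-named equivalents, while the "spi-slave" device-tree property keeps its legacy name. A hypothetical sketch of that pattern (the foo_* names are illustrative only):

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

/* Was ->slave_abort(): stop any transfer queued while in target mode. */
static int foo_target_abort(struct spi_controller *ctlr)
{
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	/* spi_alloc_host() replaces spi_alloc_master() */
	ctlr = spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	/* The device-tree binding keeps the legacy "spi-slave" property name. */
	if (of_property_read_bool(pdev->dev.of_node, "spi-slave"))
		ctlr->target = true;		/* was ctlr->slave */

	ctlr->target_abort = foo_target_abort;	/* was ctlr->slave_abort */

	if (spi_controller_is_target(ctlr))	/* was spi_controller_is_slave() */
		dev_info(&pdev->dev, "running in target mode\n");

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);
	return ret;
}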
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index b3d2d3db5850..ea647ee94da8 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -148,7 +148,7 @@ static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
static int fsl_espi_check_message(struct spi_message *m)
{
- struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
+ struct fsl_espi *espi = spi_controller_get_devdata(m->spi->controller);
struct spi_transfer *t, *first;
if (m->frame_length > SPCOM_TRANLEN_MAX) {
@@ -323,7 +323,7 @@ start:
static void fsl_espi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+ struct fsl_espi *espi = spi_controller_get_devdata(spi->controller);
int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
struct fsl_espi_cs *cs = spi_get_ctldata(spi);
@@ -351,7 +351,7 @@ static void fsl_espi_setup_transfer(struct spi_device *spi,
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
- struct fsl_espi *espi = spi_master_get_devdata(spi->master);
+ struct fsl_espi *espi = spi_controller_get_devdata(spi->controller);
unsigned int rx_len = t->len;
u32 mask, spcom;
int ret;
@@ -396,7 +396,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
- struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
+ struct fsl_espi *espi = spi_controller_get_devdata(m->spi->controller);
struct spi_device *spi = m->spi;
int ret;
@@ -432,7 +432,7 @@ static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
return ret;
}
-static int fsl_espi_do_one_msg(struct spi_master *master,
+static int fsl_espi_do_one_msg(struct spi_controller *host,
struct spi_message *m)
{
unsigned int rx_nbits = 0, delay_nsecs = 0;
@@ -470,7 +470,7 @@ out:
if (m->status == -EINPROGRESS)
m->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return ret;
}
@@ -488,7 +488,7 @@ static int fsl_espi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
}
- espi = spi_master_get_devdata(spi->master);
+ espi = spi_controller_get_devdata(spi->controller);
pm_runtime_get_sync(espi->dev);
@@ -584,8 +584,8 @@ static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct fsl_espi *espi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_controller_get_devdata(host);
u32 regval;
regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
@@ -597,8 +597,8 @@ static int fsl_espi_runtime_suspend(struct device *dev)
static int fsl_espi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct fsl_espi *espi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_controller_get_devdata(host);
u32 regval;
regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
@@ -616,8 +616,8 @@ static size_t fsl_espi_max_message_size(struct spi_device *spi)
static void fsl_espi_init_regs(struct device *dev, bool initial)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct fsl_espi *espi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct fsl_espi *espi = spi_controller_get_devdata(host);
struct device_node *nc;
u32 csmode, cs, prop;
int ret;
@@ -629,10 +629,10 @@ static void fsl_espi_init_regs(struct device *dev, bool initial)
fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);
/* Init eSPI CS mode register */
- for_each_available_child_of_node(master->dev.of_node, nc) {
+ for_each_available_child_of_node(host->dev.of_node, nc) {
/* get chip select */
ret = of_property_read_u32(nc, "reg", &cs);
- if (ret || cs >= master->num_chipselect)
+ if (ret || cs >= host->num_chipselect)
continue;
csmode = CSMODE_INIT_VAL;
@@ -664,28 +664,28 @@ static void fsl_espi_init_regs(struct device *dev, bool initial)
static int fsl_espi_probe(struct device *dev, struct resource *mem,
unsigned int irq, unsigned int num_cs)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct fsl_espi *espi;
int ret;
- master = spi_alloc_master(dev, sizeof(struct fsl_espi));
- if (!master)
+ host = spi_alloc_host(dev, sizeof(struct fsl_espi));
+ if (!host)
return -ENOMEM;
- dev_set_drvdata(dev, master);
+ dev_set_drvdata(dev, host);
- master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
- SPI_LSB_FIRST | SPI_LOOP;
- master->dev.of_node = dev->of_node;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
- master->setup = fsl_espi_setup;
- master->cleanup = fsl_espi_cleanup;
- master->transfer_one_message = fsl_espi_do_one_msg;
- master->auto_runtime_pm = true;
- master->max_message_size = fsl_espi_max_message_size;
- master->num_chipselect = num_cs;
+ host->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_LSB_FIRST | SPI_LOOP;
+ host->dev.of_node = dev->of_node;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ host->setup = fsl_espi_setup;
+ host->cleanup = fsl_espi_cleanup;
+ host->transfer_one_message = fsl_espi_do_one_msg;
+ host->auto_runtime_pm = true;
+ host->max_message_size = fsl_espi_max_message_size;
+ host->num_chipselect = num_cs;
- espi = spi_master_get_devdata(master);
+ espi = spi_controller_get_devdata(host);
spin_lock_init(&espi->lock);
espi->dev = dev;
@@ -696,8 +696,8 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
goto err_probe;
}
/* determined by clock divider fields DIV16/PM in register SPMODEx */
- master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
- master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
+ host->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
+ host->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);
init_completion(&espi->done);
@@ -720,7 +720,7 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret < 0)
goto err_pm;
@@ -736,7 +736,7 @@ err_pm:
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
err_probe:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
@@ -791,10 +791,10 @@ static void of_fsl_espi_remove(struct platform_device *dev)
#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -803,7 +803,7 @@ static int of_fsl_espi_suspend(struct device *dev)
static int of_fsl_espi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
fsl_espi_init_regs(dev, false);
@@ -812,7 +812,7 @@ static int of_fsl_espi_resume(struct device *dev)
if (ret < 0)
return ret;
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 76e1192eb025..885757c29fbb 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -18,7 +18,8 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#ifdef CONFIG_FSL_SOC
#include <sysdev/fsl_soc.h>
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 015a1abb6a84..50a07f984b23 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -103,12 +103,9 @@ extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
struct fsl_spi_platform_data *pdata);
-extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
- struct spi_transfer *t, unsigned int len);
extern const char *mpc8xxx_spi_strmode(unsigned int flags);
extern void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq);
-extern int mpc8xxx_spi_remove(struct device *dev);
extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev);
#endif /* __SPI_FSL_LIB_H__ */
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index fb68c72df171..11991eb12636 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/dma/imx-dma.h>
@@ -73,7 +72,7 @@
#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
-#define CFGR1_MASTER BIT(0)
+#define CFGR1_HOST BIT(0)
#define FSR_TXCOUNT (0xFF)
#define RSR_RXEMPTY BIT(1)
#define TCR_CPOL BIT(31)
@@ -97,8 +96,7 @@ struct fsl_lpspi_data {
unsigned long base_phys;
struct clk *clk_ipg;
struct clk *clk_per;
- bool is_slave;
- u32 num_cs;
+ bool is_target;
bool is_only_cs1;
bool is_first_byte;
@@ -115,7 +113,7 @@ struct fsl_lpspi_data {
struct lpspi_config config;
struct completion xfer_done;
- bool slave_aborted;
+ bool target_aborted;
/* DMA */
bool usedma;
@@ -236,7 +234,7 @@ static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi)
}
if (txfifo_cnt < fsl_lpspi->txfifosize) {
- if (!fsl_lpspi->is_slave) {
+ if (!fsl_lpspi->is_target) {
temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
temp &= ~TCR_CONTC;
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
@@ -260,7 +258,7 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
temp |= fsl_lpspi->config.bpw - 1;
temp |= (fsl_lpspi->config.mode & 0x3) << 30;
temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
- if (!fsl_lpspi->is_slave) {
+ if (!fsl_lpspi->is_target) {
temp |= fsl_lpspi->config.prescale << 27;
/*
* Set TCR_CONT will keep SS asserted after current transfer.
@@ -387,7 +385,7 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
u32 temp;
int ret;
- if (!fsl_lpspi->is_slave) {
+ if (!fsl_lpspi->is_target) {
ret = fsl_lpspi_set_bitrate(fsl_lpspi);
if (ret)
return ret;
@@ -395,8 +393,8 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi_set_watermark(fsl_lpspi);
- if (!fsl_lpspi->is_slave)
- temp = CFGR1_MASTER;
+ if (!fsl_lpspi->is_target)
+ temp = CFGR1_HOST;
else
temp = CFGR1_PINCFG;
if (fsl_lpspi->config.mode & SPI_CS_HIGH)
@@ -463,12 +461,12 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
return fsl_lpspi_config(fsl_lpspi);
}
-static int fsl_lpspi_slave_abort(struct spi_controller *controller)
+static int fsl_lpspi_target_abort(struct spi_controller *controller)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
- fsl_lpspi->slave_aborted = true;
+ fsl_lpspi->target_aborted = true;
if (!fsl_lpspi->usedma)
complete(&fsl_lpspi->xfer_done);
else {
@@ -484,9 +482,9 @@ static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
- if (fsl_lpspi->is_slave) {
+ if (fsl_lpspi->is_target) {
if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
- fsl_lpspi->slave_aborted) {
+ fsl_lpspi->target_aborted) {
dev_dbg(fsl_lpspi->dev, "interrupted\n");
return -EINTR;
}
@@ -589,9 +587,9 @@ static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
reinit_completion(&fsl_lpspi->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
- fsl_lpspi->slave_aborted = false;
+ fsl_lpspi->target_aborted = false;
- if (!fsl_lpspi->is_slave) {
+ if (!fsl_lpspi->is_target) {
transfer_timeout = fsl_lpspi_calculate_timeout(fsl_lpspi,
transfer->len);
@@ -617,7 +615,7 @@ static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
}
} else {
if (wait_for_completion_interruptible(&fsl_lpspi->dma_tx_completion) ||
- fsl_lpspi->slave_aborted) {
+ fsl_lpspi->target_aborted) {
dev_dbg(fsl_lpspi->dev,
"I/O Error in DMA TX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
@@ -627,7 +625,7 @@ static int fsl_lpspi_dma_transfer(struct spi_controller *controller,
}
if (wait_for_completion_interruptible(&fsl_lpspi->dma_rx_completion) ||
- fsl_lpspi->slave_aborted) {
+ fsl_lpspi->target_aborted) {
dev_dbg(fsl_lpspi->dev,
"I/O Error in DMA RX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
@@ -702,7 +700,7 @@ static int fsl_lpspi_pio_transfer(struct spi_controller *controller,
fsl_lpspi->remain = t->len;
reinit_completion(&fsl_lpspi->xfer_done);
- fsl_lpspi->slave_aborted = false;
+ fsl_lpspi->target_aborted = false;
fsl_lpspi_write_tx_fifo(fsl_lpspi);
@@ -826,16 +824,17 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
struct spi_controller *controller;
struct resource *res;
int ret, irq;
+ u32 num_cs;
u32 temp;
- bool is_slave;
+ bool is_target;
- is_slave = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
- if (is_slave)
- controller = spi_alloc_slave(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
+ if (is_target)
+ controller = spi_alloc_target(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
else
- controller = spi_alloc_master(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = spi_alloc_host(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
if (!controller)
return -ENOMEM;
@@ -844,25 +843,9 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
- fsl_lpspi->is_slave = is_slave;
+ fsl_lpspi->is_target = is_target;
fsl_lpspi->is_only_cs1 = of_property_read_bool((&pdev->dev)->of_node,
"fsl,spi-only-use-cs1-sel");
- if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
- &fsl_lpspi->num_cs))
- fsl_lpspi->num_cs = 1;
-
- controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- controller->transfer_one = fsl_lpspi_transfer_one;
- controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
- controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
- controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- controller->dev.of_node = pdev->dev.of_node;
- controller->bus_num = pdev->id;
- controller->num_chipselect = fsl_lpspi->num_cs;
- controller->slave_abort = fsl_lpspi_slave_abort;
- if (!fsl_lpspi->is_slave)
- controller->use_gpio_descriptors = true;
init_completion(&fsl_lpspi->xfer_done);
@@ -912,6 +895,26 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
fsl_lpspi->txfifosize = 1 << (temp & 0x0f);
fsl_lpspi->rxfifosize = 1 << ((temp >> 8) & 0x0f);
+ if (of_property_read_u32((&pdev->dev)->of_node, "num-cs",
+ &num_cs)) {
+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx93-spi"))
+ num_cs = ((temp >> 16) & 0xf);
+ else
+ num_cs = 1;
+ }
+
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->transfer_one = fsl_lpspi_transfer_one;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->num_chipselect = num_cs;
+ controller->target_abort = fsl_lpspi_target_abort;
+ if (!fsl_lpspi->is_target)
+ controller->use_gpio_descriptors = true;
ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 8ade61e5ebc0..79bac30e79af 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -34,7 +34,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/sizes.h>
@@ -368,7 +367,7 @@ static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
static bool fsl_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
int ret;
ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
@@ -641,7 +640,7 @@ static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
void __iomem *base = q->iobase;
u32 addr_offset = 0;
int err = 0;
@@ -703,7 +702,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
- struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
if (op->data.dir == SPI_MEM_DATA_OUT) {
if (op->data.nbytes > q->devtype_data->txfifo)
@@ -809,7 +808,7 @@ static int fsl_qspi_default_setup(struct fsl_qspi *q)
static const char *fsl_qspi_get_name(struct spi_mem *mem)
{
- struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
+ struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &mem->spi->dev;
const char *name;
@@ -849,7 +848,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
struct fsl_qspi *q;
int ret;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*q));
if (!ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 106fe60a0a50..97faf984801f 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -88,7 +88,7 @@ static int fsl_spi_get_type(struct device *dev)
static void fsl_spi_change_mode(struct spi_device *spi)
{
- struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
+ struct mpc8xxx_spi *mspi = spi_controller_get_devdata(spi->controller);
struct spi_mpc8xxx_cs *cs = spi->controller_state;
struct fsl_spi_reg __iomem *reg_base = mspi->reg_base;
__be32 __iomem *mode = &reg_base->mode;
@@ -183,7 +183,7 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
u32 hz = 0;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
- mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
if (t) {
bits_per_word = t->bits_per_word;
@@ -252,7 +252,7 @@ static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
bool is_dma_mapped)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
struct fsl_spi_reg __iomem *reg_base;
unsigned int len = t->len;
u8 bits_per_word;
@@ -385,7 +385,7 @@ static int fsl_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, cs);
initial_setup = true;
}
- mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
reg_base = mpc8xxx_spi->reg_base;
@@ -479,7 +479,7 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
{
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(spi->controller);
struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
u32 slvsel;
u16 cs = spi_get_chipselect(spi, 0);
@@ -493,8 +493,8 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
static void fsl_spi_grlib_probe(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(host);
struct fsl_spi_reg __iomem *reg_base = mpc8xxx_spi->reg_base;
int mbits;
u32 capabilities;
@@ -511,8 +511,8 @@ static void fsl_spi_grlib_probe(struct device *dev)
mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities);
mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
}
- master->num_chipselect = mpc8xxx_spi->native_chipselects;
- master->set_cs = fsl_spi_grlib_cs_control;
+ host->num_chipselect = mpc8xxx_spi->native_chipselects;
+ host->set_cs = fsl_spi_grlib_cs_control;
}
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
@@ -526,35 +526,35 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
iowrite32be(on ? 0 : SPI_BOOT_SEL_BIT, pinfo->immr_spi_cs);
}
-static struct spi_master *fsl_spi_probe(struct device *dev,
+static struct spi_controller *fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
- struct spi_master *master;
+ struct spi_controller *host;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg __iomem *reg_base;
u32 regval;
int ret = 0;
- master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
- if (master == NULL) {
+ host = spi_alloc_host(dev, sizeof(struct mpc8xxx_spi));
+ if (host == NULL) {
ret = -ENOMEM;
goto err;
}
- dev_set_drvdata(dev, master);
+ dev_set_drvdata(dev, host);
mpc8xxx_spi_probe(dev, mem, irq);
- master->setup = fsl_spi_setup;
- master->cleanup = fsl_spi_cleanup;
- master->prepare_message = fsl_spi_prepare_message;
- master->transfer_one = fsl_spi_transfer_one;
- master->unprepare_message = fsl_spi_unprepare_message;
- master->use_gpio_descriptors = true;
- master->set_cs = fsl_spi_cs_control;
+ host->setup = fsl_spi_setup;
+ host->cleanup = fsl_spi_cleanup;
+ host->prepare_message = fsl_spi_prepare_message;
+ host->transfer_one = fsl_spi_transfer_one;
+ host->unprepare_message = fsl_spi_unprepare_message;
+ host->use_gpio_descriptors = true;
+ host->set_cs = fsl_spi_cs_control;
- mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi = spi_controller_get_devdata(host);
mpc8xxx_spi->max_bits_per_word = 32;
mpc8xxx_spi->type = fsl_spi_get_type(dev);
@@ -572,13 +572,13 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
fsl_spi_grlib_probe(dev);
if (mpc8xxx_spi->flags & SPI_CPM_MODE)
- master->bits_per_word_mask =
+ host->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
else
- master->bits_per_word_mask =
+ host->bits_per_word_mask =
(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
- master->bits_per_word_mask &=
+ host->bits_per_word_mask &=
SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
@@ -615,19 +615,19 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
mpc8xxx_spi_write_reg(&reg_base->mode, regval);
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret < 0)
goto err_probe;
dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
- return master;
+ return host;
err_probe:
fsl_spi_cpm_free(mpc8xxx_spi);
err_cpm_init:
- spi_master_put(master);
+ spi_controller_put(host);
err:
return ERR_PTR(ret);
}
@@ -636,7 +636,7 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource mem;
int irq, type;
int ret;
@@ -689,9 +689,9 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
goto unmap_out;
}
- master = fsl_spi_probe(dev, &mem, irq);
+ host = fsl_spi_probe(dev, &mem, irq);
- return PTR_ERR_OR_ZERO(master);
+ return PTR_ERR_OR_ZERO(host);
unmap_out:
#if IS_ENABLED(CONFIG_FSL_SOC)
@@ -703,8 +703,8 @@ unmap_out:
static void of_fsl_spi_remove(struct platform_device *ofdev)
{
- struct spi_master *master = platform_get_drvdata(ofdev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(ofdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(host);
fsl_spi_cpm_free(mpc8xxx_spi);
}
@@ -730,7 +730,7 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
{
struct resource *mem;
int irq;
- struct spi_master *master;
+ struct spi_controller *host;
if (!dev_get_platdata(&pdev->dev))
return -EINVAL;
@@ -740,17 +740,17 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
- master = fsl_spi_probe(&pdev->dev, mem, irq);
- return PTR_ERR_OR_ZERO(master);
+ host = fsl_spi_probe(&pdev->dev, mem, irq);
+ return PTR_ERR_OR_ZERO(host);
}
static void plat_mpc8xxx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_controller_get_devdata(host);
fsl_spi_cpm_free(mpc8xxx_spi);
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 1df9d4844a68..f4f376a8351b 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/soc/qcom/geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
@@ -52,6 +53,9 @@
#define SPI_CS_CLK_DELAY_MSK GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT 10
+#define SE_SPI_SLAVE_EN (0x2BC)
+#define SPI_SLAVE_EN BIT(0)
+
/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY 1
#define SPI_RX_ONLY 2
@@ -99,6 +103,16 @@ struct spi_geni_master {
int cur_xfer_mode;
};
+static void spi_slv_setup(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+
+ writel(SPI_SLAVE_EN, se->base + SE_SPI_SLAVE_EN);
+ writel(GENI_IO_MUX_0_EN, se->base + GENI_OUTPUT_CTRL);
+ writel(START_TRIGGER, se->base + SE_GENI_CFG_SEQ_START);
+ dev_dbg(mas->dev, "spi slave setup done\n");
+}
+
static int get_spi_clk_cfg(unsigned int speed_hz,
struct spi_geni_master *mas,
unsigned int *clk_idx,
@@ -140,12 +154,22 @@ static void handle_se_timeout(struct spi_master *spi,
const struct spi_transfer *xfer;
spin_lock_irq(&mas->lock);
- reinit_completion(&mas->cancel_done);
if (mas->cur_xfer_mode == GENI_SE_FIFO)
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
+
+ if (spi->slave) {
+ /*
+		 * Skip the CMD Cancel sequence since the SPI slave
+		 * doesn't support it.
+ */
+ spin_unlock_irq(&mas->lock);
+ goto unmap_if_dma;
+ }
+
+ reinit_completion(&mas->cancel_done);
geni_se_cancel_m_cmd(se);
spin_unlock_irq(&mas->lock);
@@ -542,6 +566,10 @@ static bool geni_can_dma(struct spi_controller *ctlr,
if (mas->cur_xfer_mode == GENI_GPI_DMA)
return true;
+ /* Set SE DMA mode for SPI slave. */
+ if (ctlr->slave)
+ return true;
+
len = get_xfer_len_in_words(xfer, mas);
fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
@@ -619,6 +647,7 @@ static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
static int spi_geni_init(struct spi_geni_master *mas)
{
+ struct spi_master *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
u32 spi_tx_cfg, fifo_disable;
@@ -627,7 +656,14 @@ static int spi_geni_init(struct spi_geni_master *mas)
pm_runtime_get_sync(mas->dev);
proto = geni_se_read_proto(se);
- if (proto != GENI_SE_SPI) {
+
+ if (spi->slave) {
+ if (proto != GENI_SE_SPI_SLAVE) {
+ dev_err(mas->dev, "Invalid proto %d\n", proto);
+ goto out_pm;
+ }
+ spi_slv_setup(mas);
+ } else if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
goto out_pm;
}
@@ -679,9 +715,11 @@ static int spi_geni_init(struct spi_geni_master *mas)
}
/* We always control CS manually */
- spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
- spi_tx_cfg &= ~CS_TOGGLE;
- writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+ if (!spi->slave) {
+ spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
+ spi_tx_cfg &= ~CS_TOGGLE;
+ writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+ }
out_pm:
pm_runtime_put(mas->dev);
@@ -1074,6 +1112,9 @@ static int spi_geni_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
pm_runtime_enable(dev);
+ if (device_property_read_bool(&pdev->dev, "spi-slave"))
+ spi->slave = true;
+
ret = geni_icc_get(&mas->se, NULL);
if (ret)
goto spi_geni_probe_runtime_disable;
@@ -1094,7 +1135,7 @@ static int spi_geni_probe(struct platform_device *pdev)
* for dma (gsi) mode, the gsi will set cs based on params passed in
* TRE
*/
- if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ if (!spi->slave && mas->cur_xfer_mode == GENI_SE_FIFO)
spi->set_cs = spi_geni_set_cs;
/*
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 092afc7679d4..d8db4564b406 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * SPI master driver using generic bitbanged GPIO
+ * SPI host driver using generic bitbanged GPIO
*
* Copyright (C) 2006,2008 David Brownell
* Copyright (C) 2017 Linus Walleij
@@ -10,7 +10,6 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -18,7 +17,7 @@
/*
- * This bitbanging SPI master driver should help make systems usable
+ * This bitbanging SPI host driver should help make systems usable
* when a native hardware SPI engine is not available, perhaps because
* its driver isn't yet working or because the I/O pins it requires
* are used for other purposes.
@@ -27,7 +26,7 @@
*
* spi->controller_state ... reserved for bitbang framework code
*
- * spi->master->dev.driver_data ... points to spi_gpio->bitbang
+ * spi->controller->dev.driver_data ... points to spi_gpio->bitbang
*/
struct spi_gpio {
@@ -78,7 +77,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
const struct spi_bitbang *bang;
struct spi_gpio *spi_gpio;
- bang = spi_master_get_devdata(spi->master);
+ bang = spi_controller_get_devdata(spi->controller);
spi_gpio = container_of(bang, struct spi_gpio, bitbang);
return spi_gpio;
}
@@ -170,7 +169,7 @@ static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
/*
* These functions do not call setmosi or getmiso if respective flag
- * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to
+ * (SPI_CONTROLLER_NO_RX or SPI_CONTROLLER_NO_TX) is set, so they are safe to
* call when such pin is not present or defined in the controller.
* A separate set of callbacks is defined to get highest possible
* speed in the generic case (when both MISO and MOSI lines are
@@ -181,7 +180,7 @@ static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- flags = spi->master->flags;
+ flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha0(spi, nsecs, 0, flags, word, bits);
else
@@ -191,7 +190,7 @@ static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- flags = spi->master->flags;
+ flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha1(spi, nsecs, 0, flags, word, bits);
else
@@ -201,7 +200,7 @@ static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- flags = spi->master->flags;
+ flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha0(spi, nsecs, 1, flags, word, bits);
else
@@ -211,7 +210,7 @@ static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits, unsigned flags)
{
- flags = spi->master->flags;
+ flags = spi->controller->flags;
if (unlikely(spi->mode & SPI_LSB_FIRST))
return bitbang_txrx_le_cpha1(spi, nsecs, 1, flags, word, bits);
else
@@ -311,7 +310,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
* On platforms which can do so, configure MISO with a weak pullup unless
* there's an external pullup on that signal. That saves power by avoiding
* floating signals. (A weak pulldown would save power too, but many
- * drivers expect to see all-ones data as the no slave "response".)
+ * drivers expect to see all-ones data as the no target "response".)
*/
static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
{
@@ -335,27 +334,27 @@ static const struct of_device_id spi_gpio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
static int spi_gpio_probe_dt(struct platform_device *pdev,
- struct spi_master *master)
+ struct spi_controller *host)
{
- master->dev.of_node = pdev->dev.of_node;
- master->use_gpio_descriptors = true;
+ host->dev.of_node = pdev->dev.of_node;
+ host->use_gpio_descriptors = true;
return 0;
}
#else
static inline int spi_gpio_probe_dt(struct platform_device *pdev,
- struct spi_master *master)
+ struct spi_controller *host)
{
return 0;
}
#endif
static int spi_gpio_probe_pdata(struct platform_device *pdev,
- struct spi_master *master)
+ struct spi_controller *host)
{
struct device *dev = &pdev->dev;
struct spi_gpio_platform_data *pdata = dev_get_platdata(dev);
- struct spi_gpio *spi_gpio = spi_master_get_devdata(master);
+ struct spi_gpio *spi_gpio = spi_controller_get_devdata(host);
int i;
#ifdef GENERIC_BITBANG
@@ -363,18 +362,18 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
return -ENODEV;
#endif
/*
- * The master needs to think there is a chipselect even if not
+ * The host needs to think there is a chipselect even if not
* connected
*/
- master->num_chipselect = pdata->num_chipselect ?: 1;
+ host->num_chipselect = pdata->num_chipselect ?: 1;
- spi_gpio->cs_gpios = devm_kcalloc(dev, master->num_chipselect,
+ spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect,
sizeof(*spi_gpio->cs_gpios),
GFP_KERNEL);
if (!spi_gpio->cs_gpios)
return -ENOMEM;
- for (i = 0; i < master->num_chipselect; i++) {
+ for (i = 0; i < host->num_chipselect; i++) {
spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", i,
GPIOD_OUT_HIGH);
if (IS_ERR(spi_gpio->cs_gpios[i]))
@@ -387,58 +386,58 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
static int spi_gpio_probe(struct platform_device *pdev)
{
int status;
- struct spi_master *master;
+ struct spi_controller *host;
struct spi_gpio *spi_gpio;
struct device *dev = &pdev->dev;
struct spi_bitbang *bb;
- master = devm_spi_alloc_master(dev, sizeof(*spi_gpio));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(*spi_gpio));
+ if (!host)
return -ENOMEM;
if (pdev->dev.of_node)
- status = spi_gpio_probe_dt(pdev, master);
+ status = spi_gpio_probe_dt(pdev, host);
else
- status = spi_gpio_probe_pdata(pdev, master);
+ status = spi_gpio_probe_pdata(pdev, host);
if (status)
return status;
- spi_gpio = spi_master_get_devdata(master);
+ spi_gpio = spi_controller_get_devdata(host);
status = spi_gpio_request(dev, spi_gpio);
if (status)
return status;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ host->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL |
SPI_CS_HIGH | SPI_LSB_FIRST;
if (!spi_gpio->mosi) {
/* HW configuration without MOSI pin
*
- * No setting SPI_MASTER_NO_RX here - if there is only
+ * No setting SPI_CONTROLLER_NO_RX here - if there is only
* a MOSI pin connected the host can still do RX by
* changing the direction of the line.
*/
- master->flags = SPI_MASTER_NO_TX;
+ host->flags = SPI_CONTROLLER_NO_TX;
}
- master->bus_num = pdev->id;
- master->setup = spi_gpio_setup;
- master->cleanup = spi_gpio_cleanup;
+ host->bus_num = pdev->id;
+ host->setup = spi_gpio_setup;
+ host->cleanup = spi_gpio_cleanup;
bb = &spi_gpio->bitbang;
- bb->master = master;
+ bb->master = host;
/*
* There is some additional business, apart from driving the CS GPIO
* line, that we need to do on selection. This makes the local
* callback for chipselect always get called.
*/
- master->flags |= SPI_MASTER_GPIO_SS;
+ host->flags |= SPI_CONTROLLER_GPIO_SS;
bb->chipselect = spi_gpio_chipselect;
bb->set_line_direction = spi_gpio_set_direction;
- if (master->flags & SPI_MASTER_NO_TX) {
+ if (host->flags & SPI_CONTROLLER_NO_TX) {
bb->txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
bb->txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
bb->txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
@@ -455,7 +454,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
if (status)
return status;
- return devm_spi_register_master(&pdev->dev, master);
+ return devm_spi_register_controller(&pdev->dev, host);
}
MODULE_ALIAS("platform:" DRIVER_NAME);
@@ -469,6 +468,6 @@ static struct platform_driver spi_gpio_driver = {
};
module_platform_driver(spi_gpio_driver);
-MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO ");
+MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO ");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
index 684d63f402f3..fd2fac236bbd 100644
--- a/drivers/spi/spi-gxp.c
+++ b/drivers/spi/spi-gxp.c
@@ -3,7 +3,6 @@
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -200,7 +199,7 @@ static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op
static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->master);
+ struct gxp_spi *spifi = spi_controller_get_devdata(mem->spi->controller);
struct gxp_spi_chip *chip = &spifi->chips[spi_get_chipselect(mem->spi, 0)];
int ret;
@@ -236,7 +235,7 @@ static const struct spi_controller_mem_ops gxp_spi_mem_ops = {
static int gxp_spi_setup(struct spi_device *spi)
{
- struct gxp_spi *spifi = spi_controller_get_devdata(spi->master);
+ struct gxp_spi *spifi = spi_controller_get_devdata(spi->controller);
unsigned int cs = spi_get_chipselect(spi, 0);
struct gxp_spi_chip *chip = &spifi->chips[cs];
@@ -258,7 +257,7 @@ static int gxp_spifi_probe(struct platform_device *pdev)
data = of_device_get_match_data(&pdev->dev);
- ctlr = devm_spi_alloc_master(dev, sizeof(*spifi));
+ ctlr = devm_spi_alloc_host(dev, sizeof(*spifi));
if (!ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 2b4b3d2a22b8..35ef5e8e2ffd 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -164,10 +164,10 @@ static int hisi_spi_debugfs_init(struct hisi_spi *hs)
{
char name[32];
- struct spi_controller *master;
+ struct spi_controller *host;
- master = container_of(hs->dev, struct spi_controller, dev);
- snprintf(name, 32, "hisi_spi%d", master->bus_num);
+ host = container_of(hs->dev, struct spi_controller, dev);
+ snprintf(name, 32, "hisi_spi%d", host->bus_num);
hs->debugfs = debugfs_create_dir(name, NULL);
if (IS_ERR(hs->debugfs))
return -ENOMEM;
@@ -291,18 +291,18 @@ static void __hisi_calc_div_reg(struct hisi_chip_data *chip)
chip->div_post = (chip->clk_div / chip->div_pre) - 1;
}
-static u32 hisi_calc_effective_speed(struct spi_controller *master,
+static u32 hisi_calc_effective_speed(struct spi_controller *host,
struct hisi_chip_data *chip, u32 speed_hz)
{
u32 effective_speed;
/* Note clock divider doesn't support odd numbers */
- chip->clk_div = DIV_ROUND_UP(master->max_speed_hz, speed_hz) + 1;
+ chip->clk_div = DIV_ROUND_UP(host->max_speed_hz, speed_hz) + 1;
chip->clk_div &= 0xfffe;
if (chip->clk_div > CLK_DIV_MAX)
chip->clk_div = CLK_DIV_MAX;
- effective_speed = master->max_speed_hz / chip->clk_div;
+ effective_speed = host->max_speed_hz / chip->clk_div;
if (chip->speed_hz != effective_speed) {
__hisi_calc_div_reg(chip);
chip->speed_hz = effective_speed;
@@ -336,20 +336,20 @@ static void hisi_spi_hw_init(struct hisi_spi *hs)
static irqreturn_t hisi_spi_irq(int irq, void *dev_id)
{
- struct spi_controller *master = dev_id;
- struct hisi_spi *hs = spi_controller_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct hisi_spi *hs = spi_controller_get_devdata(host);
u32 irq_status = readl(hs->regs + HISI_SPI_ISR) & ISR_MASK;
if (!irq_status)
return IRQ_NONE;
- if (!master->cur_msg)
+ if (!host->cur_msg)
return IRQ_HANDLED;
/* Error handling */
if (irq_status & ISR_RXOF) {
dev_err(hs->dev, "interrupt_transfer: fifo overflow\n");
- master->cur_msg->status = -EIO;
+ host->cur_msg->status = -EIO;
goto finalize_transfer;
}
@@ -369,20 +369,20 @@ static irqreturn_t hisi_spi_irq(int irq, void *dev_id)
finalize_transfer:
hisi_spi_disable(hs);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
-static int hisi_spi_transfer_one(struct spi_controller *master,
+static int hisi_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *transfer)
{
- struct hisi_spi *hs = spi_controller_get_devdata(master);
+ struct hisi_spi *hs = spi_controller_get_devdata(host);
struct hisi_chip_data *chip = spi_get_ctldata(spi);
u32 cr = chip->cr;
/* Update per transfer options for speed and bpw */
transfer->effective_speed_hz =
- hisi_calc_effective_speed(master, chip, transfer->speed_hz);
+ hisi_calc_effective_speed(host, chip, transfer->speed_hz);
cr |= FIELD_PREP(CR_DIV_PRE_MASK, chip->div_pre);
cr |= FIELD_PREP(CR_DIV_POST_MASK, chip->div_post);
cr |= FIELD_PREP(CR_BPW_MASK, transfer->bits_per_word - 1);
@@ -409,10 +409,10 @@ static int hisi_spi_transfer_one(struct spi_controller *master,
return 1;
}
-static void hisi_spi_handle_err(struct spi_controller *master,
+static void hisi_spi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
- struct hisi_spi *hs = spi_controller_get_devdata(master);
+ struct hisi_spi *hs = spi_controller_get_devdata(host);
hisi_spi_disable(hs);
@@ -452,7 +452,7 @@ static void hisi_spi_cleanup(struct spi_device *spi)
static int hisi_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct spi_controller *master;
+ struct spi_controller *host;
struct hisi_spi *hs;
int ret, irq;
@@ -460,13 +460,13 @@ static int hisi_spi_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- master = devm_spi_alloc_master(dev, sizeof(*hs));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(*hs));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- hs = spi_controller_get_devdata(master);
+ hs = spi_controller_get_devdata(host);
hs->dev = dev;
hs->irq = irq;
@@ -474,9 +474,9 @@ static int hisi_spi_probe(struct platform_device *pdev)
if (IS_ERR(hs->regs))
return PTR_ERR(hs->regs);
- /* Specify maximum SPI clocking speed (master only) by firmware */
+ /* Specify maximum SPI clocking speed (host only) by firmware */
ret = device_property_read_u32(dev, "spi-max-frequency",
- &master->max_speed_hz);
+ &host->max_speed_hz);
if (ret) {
dev_err(dev, "failed to get max SPI clocking speed, ret=%d\n",
ret);
@@ -484,32 +484,32 @@ static int hisi_spi_probe(struct platform_device *pdev)
}
ret = device_property_read_u16(dev, "num-cs",
- &master->num_chipselect);
+ &host->num_chipselect);
if (ret)
- master->num_chipselect = DEFAULT_NUM_CS;
-
- master->use_gpio_descriptors = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->bus_num = pdev->id;
- master->setup = hisi_spi_setup;
- master->cleanup = hisi_spi_cleanup;
- master->transfer_one = hisi_spi_transfer_one;
- master->handle_err = hisi_spi_handle_err;
- master->dev.fwnode = dev->fwnode;
+ host->num_chipselect = DEFAULT_NUM_CS;
+
+ host->use_gpio_descriptors = true;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ host->bus_num = pdev->id;
+ host->setup = hisi_spi_setup;
+ host->cleanup = hisi_spi_cleanup;
+ host->transfer_one = hisi_spi_transfer_one;
+ host->handle_err = hisi_spi_handle_err;
+ host->dev.fwnode = dev->fwnode;
hisi_spi_hw_init(hs);
ret = devm_request_irq(dev, hs->irq, hisi_spi_irq, 0, dev_name(dev),
- master);
+ host);
if (ret < 0) {
dev_err(dev, "failed to get IRQ=%d, ret=%d\n", hs->irq, ret);
return ret;
}
- ret = spi_register_controller(master);
+ ret = spi_register_controller(host);
if (ret) {
- dev_err(dev, "failed to register spi master, ret=%d\n", ret);
+ dev_err(dev, "failed to register spi host, ret=%d\n", ret);
return ret;
}
@@ -518,18 +518,18 @@ static int hisi_spi_probe(struct platform_device *pdev)
dev_info(dev, "hw version:0x%x max-freq:%u kHz\n",
readl(hs->regs + HISI_SPI_VERSION),
- master->max_speed_hz / 1000);
+ host->max_speed_hz / 1000);
return 0;
}
static void hisi_spi_remove(struct platform_device *pdev)
{
- struct spi_controller *master = platform_get_drvdata(pdev);
- struct hisi_spi *hs = spi_controller_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct hisi_spi *hs = spi_controller_get_devdata(host);
debugfs_remove_recursive(hs->debugfs);
- spi_unregister_controller(master);
+ spi_unregister_controller(host);
}
static const struct acpi_device_id hisi_spi_acpi_match[] = {
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 7cbcb065bb44..9d22018f7985 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -152,7 +152,7 @@ static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem,
uintptr_t addr = (uintptr_t)op->data.buf.in;
int max_byte_count;
- host = spi_controller_get_devdata(spi->master);
+ host = spi_controller_get_devdata(spi->controller);
max_byte_count = host->max_cmd_dword * 4;
@@ -174,7 +174,7 @@ static bool hisi_sfc_v3xx_supports_op(struct spi_mem *mem,
struct spi_device *spi = mem->spi;
struct hisi_sfc_v3xx_host *host;
- host = spi_controller_get_devdata(spi->master);
+ host = spi_controller_get_devdata(spi->controller);
if (op->data.buswidth > 4 || op->dummy.buswidth > 4 ||
op->addr.buswidth > 4 || op->cmd.buswidth > 4)
@@ -363,7 +363,7 @@ static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem,
struct spi_device *spi = mem->spi;
u8 chip_select = spi_get_chipselect(spi, 0);
- host = spi_controller_get_devdata(spi->master);
+ host = spi_controller_get_devdata(spi->controller);
return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select);
}
@@ -431,7 +431,7 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
u32 version, glb_config;
int ret;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*host));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*host));
if (!ctlr)
return -ENOMEM;
@@ -448,13 +448,13 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
host->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regbase)) {
ret = PTR_ERR(host->regbase);
- goto err_put_master;
+ goto err_put_host;
}
host->irq = platform_get_irq_optional(pdev, 0);
if (host->irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto err_put_master;
+ goto err_put_host;
}
hisi_sfc_v3xx_disable_int(host);
@@ -496,15 +496,15 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(dev, ctlr);
if (ret)
- goto err_put_master;
+ goto err_put_host;
dev_info(&pdev->dev, "hw version 0x%x, %s mode.\n",
version, host->irq ? "irq" : "polling");
return 0;
-err_put_master:
- spi_master_put(ctlr);
+err_put_host:
+ spi_controller_put(ctlr);
return ret;
}
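Unlike the devm-managed case, this driver allocates the controller with spi_alloc_host(), so every early exit has to drop the reference it took. A hedged sketch of that error-path pattern follows; example_* names are placeholders, not part of the patch.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_priv {
	void __iomem *regbase;
};

static int example_sfc_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct example_priv *priv;
	int ret;

	/* spi_alloc_host() replaces spi_alloc_master() */
	ctlr = spi_alloc_host(&pdev->dev, sizeof(*priv));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);

	priv->regbase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regbase)) {
		ret = PTR_ERR(priv->regbase);
		goto err_put_host;
	}

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		goto err_put_host;

	return 0;

err_put_host:
	/* spi_controller_put() replaces spi_master_put() and drops the
	 * reference taken by spi_alloc_host(). */
	spi_controller_put(ctlr);
	return ret;
}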
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index d775f87770e3..d8360f94d3b7 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -86,7 +86,7 @@
struct img_spfi {
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
spinlock_t lock;
void __iomem *regs;
@@ -221,11 +221,11 @@ static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
return count;
}
-static int img_spfi_start_pio(struct spi_master *master,
+static int img_spfi_start_pio(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
unsigned int tx_bytes = 0, rx_bytes = 0;
const void *tx_buf = xfer->tx_buf;
void *rx_buf = xfer->rx_buf;
@@ -285,7 +285,7 @@ static void img_spfi_dma_rx_cb(void *data)
spin_lock_irqsave(&spfi->lock, flags);
spfi->rx_dma_busy = false;
if (!spfi->tx_dma_busy)
- spi_finalize_current_transfer(spfi->master);
+ spi_finalize_current_transfer(spfi->host);
spin_unlock_irqrestore(&spfi->lock, flags);
}
@@ -299,15 +299,15 @@ static void img_spfi_dma_tx_cb(void *data)
spin_lock_irqsave(&spfi->lock, flags);
spfi->tx_dma_busy = false;
if (!spfi->rx_dma_busy)
- spi_finalize_current_transfer(spfi->master);
+ spi_finalize_current_transfer(spfi->host);
spin_unlock_irqrestore(&spfi->lock, flags);
}
-static int img_spfi_start_dma(struct spi_master *master,
+static int img_spfi_start_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
struct dma_slave_config rxconf, txconf;
@@ -384,10 +384,10 @@ stop_dma:
return -EIO;
}
-static void img_spfi_handle_err(struct spi_master *master,
+static void img_spfi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
unsigned long flags;
/*
@@ -405,9 +405,9 @@ static void img_spfi_handle_err(struct spi_master *master,
spin_unlock_irqrestore(&spfi->lock, flags);
}
-static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
+static int img_spfi_prepare(struct spi_controller *host, struct spi_message *msg)
{
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
u32 val;
val = spfi_readl(spfi, SPFI_PORT_STATE);
@@ -427,20 +427,20 @@ static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
return 0;
}
-static int img_spfi_unprepare(struct spi_master *master,
+static int img_spfi_unprepare(struct spi_controller *host,
struct spi_message *msg)
{
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
spfi_reset(spfi);
return 0;
}
-static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
+static void img_spfi_config(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
u32 val, div;
/*
@@ -476,11 +476,11 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
spfi_writel(spfi, val, SPFI_CONTROL);
}
-static int img_spfi_transfer_one(struct spi_master *master,
+static int img_spfi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct img_spfi *spfi = spi_controller_get_devdata(spi->controller);
int ret;
if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
@@ -490,16 +490,16 @@ static int img_spfi_transfer_one(struct spi_master *master,
return -EINVAL;
}
- img_spfi_config(master, spi, xfer);
- if (master->can_dma && master->can_dma(master, spi, xfer))
- ret = img_spfi_start_dma(master, spi, xfer);
+ img_spfi_config(host, spi, xfer);
+ if (host->can_dma && host->can_dma(host, spi, xfer))
+ ret = img_spfi_start_dma(host, spi, xfer);
else
- ret = img_spfi_start_pio(master, spi, xfer);
+ ret = img_spfi_start_pio(host, spi, xfer);
return ret;
}
-static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool img_spfi_can_dma(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer)
{
if (xfer->len > SPFI_32BIT_FIFO_SIZE)
@@ -524,20 +524,20 @@ static irqreturn_t img_spfi_irq(int irq, void *dev_id)
static int img_spfi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct img_spfi *spfi;
struct resource *res;
int ret;
u32 max_speed_hz;
- master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*spfi));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- spfi = spi_master_get_devdata(master);
+ spfi = spi_controller_get_devdata(host);
spfi->dev = &pdev->dev;
- spfi->master = master;
+ spfi->host = host;
spin_lock_init(&spfi->lock);
spfi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
@@ -582,15 +582,15 @@ static int img_spfi_probe(struct platform_device *pdev)
*/
spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);
- master->auto_runtime_pm = true;
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
+ host->auto_runtime_pm = true;
+ host->bus_num = pdev->id;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
- master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
- master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
- master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
- master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
+ host->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
+ host->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
+ host->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
/*
* Maximum speed supported by spfi is limited to the lower value
@@ -601,15 +601,15 @@ static int img_spfi_probe(struct platform_device *pdev)
*/
if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
&max_speed_hz)) {
- if (master->max_speed_hz > max_speed_hz)
- master->max_speed_hz = max_speed_hz;
+ if (host->max_speed_hz > max_speed_hz)
+ host->max_speed_hz = max_speed_hz;
}
- master->transfer_one = img_spfi_transfer_one;
- master->prepare_message = img_spfi_prepare;
- master->unprepare_message = img_spfi_unprepare;
- master->handle_err = img_spfi_handle_err;
- master->use_gpio_descriptors = true;
+ host->transfer_one = img_spfi_transfer_one;
+ host->prepare_message = img_spfi_prepare;
+ host->unprepare_message = img_spfi_unprepare;
+ host->handle_err = img_spfi_handle_err;
+ host->use_gpio_descriptors = true;
spfi->tx_ch = dma_request_chan(spfi->dev, "tx");
if (IS_ERR(spfi->tx_ch)) {
@@ -636,15 +636,15 @@ static int img_spfi_probe(struct platform_device *pdev)
spfi->rx_ch = NULL;
dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
} else {
- master->dma_tx = spfi->tx_ch;
- master->dma_rx = spfi->rx_ch;
- master->can_dma = img_spfi_can_dma;
+ host->dma_tx = spfi->tx_ch;
+ host->dma_rx = spfi->rx_ch;
+ host->can_dma = img_spfi_can_dma;
}
pm_runtime_set_active(spfi->dev);
pm_runtime_enable(spfi->dev);
- ret = devm_spi_register_master(spfi->dev, master);
+ ret = devm_spi_register_controller(spfi->dev, host);
if (ret)
goto disable_pm;
@@ -660,15 +660,15 @@ disable_pm:
disable_pclk:
clk_disable_unprepare(spfi->sys_clk);
put_spi:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static void img_spfi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
if (spfi->tx_ch)
dma_release_channel(spfi->tx_ch);
@@ -685,8 +685,8 @@ static void img_spfi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int img_spfi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
clk_disable_unprepare(spfi->spfi_clk);
clk_disable_unprepare(spfi->sys_clk);
@@ -696,8 +696,8 @@ static int img_spfi_runtime_suspend(struct device *dev)
static int img_spfi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(spfi->sys_clk);
@@ -716,15 +716,15 @@ static int img_spfi_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int img_spfi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
- return spi_master_suspend(master);
+ return spi_controller_suspend(host);
}
static int img_spfi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct img_spfi *spfi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_resume_and_get(dev);
@@ -733,7 +733,7 @@ static int img_spfi_resume(struct device *dev)
spfi_reset(spfi);
pm_runtime_put(dev);
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
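For reference, the speed-limit setup renamed in the img-spfi hunks can be read as the short sketch below. The /4 and /512 divisors and the optional "spfi-max-frequency" clamp come straight from the hunks; example_set_speed_limits() itself is illustrative only.

#include <linux/clk.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static void example_set_speed_limits(struct spi_controller *host,
				     struct clk *spfi_clk,
				     struct device_node *np)
{
	u32 dt_max;

	/* Hardware limits derived from the SPFI input clock */
	host->max_speed_hz = clk_get_rate(spfi_clk) / 4;
	host->min_speed_hz = clk_get_rate(spfi_clk) / 512;

	/* An optional DT property may lower, but never raise, the ceiling */
	if (!of_property_read_u32(np, "spfi-max-frequency", &dt_max) &&
	    host->max_speed_hz > dt_max)
		host->max_speed_hz = dt_max;
}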
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 528ae46c087f..a8a74c7cb79f 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -20,7 +20,6 @@
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/dma/imx-dma.h>
@@ -53,7 +52,7 @@ MODULE_PARM_DESC(polling_limit_us,
/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
-/* The maximum bytes that IMX53_ECSPI can transfer in slave mode.*/
+/* The maximum bytes that IMX53_ECSPI can transfer in target mode.*/
#define MX53_MAX_TRANSFER_BYTES 512
enum spi_imx_devtype {
@@ -78,7 +77,7 @@ struct spi_imx_devtype_data {
void (*setup_wml)(struct spi_imx_data *spi_imx);
void (*disable)(struct spi_imx_data *spi_imx);
bool has_dmamode;
- bool has_slavemode;
+ bool has_targetmode;
unsigned int fifo_size;
bool dynamic_burst;
/*
@@ -114,10 +113,10 @@ struct spi_imx_data {
unsigned int dynamic_burst;
bool rx_only;
- /* Slave mode */
- bool slave_mode;
- bool slave_aborted;
- unsigned int slave_burst;
+ /* Target mode */
+ bool target_mode;
+ bool target_aborted;
+ unsigned int target_burst;
/* DMA */
bool usedma;
@@ -241,7 +240,7 @@ static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device
if (!controller->dma_rx)
return false;
- if (spi_imx->slave_mode)
+ if (spi_imx->target_mode)
return false;
if (transfer->len < spi_imx->devtype_data->fifo_size)
@@ -405,12 +404,12 @@ static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
writel(val, spi_imx->base + MXC_CSPITXDATA);
}
-static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
+static void mx53_ecspi_rx_target(struct spi_imx_data *spi_imx)
{
u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));
if (spi_imx->rx_buf) {
- int n_bytes = spi_imx->slave_burst % sizeof(val);
+ int n_bytes = spi_imx->target_burst % sizeof(val);
if (!n_bytes)
n_bytes = sizeof(val);
@@ -419,13 +418,13 @@ static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);
spi_imx->rx_buf += n_bytes;
- spi_imx->slave_burst -= n_bytes;
+ spi_imx->target_burst -= n_bytes;
}
spi_imx->remainder -= sizeof(u32);
}
-static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
+static void mx53_ecspi_tx_target(struct spi_imx_data *spi_imx)
{
u32 val = 0;
int n_bytes = spi_imx->count % sizeof(val);
@@ -536,8 +535,8 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
u32 current_cfg = cfg;
int channel = mx51_ecspi_channel(spi);
- /* set Master or Slave mode */
- if (spi_imx->slave_mode)
+ /* set Host or Target mode */
+ if (spi_imx->target_mode)
ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
else
ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
@@ -565,11 +564,11 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
/*
- * eCSPI burst completion by Chip Select signal in Slave mode
+ * eCSPI burst completion by Chip Select signal in Target mode
* is not functional for the imx53 SoC, so configure the SPI burst as completed when
* BURST_LENGTH + 1 bits are received
*/
- if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(channel);
else
cfg |= MX51_ECSPI_CONFIG_SBBCTRL(channel);
@@ -656,12 +655,16 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
/* Clear BL field and set the right value */
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
- if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
- ctrl |= (spi_imx->slave_burst * 8 - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- else
- ctrl |= (spi_imx->bits_per_word - 1)
+ if (spi_imx->target_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
+ else {
+ if (spi_imx->count >= 512)
+ ctrl |= 0xFFF << MX51_ECSPI_CTRL_BL_OFFSET;
+ else
+ ctrl |= (spi_imx->count*8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
+ }
/* set clock speed */
ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
@@ -718,7 +721,7 @@ static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
#define MX31_INTREG_RREN (1 << 3)
#define MX31_CSPICTRL_ENABLE (1 << 0)
-#define MX31_CSPICTRL_MASTER (1 << 1)
+#define MX31_CSPICTRL_HOST (1 << 1)
#define MX31_CSPICTRL_XCH (1 << 2)
#define MX31_CSPICTRL_SMC (1 << 3)
#define MX31_CSPICTRL_POL (1 << 4)
@@ -775,7 +778,7 @@ static int mx31_prepare_message(struct spi_imx_data *spi_imx,
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
- unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
+ unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_HOST;
unsigned int clk;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
@@ -845,7 +848,7 @@ static void mx31_reset(struct spi_imx_data *spi_imx)
#define MX21_CSPICTRL_SSPOL (1 << 8)
#define MX21_CSPICTRL_XCH (1 << 9)
#define MX21_CSPICTRL_ENABLE (1 << 10)
-#define MX21_CSPICTRL_MASTER (1 << 11)
+#define MX21_CSPICTRL_HOST (1 << 11)
#define MX21_CSPICTRL_DR_SHIFT 14
#define MX21_CSPICTRL_CS_SHIFT 19
@@ -879,7 +882,7 @@ static int mx21_prepare_message(struct spi_imx_data *spi_imx,
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
- unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
+ unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_HOST;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
unsigned int clk;
@@ -921,7 +924,7 @@ static void mx21_reset(struct spi_imx_data *spi_imx)
#define MX1_CSPICTRL_PHA (1 << 5)
#define MX1_CSPICTRL_XCH (1 << 8)
#define MX1_CSPICTRL_ENABLE (1 << 9)
-#define MX1_CSPICTRL_MASTER (1 << 10)
+#define MX1_CSPICTRL_HOST (1 << 10)
#define MX1_CSPICTRL_DR_SHIFT 13
static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
@@ -954,7 +957,7 @@ static int mx1_prepare_message(struct spi_imx_data *spi_imx,
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
struct spi_device *spi)
{
- unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
+ unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_HOST;
unsigned int clk;
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) <<
@@ -993,7 +996,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
- .has_slavemode = false,
+ .has_targetmode = false,
.devtype = IMX1_CSPI,
};
@@ -1007,7 +1010,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
- .has_slavemode = false,
+ .has_targetmode = false,
.devtype = IMX21_CSPI,
};
@@ -1022,7 +1025,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
- .has_slavemode = false,
+ .has_targetmode = false,
.devtype = IMX27_CSPI,
};
@@ -1036,7 +1039,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = false,
.dynamic_burst = false,
- .has_slavemode = false,
+ .has_targetmode = false,
.devtype = IMX31_CSPI,
};
@@ -1051,7 +1054,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
.fifo_size = 8,
.has_dmamode = true,
.dynamic_burst = false,
- .has_slavemode = false,
+ .has_targetmode = false,
.devtype = IMX35_CSPI,
};
@@ -1066,7 +1069,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
- .has_slavemode = true,
+ .has_targetmode = true,
.disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
};
@@ -1080,7 +1083,7 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.reset = mx51_ecspi_reset,
.fifo_size = 64,
.has_dmamode = true,
- .has_slavemode = true,
+ .has_targetmode = true,
.disable = mx51_ecspi_disable,
.devtype = IMX53_ECSPI,
};
@@ -1096,7 +1099,7 @@ static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
- .has_slavemode = true,
+ .has_targetmode = true,
.tx_glitch_fixed = true,
.disable = mx51_ecspi_disable,
.devtype = IMX51_ECSPI,
@@ -1161,7 +1164,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
spi_imx->txfifo++;
}
- if (!spi_imx->slave_mode)
+ if (!spi_imx->target_mode)
spi_imx->devtype_data->trigger(spi_imx);
}
@@ -1258,13 +1261,14 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->spi_bus_clk = t->speed_hz;
spi_imx->bits_per_word = t->bits_per_word;
+ spi_imx->count = t->len;
/*
* Initialize the functions for transfer. To transfer non byte-aligned
* words, we have to use multiple word-size bursts, we can't use
* dynamic_burst in that case.
*/
- if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
+ if (spi_imx->devtype_data->dynamic_burst && !spi_imx->target_mode &&
!(spi->mode & SPI_CS_WORD) &&
(spi_imx->bits_per_word == 8 ||
spi_imx->bits_per_word == 16 ||
@@ -1296,10 +1300,10 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->rx_only = ((t->tx_buf == NULL)
|| (t->tx_buf == spi->controller->dummy_tx));
- if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
- spi_imx->rx = mx53_ecspi_rx_slave;
- spi_imx->tx = mx53_ecspi_tx_slave;
- spi_imx->slave_burst = t->len;
+ if (is_imx53_ecspi(spi_imx) && spi_imx->target_mode) {
+ spi_imx->rx = mx53_ecspi_rx_target;
+ spi_imx->tx = mx53_ecspi_tx_target;
+ spi_imx->target_burst = t->len;
}
spi_imx->devtype_data->prepare_transfer(spi_imx, spi);
@@ -1564,8 +1568,8 @@ static int spi_imx_poll_transfer(struct spi_device *spi,
return 0;
}
-static int spi_imx_pio_transfer_slave(struct spi_device *spi,
- struct spi_transfer *transfer)
+static int spi_imx_pio_transfer_target(struct spi_device *spi,
+ struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
int ret = 0;
@@ -1584,22 +1588,22 @@ static int spi_imx_pio_transfer_slave(struct spi_device *spi,
spi_imx->remainder = 0;
reinit_completion(&spi_imx->xfer_done);
- spi_imx->slave_aborted = false;
+ spi_imx->target_aborted = false;
spi_imx_push(spi_imx);
spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);
if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
- spi_imx->slave_aborted) {
+ spi_imx->target_aborted) {
dev_dbg(&spi->dev, "interrupted\n");
ret = -EINTR;
}
- /* ecspi has a HW issue when works in Slave mode,
+	/* ecspi has a HW issue when working in Target mode,
* after 64 words written to TXFIFO, even when TXFIFO becomes empty,
* ECSPI_TXDATA keeps shifting out the last word of data,
- * so we have to disable ECSPI when in slave mode after the
+ * so we have to disable ECSPI when in target mode after the
* transfer completes
*/
if (spi_imx->devtype_data->disable)
@@ -1622,8 +1626,8 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
while (spi_imx->devtype_data->rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
- if (spi_imx->slave_mode)
- return spi_imx_pio_transfer_slave(spi, transfer);
+ if (spi_imx->target_mode)
+ return spi_imx_pio_transfer_target(spi, transfer);
/*
* If we decided in spi_imx_can_dma() that we want to do a DMA
@@ -1689,11 +1693,11 @@ spi_imx_unprepare_message(struct spi_controller *controller, struct spi_message
return 0;
}
-static int spi_imx_slave_abort(struct spi_controller *controller)
+static int spi_imx_target_abort(struct spi_controller *controller)
{
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
- spi_imx->slave_aborted = true;
+ spi_imx->target_aborted = true;
complete(&spi_imx->xfer_done);
return 0;
@@ -1708,17 +1712,17 @@ static int spi_imx_probe(struct platform_device *pdev)
int ret, irq, spi_drctl;
const struct spi_imx_devtype_data *devtype_data =
of_device_get_match_data(&pdev->dev);
- bool slave_mode;
+ bool target_mode;
u32 val;
- slave_mode = devtype_data->has_slavemode &&
- of_property_read_bool(np, "spi-slave");
- if (slave_mode)
- controller = spi_alloc_slave(&pdev->dev,
- sizeof(struct spi_imx_data));
- else
- controller = spi_alloc_master(&pdev->dev,
+ target_mode = devtype_data->has_targetmode &&
+ of_property_read_bool(np, "spi-slave");
+ if (target_mode)
+ controller = spi_alloc_target(&pdev->dev,
sizeof(struct spi_imx_data));
+ else
+ controller = spi_alloc_host(&pdev->dev,
+ sizeof(struct spi_imx_data));
if (!controller)
return -ENOMEM;
@@ -1737,7 +1741,7 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx = spi_controller_get_devdata(controller);
spi_imx->controller = controller;
spi_imx->dev = &pdev->dev;
- spi_imx->slave_mode = slave_mode;
+ spi_imx->target_mode = target_mode;
spi_imx->devtype_data = devtype_data;
@@ -1757,7 +1761,7 @@ static int spi_imx_probe(struct platform_device *pdev)
controller->cleanup = spi_imx_cleanup;
controller->prepare_message = spi_imx_prepare_message;
controller->unprepare_message = spi_imx_unprepare_message;
- controller->slave_abort = spi_imx_slave_abort;
+ controller->target_abort = spi_imx_target_abort;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS |
SPI_MOSI_IDLE_LOW;
@@ -1779,7 +1783,7 @@ static int spi_imx_probe(struct platform_device *pdev)
if (is_imx51_ecspi(spi_imx) || is_imx53_ecspi(spi_imx)) {
controller->max_native_cs = 4;
- controller->flags |= SPI_MASTER_GPIO_SS;
+ controller->flags |= SPI_CONTROLLER_GPIO_SS;
}
spi_imx->spi_drctl = spi_drctl;
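Beyond the slave→target rename, the spi-imx hunks change the host-mode BURST_LENGTH (BL) calculation: it is now derived from the transfer length (spi_imx->count, set in setupxfer) and capped at the 12-bit field maximum instead of following bits_per_word. A hedged restatement of that calculation; example_compute_bl() is illustrative and the caller would shift the result by MX51_ECSPI_CTRL_BL_OFFSET as the driver does.

static unsigned int example_compute_bl(unsigned int count_bytes,
				       bool target_mode,
				       unsigned int target_burst)
{
	if (target_mode)
		return target_burst * 8 - 1;	/* imx53 target mode: whole burst */
	if (count_bytes >= 512)
		return 0xFFF;			/* cap at the 12-bit field maximum */
	return count_bytes * 8 - 1;		/* burst spans the whole transfer */
}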
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
index 7d4b515a160d..cc366936d72b 100644
--- a/drivers/spi/spi-ingenic.c
+++ b/drivers/spi/spi-ingenic.c
@@ -12,7 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -392,7 +392,7 @@ static int spi_ingenic_probe(struct platform_device *pdev)
return -EINVAL;
}
- ctlr = devm_spi_alloc_master(dev, sizeof(*priv));
+ ctlr = devm_spi_alloc_host(dev, sizeof(*priv));
if (!ctlr) {
dev_err(dev, "Unable to allocate SPI controller.\n");
return -ENOMEM;
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index bc6d22149e7e..98ec4dc22b81 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -143,7 +143,7 @@
* @base: Beginning of MMIO space
* @pregs: Start of protection registers
* @sregs: Start of software sequencer registers
- * @master: Pointer to the SPI controller structure
+ * @host: Pointer to the SPI controller structure
* @nregions: Maximum number of regions
* @pr_num: Maximum number of protected range registers
* @chip0_size: Size of the first flash chip in bytes
@@ -161,7 +161,7 @@ struct intel_spi {
void __iomem *base;
void __iomem *pregs;
void __iomem *sregs;
- struct spi_controller *master;
+ struct spi_controller *host;
size_t nregions;
size_t pr_num;
size_t chip0_size;
@@ -747,7 +747,7 @@ intel_spi_match_mem_op(struct intel_spi *ispi, const struct spi_mem_op *op)
static bool intel_spi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
- struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
const struct intel_spi_mem_op *iop;
iop = intel_spi_match_mem_op(ispi, op);
@@ -778,7 +778,7 @@ static bool intel_spi_supports_mem_op(struct spi_mem *mem,
static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
const struct intel_spi_mem_op *iop;
iop = intel_spi_match_mem_op(ispi, op);
@@ -790,7 +790,7 @@ static int intel_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *o
static const char *intel_spi_get_name(struct spi_mem *mem)
{
- const struct intel_spi *ispi = spi_master_get_devdata(mem->spi->master);
+ const struct intel_spi *ispi = spi_controller_get_devdata(mem->spi->controller);
/*
* Return name of the flash controller device to be compatible
@@ -801,7 +801,7 @@ static const char *intel_spi_get_name(struct spi_mem *mem)
static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
- struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
const struct intel_spi_mem_op *iop;
iop = intel_spi_match_mem_op(ispi, &desc->info.op_tmpl);
@@ -815,7 +815,7 @@ static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
size_t len, void *buf)
{
- struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
const struct intel_spi_mem_op *iop = desc->priv;
struct spi_mem_op op = desc->info.op_tmpl;
int ret;
@@ -832,7 +832,7 @@ static ssize_t intel_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs,
static ssize_t intel_spi_dirmap_write(struct spi_mem_dirmap_desc *desc, u64 offs,
size_t len, const void *buf)
{
- struct intel_spi *ispi = spi_master_get_devdata(desc->mem->spi->master);
+ struct intel_spi *ispi = spi_controller_get_devdata(desc->mem->spi->controller);
const struct intel_spi_mem_op *iop = desc->priv;
struct spi_mem_op op = desc->info.op_tmpl;
int ret;
@@ -1332,14 +1332,14 @@ static int intel_spi_read_desc(struct intel_spi *ispi)
nc = (buf[1] & FLMAP0_NC_MASK) >> FLMAP0_NC_SHIFT;
if (!nc)
- ispi->master->num_chipselect = 1;
+ ispi->host->num_chipselect = 1;
else if (nc == 1)
- ispi->master->num_chipselect = 2;
+ ispi->host->num_chipselect = 2;
else
return -EINVAL;
dev_dbg(ispi->dev, "%u flash components found\n",
- ispi->master->num_chipselect);
+ ispi->host->num_chipselect);
return 0;
}
@@ -1365,7 +1365,7 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
snprintf(chip.modalias, 8, "spi-nor");
chip.platform_data = pdata;
- if (!spi_new_device(ispi->master, &chip))
+ if (!spi_new_device(ispi->host, &chip))
return -ENODEV;
ret = intel_spi_read_desc(ispi);
@@ -1373,13 +1373,13 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
return ret;
/* Add the second chip if present */
- if (ispi->master->num_chipselect < 2)
+ if (ispi->host->num_chipselect < 2)
return 0;
chip.platform_data = NULL;
chip.chip_select = 1;
- if (!spi_new_device(ispi->master, &chip))
+ if (!spi_new_device(ispi->host, &chip))
return -ENODEV;
return 0;
}
@@ -1396,31 +1396,31 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
int intel_spi_probe(struct device *dev, struct resource *mem,
const struct intel_spi_boardinfo *info)
{
- struct spi_controller *master;
+ struct spi_controller *host;
struct intel_spi *ispi;
int ret;
- master = devm_spi_alloc_master(dev, sizeof(*ispi));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(*ispi));
+ if (!host)
return -ENOMEM;
- master->mem_ops = &intel_spi_mem_ops;
+ host->mem_ops = &intel_spi_mem_ops;
- ispi = spi_master_get_devdata(master);
+ ispi = spi_controller_get_devdata(host);
ispi->base = devm_ioremap_resource(dev, mem);
if (IS_ERR(ispi->base))
return PTR_ERR(ispi->base);
ispi->dev = dev;
- ispi->master = master;
+ ispi->host = host;
ispi->info = info;
ret = intel_spi_init(ispi);
if (ret)
return ret;
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
return ret;
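The spi-intel hunks switch every spi-mem callback from mem->spi->master to mem->spi->controller. A minimal hedged sketch of that accessor pattern, with example_* names standing in for the driver's own state and checks.

#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

struct example_ispi {
	void __iomem *base;
};

static bool example_supports_mem_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	/* Driver data now hangs off spi->controller, not spi->master */
	struct example_ispi *ispi =
		spi_controller_get_devdata(mem->spi->controller);

	if (!ispi->base)		/* controller not mapped yet */
		return false;

	return op->data.buswidth <= 4;	/* a stand-in capability check */
}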
diff --git a/drivers/spi/spi-iproc-qspi.c b/drivers/spi/spi-iproc-qspi.c
index 5980a0dbbccb..39ee2b43a516 100644
--- a/drivers/spi/spi-iproc-qspi.c
+++ b/drivers/spi/spi-iproc-qspi.c
@@ -94,7 +94,6 @@ static int bcm_iproc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm_iproc_intc *priv;
struct bcm_qspi_soc_intc *soc_intc;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -104,14 +103,12 @@ static int bcm_iproc_probe(struct platform_device *pdev)
spin_lock_init(&priv->soclock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr_regs");
- priv->int_reg = devm_ioremap_resource(dev, res);
+ priv->int_reg = devm_platform_ioremap_resource_byname(pdev, "intr_regs");
if (IS_ERR(priv->int_reg))
return PTR_ERR(priv->int_reg);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "intr_status_reg");
- priv->int_status_reg = devm_ioremap_resource(dev, res);
+ priv->int_status_reg = devm_platform_ioremap_resource_byname(pdev,
+ "intr_status_reg");
if (IS_ERR(priv->int_status_reg))
return PTR_ERR(priv->int_status_reg);
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index c42a3358e8c9..e37ca22e04ba 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -33,7 +33,7 @@
#define JCORE_SPI_WAIT_RDY_MAX_LOOP 2000000
struct jcore_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base;
unsigned int cs_reg;
unsigned int speed_reg;
@@ -59,7 +59,7 @@ static void jcore_spi_program(struct jcore_spi *hw)
void __iomem *ctrl_reg = hw->base + CTRL_REG;
if (jcore_spi_wait(ctrl_reg))
- dev_err(hw->master->dev.parent,
+ dev_err(hw->host->dev.parent,
"timeout waiting to program ctrl reg.\n");
writel(hw->cs_reg | hw->speed_reg, ctrl_reg);
@@ -67,10 +67,10 @@ static void jcore_spi_program(struct jcore_spi *hw)
static void jcore_spi_chipsel(struct spi_device *spi, bool value)
{
- struct jcore_spi *hw = spi_master_get_devdata(spi->master);
+ struct jcore_spi *hw = spi_controller_get_devdata(spi->controller);
u32 csbit = 1U << (2 * spi_get_chipselect(spi, 0));
- dev_dbg(hw->master->dev.parent, "chipselect %d\n", spi_get_chipselect(spi, 0));
+ dev_dbg(hw->host->dev.parent, "chipselect %d\n", spi_get_chipselect(spi, 0));
if (value)
hw->cs_reg |= csbit;
@@ -90,14 +90,14 @@ static void jcore_spi_baudrate(struct jcore_spi *hw, int speed)
else
hw->speed_reg = ((hw->clock_freq / 2 / speed) - 1) << 27;
jcore_spi_program(hw);
- dev_dbg(hw->master->dev.parent, "speed=%d reg=0x%x\n",
+ dev_dbg(hw->host->dev.parent, "speed=%d reg=0x%x\n",
speed, hw->speed_reg);
}
-static int jcore_spi_txrx(struct spi_master *master, struct spi_device *spi,
+static int jcore_spi_txrx(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *t)
{
- struct jcore_spi *hw = spi_master_get_devdata(master);
+ struct jcore_spi *hw = spi_controller_get_devdata(host);
void __iomem *ctrl_reg = hw->base + CTRL_REG;
void __iomem *data_reg = hw->base + DATA_REG;
@@ -130,7 +130,7 @@ static int jcore_spi_txrx(struct spi_master *master, struct spi_device *spi,
*rx++ = readl(data_reg);
}
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
if (count < len)
return -EREMOTEIO;
@@ -142,26 +142,26 @@ static int jcore_spi_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct jcore_spi *hw;
- struct spi_master *master;
+ struct spi_controller *host;
struct resource *res;
u32 clock_freq;
struct clk *clk;
int err = -ENODEV;
- master = spi_alloc_master(&pdev->dev, sizeof(struct jcore_spi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(struct jcore_spi));
+ if (!host)
return err;
- /* Setup the master state. */
- master->num_chipselect = 3;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->transfer_one = jcore_spi_txrx;
- master->set_cs = jcore_spi_chipsel;
- master->dev.of_node = node;
- master->bus_num = pdev->id;
+ /* Setup the host state. */
+ host->num_chipselect = 3;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->transfer_one = jcore_spi_txrx;
+ host->set_cs = jcore_spi_chipsel;
+ host->dev.of_node = node;
+ host->bus_num = pdev->id;
- hw = spi_master_get_devdata(master);
- hw->master = master;
+ hw = spi_controller_get_devdata(host);
+ hw->host = host;
platform_set_drvdata(pdev, hw);
/* Find and map our resources */
@@ -200,7 +200,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
jcore_spi_baudrate(hw, 400000);
/* Register our spi controller */
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err)
goto exit;
@@ -209,7 +209,7 @@ static int jcore_spi_probe(struct platform_device *pdev)
exit_busy:
err = -EBUSY;
exit:
- spi_master_put(master);
+ spi_controller_put(host);
return err;
}
diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c
index 8d6ecc5d6f70..938e9e577e4f 100644
--- a/drivers/spi/spi-lantiq-ssc.c
+++ b/drivers/spi/spi-lantiq-ssc.c
@@ -6,7 +6,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -91,7 +92,7 @@
#define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
#define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
#define LTQ_SPI_STAT_ME BIT(7) /* Mode error flag */
-#define LTQ_SPI_STAT_MS BIT(1) /* Master/slave select bit */
+#define LTQ_SPI_STAT_MS BIT(1) /* Host/target select bit */
#define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
#define LTQ_SPI_STAT_ERRORS (LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
@@ -109,8 +110,8 @@
#define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
#define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
-#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
-#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
+#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set host select bit */
+#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear host select bit */
#define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
#define LTQ_SPI_WHBSTATE_CLR_ERRORS (LTQ_SPI_WHBSTATE_CLRRUE | \
@@ -162,7 +163,7 @@ struct lantiq_ssc_hwcfg {
};
struct lantiq_ssc_spi {
- struct spi_master *master;
+ struct spi_controller *host;
struct device *dev;
void __iomem *regbase;
struct clk *spi_clk;
@@ -366,7 +367,7 @@ static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
hw_setup_bits_per_word(spi, spi->bits_per_word);
hw_setup_clock_mode(spi, SPI_MODE_0);
- /* Enable master mode and clear error flags */
+ /* Enable host mode and clear error flags */
lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
LTQ_SPI_WHBSTATE_CLR_ERRORS,
LTQ_SPI_WHBSTATE);
@@ -386,8 +387,8 @@ static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
static int lantiq_ssc_setup(struct spi_device *spidev)
{
- struct spi_master *master = spidev->master;
- struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = spidev->controller;
+ struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
unsigned int cs = spi_get_chipselect(spidev, 0);
u32 gpocon;
@@ -415,10 +416,10 @@ static int lantiq_ssc_setup(struct spi_device *spidev)
return 0;
}
-static int lantiq_ssc_prepare_message(struct spi_master *master,
+static int lantiq_ssc_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
- struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
hw_enter_config_mode(spi);
hw_setup_clock_mode(spi, message->spi->mode);
@@ -460,10 +461,10 @@ static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
}
-static int lantiq_ssc_unprepare_message(struct spi_master *master,
+static int lantiq_ssc_unprepare_message(struct spi_controller *host,
struct spi_message *message)
{
- struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
flush_workqueue(spi->wq);
@@ -692,8 +693,8 @@ static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
/* set bad status so it can be retried */
- if (spi->master->cur_msg)
- spi->master->cur_msg->status = -EIO;
+ if (spi->host->cur_msg)
+ spi->host->cur_msg->status = -EIO;
queue_work(spi->wq, &spi->work);
spin_unlock(&spi->lock);
@@ -771,22 +772,22 @@ static void lantiq_ssc_bussy_work(struct work_struct *work)
u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
if (!(stat & LTQ_SPI_STAT_BSY)) {
- spi_finalize_current_transfer(spi->master);
+ spi_finalize_current_transfer(spi->host);
return;
}
cond_resched();
} while (!time_after_eq(jiffies, end));
- if (spi->master->cur_msg)
- spi->master->cur_msg->status = -EIO;
- spi_finalize_current_transfer(spi->master);
+ if (spi->host->cur_msg)
+ spi->host->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(spi->host);
}
-static void lantiq_ssc_handle_err(struct spi_master *master,
+static void lantiq_ssc_handle_err(struct spi_controller *host,
struct spi_message *message)
{
- struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
/* flush FIFOs on timeout */
rx_fifo_flush(spi);
@@ -795,7 +796,7 @@ static void lantiq_ssc_handle_err(struct spi_master *master,
static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
{
- struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
+ struct lantiq_ssc_spi *spi = spi_controller_get_devdata(spidev->controller);
unsigned int cs = spi_get_chipselect(spidev, 0);
u32 fgpo;
@@ -807,11 +808,11 @@ static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
}
-static int lantiq_ssc_transfer_one(struct spi_master *master,
+static int lantiq_ssc_transfer_one(struct spi_controller *host,
struct spi_device *spidev,
struct spi_transfer *t)
{
- struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
+ struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);
hw_setup_transfer(spi, spidev, t);
@@ -903,7 +904,7 @@ MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
static int lantiq_ssc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct lantiq_ssc_spi *spi;
const struct lantiq_ssc_hwcfg *hwcfg;
u32 id, supports_dma, revision;
@@ -912,33 +913,33 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
hwcfg = of_device_get_match_data(dev);
- master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
- if (!master)
+ host = spi_alloc_host(dev, sizeof(struct lantiq_ssc_spi));
+ if (!host)
return -ENOMEM;
- spi = spi_master_get_devdata(master);
- spi->master = master;
+ spi = spi_controller_get_devdata(host);
+ spi->host = host;
spi->dev = dev;
spi->hwcfg = hwcfg;
platform_set_drvdata(pdev, spi);
spi->regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regbase)) {
err = PTR_ERR(spi->regbase);
- goto err_master_put;
+ goto err_host_put;
}
err = hwcfg->cfg_irq(pdev, spi);
if (err)
- goto err_master_put;
+ goto err_host_put;
spi->spi_clk = devm_clk_get(dev, "gate");
if (IS_ERR(spi->spi_clk)) {
err = PTR_ERR(spi->spi_clk);
- goto err_master_put;
+ goto err_host_put;
}
err = clk_prepare_enable(spi->spi_clk);
if (err)
- goto err_master_put;
+ goto err_host_put;
/*
* Use the old clk_get_fpi() function on Lantiq platform, till it
@@ -964,19 +965,19 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
spi->bits_per_word = 8;
spi->speed_hz = 0;
- master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = num_cs;
- master->use_gpio_descriptors = true;
- master->setup = lantiq_ssc_setup;
- master->set_cs = lantiq_ssc_set_cs;
- master->handle_err = lantiq_ssc_handle_err;
- master->prepare_message = lantiq_ssc_prepare_message;
- master->unprepare_message = lantiq_ssc_unprepare_message;
- master->transfer_one = lantiq_ssc_transfer_one;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
- SPI_LOOP;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
- SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+ host->dev.of_node = pdev->dev.of_node;
+ host->num_chipselect = num_cs;
+ host->use_gpio_descriptors = true;
+ host->setup = lantiq_ssc_setup;
+ host->set_cs = lantiq_ssc_set_cs;
+ host->handle_err = lantiq_ssc_handle_err;
+ host->prepare_message = lantiq_ssc_prepare_message;
+ host->unprepare_message = lantiq_ssc_unprepare_message;
+ host->transfer_one = lantiq_ssc_transfer_one;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
+ SPI_LOOP;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
if (!spi->wq) {
@@ -997,9 +998,9 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
- err = devm_spi_register_master(dev, master);
+ err = devm_spi_register_controller(dev, host);
if (err) {
- dev_err(dev, "failed to register spi_master\n");
+ dev_err(dev, "failed to register spi host\n");
goto err_wq_destroy;
}
@@ -1011,8 +1012,8 @@ err_clk_put:
clk_put(spi->fpi_clk);
err_clk_disable:
clk_disable_unprepare(spi->spi_clk);
-err_master_put:
- spi_master_put(master);
+err_host_put:
+ spi_controller_put(host);
return err;
}
diff --git a/drivers/spi/spi-loongson-core.c b/drivers/spi/spi-loongson-core.c
new file mode 100644
index 000000000000..f97800b6fd65
--- /dev/null
+++ b/drivers/spi/spi-loongson-core.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Loongson SPI Support
+// Copyright (C) 2023 Loongson Technology Corporation Limited
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "spi-loongson.h"
+
+static inline void loongson_spi_write_reg(struct loongson_spi *spi, unsigned char reg,
+ unsigned char data)
+{
+ writeb(data, spi->base + reg);
+}
+
+static inline char loongson_spi_read_reg(struct loongson_spi *spi, unsigned char reg)
+{
+ return readb(spi->base + reg);
+}
+
+static void loongson_spi_set_cs(struct spi_device *spi, bool en)
+{
+ int cs;
+ unsigned char mask = (BIT(4) | BIT(0)) << spi_get_chipselect(spi, 0);
+ unsigned char val = en ? mask : (BIT(0) << spi_get_chipselect(spi, 0));
+ struct loongson_spi *loongson_spi = spi_controller_get_devdata(spi->controller);
+
+ cs = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SFCS_REG) & ~mask;
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SFCS_REG, val | cs);
+}
+
+static void loongson_spi_set_clk(struct loongson_spi *loongson_spi, unsigned int hz)
+{
+ unsigned char val;
+ unsigned int div, div_tmp;
+ static const char rdiv[12] = {0, 1, 4, 2, 3, 5, 6, 7, 8, 9, 10, 11};
+
+ div = clamp_val(DIV_ROUND_UP_ULL(loongson_spi->clk_rate, hz), 2, 4096);
+ div_tmp = rdiv[fls(div - 1)];
+ loongson_spi->spcr = (div_tmp & GENMASK(1, 0)) >> 0;
+ loongson_spi->sper = (div_tmp & GENMASK(3, 2)) >> 2;
+ val = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPCR_REG);
+ val &= ~GENMASK(1, 0);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPCR_REG, val |
+ loongson_spi->spcr);
+ val = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPER_REG);
+ val &= ~GENMASK(1, 0);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPER_REG, val |
+ loongson_spi->sper);
+ loongson_spi->hz = hz;
+}
+
+static void loongson_spi_set_mode(struct loongson_spi *loongson_spi,
+ struct spi_device *spi)
+{
+ unsigned char val;
+
+ val = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPCR_REG);
+ val &= ~(LOONGSON_SPI_SPCR_CPOL | LOONGSON_SPI_SPCR_CPHA);
+ if (spi->mode & SPI_CPOL)
+ val |= LOONGSON_SPI_SPCR_CPOL;
+ if (spi->mode & SPI_CPHA)
+ val |= LOONGSON_SPI_SPCR_CPHA;
+
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPCR_REG, val);
+ loongson_spi->mode |= spi->mode;
+}
+
+static int loongson_spi_update_state(struct loongson_spi *loongson_spi,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ if (t && loongson_spi->hz != t->speed_hz)
+ loongson_spi_set_clk(loongson_spi, t->speed_hz);
+
+ if ((spi->mode ^ loongson_spi->mode) & SPI_MODE_X_MASK)
+ loongson_spi_set_mode(loongson_spi, spi);
+
+ return 0;
+}
+
+static int loongson_spi_setup(struct spi_device *spi)
+{
+ struct loongson_spi *loongson_spi;
+
+ loongson_spi = spi_controller_get_devdata(spi->controller);
+ if (spi->bits_per_word % 8)
+ return -EINVAL;
+
+ if (spi_get_chipselect(spi, 0) >= spi->controller->num_chipselect)
+ return -EINVAL;
+
+ loongson_spi->hz = 0;
+ loongson_spi_set_cs(spi, true);
+
+ return 0;
+}
+
+static int loongson_spi_write_read_8bit(struct spi_device *spi, const u8 **tx_buf,
+ u8 **rx_buf, unsigned int num)
+{
+ int ret;
+ struct loongson_spi *loongson_spi = spi_controller_get_devdata(spi->controller);
+
+ if (tx_buf && *tx_buf)
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_FIFO_REG, *((*tx_buf)++));
+ else
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_FIFO_REG, 0);
+
+ ret = readb_poll_timeout(loongson_spi->base + LOONGSON_SPI_SPSR_REG,
+ loongson_spi->spsr, (loongson_spi->spsr &
+ LOONGSON_SPI_SPSR_RFEMPTY) != LOONGSON_SPI_SPSR_RFEMPTY,
+ 1, USEC_PER_MSEC);
+
+ if (rx_buf && *rx_buf)
+ *(*rx_buf)++ = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_FIFO_REG);
+ else
+ loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_FIFO_REG);
+
+ return ret;
+}
+
+static int loongson_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ int ret;
+ unsigned int count;
+ const u8 *tx = xfer->tx_buf;
+ u8 *rx = xfer->rx_buf;
+
+ count = xfer->len;
+ do {
+ ret = loongson_spi_write_read_8bit(spi, &tx, &rx, count);
+ if (ret)
+ break;
+ } while (--count);
+
+ return ret;
+}
+
+static int loongson_spi_prepare_message(struct spi_controller *ctlr, struct spi_message *m)
+{
+ struct loongson_spi *loongson_spi = spi_controller_get_devdata(ctlr);
+
+ loongson_spi->para = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_PARA_REG);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_PARA_REG, loongson_spi->para &
+ ~LOONGSON_SPI_PARA_MEM_EN);
+
+ return 0;
+}
+
+static int loongson_spi_transfer_one(struct spi_controller *ctrl, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct loongson_spi *loongson_spi = spi_controller_get_devdata(spi->controller);
+
+ loongson_spi_update_state(loongson_spi, spi, xfer);
+ if (xfer->len)
+ return loongson_spi_write_read(spi, xfer);
+
+ return 0;
+}
+
+static int loongson_spi_unprepare_message(struct spi_controller *ctrl, struct spi_message *m)
+{
+ struct loongson_spi *loongson_spi = spi_controller_get_devdata(ctrl);
+
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_PARA_REG, loongson_spi->para);
+
+ return 0;
+}
+
+static void loongson_spi_reginit(struct loongson_spi *loongson_spi_dev)
+{
+ unsigned char val;
+
+ val = loongson_spi_read_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG);
+ val &= ~LOONGSON_SPI_SPCR_SPE;
+ loongson_spi_write_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG, val);
+
+ loongson_spi_write_reg(loongson_spi_dev, LOONGSON_SPI_SPSR_REG,
+ (LOONGSON_SPI_SPSR_SPIF | LOONGSON_SPI_SPSR_WCOL));
+
+ val = loongson_spi_read_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG);
+ val |= LOONGSON_SPI_SPCR_SPE;
+ loongson_spi_write_reg(loongson_spi_dev, LOONGSON_SPI_SPCR_REG, val);
+}
+
+int loongson_spi_init_controller(struct device *dev, void __iomem *regs)
+{
+ struct spi_controller *controller;
+ struct loongson_spi *spi;
+ struct clk *clk;
+
+ controller = devm_spi_alloc_host(dev, sizeof(struct loongson_spi));
+ if (controller == NULL)
+ return -ENOMEM;
+
+ controller->mode_bits = SPI_MODE_X_MASK | SPI_CS_HIGH;
+ controller->setup = loongson_spi_setup;
+ controller->prepare_message = loongson_spi_prepare_message;
+ controller->transfer_one = loongson_spi_transfer_one;
+ controller->unprepare_message = loongson_spi_unprepare_message;
+ controller->set_cs = loongson_spi_set_cs;
+ controller->num_chipselect = 4;
+ device_set_node(&controller->dev, dev_fwnode(dev));
+ dev_set_drvdata(dev, controller);
+
+ spi = spi_controller_get_devdata(controller);
+ spi->base = regs;
+ spi->controller = controller;
+
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "unable to get clock\n");
+
+ spi->clk_rate = clk_get_rate(clk);
+ loongson_spi_reginit(spi);
+
+ spi->mode = 0;
+
+ return devm_spi_register_controller(dev, controller);
+}
+EXPORT_SYMBOL_NS_GPL(loongson_spi_init_controller, SPI_LOONGSON_CORE);
+
+static int __maybe_unused loongson_spi_suspend(struct device *dev)
+{
+ struct loongson_spi *loongson_spi;
+ struct spi_controller *controller;
+
+ controller = dev_get_drvdata(dev);
+ spi_controller_suspend(controller);
+
+ loongson_spi = spi_controller_get_devdata(controller);
+
+ loongson_spi->spcr = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPCR_REG);
+ loongson_spi->sper = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPER_REG);
+ loongson_spi->spsr = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SPSR_REG);
+ loongson_spi->para = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_PARA_REG);
+ loongson_spi->sfcs = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_SFCS_REG);
+ loongson_spi->timi = loongson_spi_read_reg(loongson_spi, LOONGSON_SPI_TIMI_REG);
+
+ return 0;
+}
+
+static int __maybe_unused loongson_spi_resume(struct device *dev)
+{
+ struct loongson_spi *loongson_spi;
+ struct spi_controller *controller;
+
+ controller = dev_get_drvdata(dev);
+ loongson_spi = spi_controller_get_devdata(controller);
+
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPCR_REG, loongson_spi->spcr);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPER_REG, loongson_spi->sper);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SPSR_REG, loongson_spi->spsr);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_PARA_REG, loongson_spi->para);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_SFCS_REG, loongson_spi->sfcs);
+ loongson_spi_write_reg(loongson_spi, LOONGSON_SPI_TIMI_REG, loongson_spi->timi);
+
+ spi_controller_resume(controller);
+
+ return 0;
+}
+
+const struct dev_pm_ops loongson_spi_dev_pm_ops = {
+ .suspend = loongson_spi_suspend,
+ .resume = loongson_spi_resume,
+};
+EXPORT_SYMBOL_NS_GPL(loongson_spi_dev_pm_ops, SPI_LOONGSON_CORE);
+
+MODULE_DESCRIPTION("Loongson SPI core driver");
+MODULE_LICENSE("GPL");
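A small sketch restating the SFCS chip-select update from loongson_spi_set_cs() above: only the two bits belonging to the addressed chip select are rewritten, the remaining bits of the register are preserved. example_set_cs() is illustrative, not part of the driver.

#include <linux/bits.h>
#include <linux/io.h>

#include "spi-loongson.h"

static void example_set_cs(struct loongson_spi *priv, unsigned int cs, bool en)
{
	unsigned char mask = (BIT(4) | BIT(0)) << cs;	/* both bits of this CS */
	unsigned char val = en ? mask : (BIT(0) << cs);
	unsigned char other = readb(priv->base + LOONGSON_SPI_SFCS_REG) & ~mask;

	/* Merge the new per-CS bits with the untouched remainder */
	writeb(val | other, priv->base + LOONGSON_SPI_SFCS_REG);
}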
diff --git a/drivers/spi/spi-loongson-pci.c b/drivers/spi/spi-loongson-pci.c
new file mode 100644
index 000000000000..134cda0c13a5
--- /dev/null
+++ b/drivers/spi/spi-loongson-pci.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0+
+// PCI interface driver for Loongson SPI Support
+// Copyright (C) 2023 Loongson Technology Corporation Limited
+
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+
+#include "spi-loongson.h"
+
+static int loongson_spi_pci_register(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int ret;
+ void __iomem *reg_base;
+ struct device *dev = &pdev->dev;
+ int pci_bar = 0;
+
+ ret = pcim_enable_device(pdev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "cannot enable pci device\n");
+
+ ret = pcim_iomap_regions(pdev, BIT(pci_bar), pci_name(pdev));
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request and remap memory\n");
+
+ reg_base = pcim_iomap_table(pdev)[pci_bar];
+
+ ret = loongson_spi_init_controller(dev, reg_base);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize controller\n");
+
+ return 0;
+}
+
+static struct pci_device_id loongson_spi_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a0b) },
+ { PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a1b) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, loongson_spi_devices);
+
+static struct pci_driver loongson_spi_pci_driver = {
+ .name = "loongson-spi-pci",
+ .id_table = loongson_spi_devices,
+ .probe = loongson_spi_pci_register,
+ .driver = {
+ .bus = &pci_bus_type,
+ .pm = &loongson_spi_dev_pm_ops,
+ },
+};
+module_pci_driver(loongson_spi_pci_driver);
+
+MODULE_DESCRIPTION("Loongson spi pci driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SPI_LOONGSON_CORE);
diff --git a/drivers/spi/spi-loongson-plat.c b/drivers/spi/spi-loongson-plat.c
new file mode 100644
index 000000000000..c066e5f5891e
--- /dev/null
+++ b/drivers/spi/spi-loongson-plat.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Platform driver for Loongson SPI Support
+// Copyright (C) 2023 Loongson Technology Corporation Limited
+
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "spi-loongson.h"
+
+static int loongson_spi_platform_probe(struct platform_device *pdev)
+{
+ int ret;
+ void __iomem *reg_base;
+ struct device *dev = &pdev->dev;
+
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+
+ ret = loongson_spi_init_controller(dev, reg_base);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize controller\n");
+
+ return 0;
+}
+
+static const struct of_device_id loongson_spi_id_table[] = {
+ { .compatible = "loongson,ls2k1000-spi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, loongson_spi_id_table);
+
+static struct platform_driver loongson_spi_plat_driver = {
+ .probe = loongson_spi_platform_probe,
+ .driver = {
+ .name = "loongson-spi",
+ .bus = &platform_bus_type,
+ .pm = &loongson_spi_dev_pm_ops,
+ .of_match_table = loongson_spi_id_table,
+ },
+};
+module_platform_driver(loongson_spi_plat_driver);
+
+MODULE_DESCRIPTION("Loongson spi platform driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(SPI_LOONGSON_CORE);
diff --git a/drivers/spi/spi-loongson.h b/drivers/spi/spi-loongson.h
new file mode 100644
index 000000000000..35f95b161842
--- /dev/null
+++ b/drivers/spi/spi-loongson.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Header File for Loongson SPI Driver. */
+/* Copyright (C) 2023 Loongson Technology Corporation Limited */
+
+#ifndef __LINUX_SPI_LOONGSON_H
+#define __LINUX_SPI_LOONGSON_H
+
+#include <linux/bits.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+
+#define LOONGSON_SPI_SPCR_REG 0x00
+#define LOONGSON_SPI_SPSR_REG 0x01
+#define LOONGSON_SPI_FIFO_REG 0x02
+#define LOONGSON_SPI_SPER_REG 0x03
+#define LOONGSON_SPI_PARA_REG 0x04
+#define LOONGSON_SPI_SFCS_REG 0x05
+#define LOONGSON_SPI_TIMI_REG 0x06
+
+/* Bits definition for Loongson SPI register */
+#define LOONGSON_SPI_PARA_MEM_EN BIT(0)
+#define LOONGSON_SPI_SPCR_CPHA BIT(2)
+#define LOONGSON_SPI_SPCR_CPOL BIT(3)
+#define LOONGSON_SPI_SPCR_SPE BIT(6)
+#define LOONGSON_SPI_SPSR_RFEMPTY BIT(0)
+#define LOONGSON_SPI_SPSR_WCOL BIT(6)
+#define LOONGSON_SPI_SPSR_SPIF BIT(7)
+
+struct device;
+struct spi_controller;
+
+struct loongson_spi {
+ struct spi_controller *controller;
+ void __iomem *base;
+ int cs_active;
+ unsigned int hz;
+ unsigned char spcr;
+ unsigned char sper;
+ unsigned char spsr;
+ unsigned char para;
+ unsigned char sfcs;
+ unsigned char timi;
+ unsigned int mode;
+ u64 clk_rate;
+};
+
+int loongson_spi_init_controller(struct device *dev, void __iomem *reg);
+extern const struct dev_pm_ops loongson_spi_dev_pm_ops;
+#endif /* __LINUX_SPI_LOONGSON_H */
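
The header above and the two glue drivers before it split the new Loongson SPI support into a shared core, which owns the spi_controller and the suspend/resume register save/restore shown at the top of this diff, and thin PCI/platform front ends that only map the register window and hand it to loongson_spi_init_controller(). A hypothetical additional front end would follow the same pattern; the sketch below is illustrative only and not part of the commit (the driver name and probe function are made up):

	/*
	 * Illustrative only: a hypothetical extra front end built on the
	 * entry points declared in spi-loongson.h.  The driver name and
	 * probe function are invented for this sketch.
	 */
	#include <linux/err.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	#include "spi-loongson.h"

	static int example_loongson_spi_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* The core allocates, configures and registers the spi_controller. */
		return loongson_spi_init_controller(&pdev->dev, base);
	}

	static struct platform_driver example_loongson_spi_driver = {
		.probe = example_loongson_spi_probe,
		.driver = {
			.name = "example-loongson-spi",
			.pm = &loongson_spi_dev_pm_ops,
		},
	};
	module_platform_driver(example_loongson_spi_driver);

	MODULE_DESCRIPTION("Illustrative Loongson SPI front end");
	MODULE_LICENSE("GPL");
	MODULE_IMPORT_NS(SPI_LOONGSON_CORE);

Because the core exports its symbols in the SPI_LOONGSON_CORE namespace, any such consumer must carry the MODULE_IMPORT_NS() line, as both in-tree front ends above do.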
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 675a73cf1579..bbf2015d8e5c 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -14,8 +14,8 @@
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/list_sort.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c
index 2d436541d6c2..b357461f1b8b 100644
--- a/drivers/spi/spi-lp8841-rtc.c
+++ b/drivers/spi/spi-lp8841-rtc.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#define DRIVER_NAME "spi_lp8841_rtc"
@@ -75,14 +74,14 @@ bitbang_txrx_be_cpha0_lsb(struct spi_lp8841_rtc *data,
for (; likely(bits); bits--) {
/* setup LSB (to slave) on leading edge */
- if ((flags & SPI_MASTER_NO_TX) == 0)
+ if ((flags & SPI_CONTROLLER_NO_TX) == 0)
setmosi(data, (word & 1));
usleep_range(usecs, usecs + 1); /* T(setup) */
/* sample LSB (from slave) on trailing edge */
word >>= 1;
- if ((flags & SPI_MASTER_NO_RX) == 0)
+ if ((flags & SPI_CONTROLLER_NO_RX) == 0)
word |= (getmiso(data) << 31);
setsck(data, !cpol);
@@ -113,7 +112,7 @@ spi_lp8841_rtc_transfer_one(struct spi_master *master,
while (likely(count > 0)) {
word = *tx++;
bitbang_txrx_be_cpha0_lsb(data, 1, 0,
- SPI_MASTER_NO_RX, word, 8);
+ SPI_CONTROLLER_NO_RX, word, 8);
count--;
}
} else if (rx) {
@@ -121,7 +120,7 @@ spi_lp8841_rtc_transfer_one(struct spi_master *master,
writeb(data->state, data->iomem);
while (likely(count > 0)) {
word = bitbang_txrx_be_cpha0_lsb(data, 1, 0,
- SPI_MASTER_NO_TX, word, 8);
+ SPI_CONTROLLER_NO_TX, word, 8);
*rx++ = word;
count--;
}
@@ -191,7 +190,7 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, master);
- master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->mode_bits = SPI_CS_HIGH | SPI_3WIRE | SPI_LSB_FIRST;
master->bus_num = pdev->id;
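
Most of the remaining hunks in this diff apply the same tree-wide move from the legacy "master/slave" SPI naming to the "controller/host/target" naming. The sketch below is an illustrative summary of the recurring replacement pairs, not taken from any single driver here; the probe function and private struct are hypothetical, while every new-style call and flag appears in the hunks that follow:

	/*
	 * Illustrative only: recurring renames in this diff, with the old
	 * spellings kept as comments.  example_spi_probe()/example_priv
	 * are hypothetical.
	 */
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	struct example_priv {
		void __iomem *base;
	};

	static int example_spi_probe(struct platform_device *pdev)
	{
		struct spi_controller *host;	/* was: struct spi_master *master */
		struct example_priv *priv;

		/* was: devm_spi_alloc_master() */
		host = devm_spi_alloc_host(&pdev->dev, sizeof(*priv));
		if (!host)
			return -ENOMEM;

		/* was: SPI_MASTER_HALF_DUPLEX, SPI_MASTER_MUST_TX, ... */
		host->flags = SPI_CONTROLLER_HALF_DUPLEX;

		/* was: spi_master_get_devdata() */
		priv = spi_controller_get_devdata(host);
		priv->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);

		/* was: devm_spi_register_master() */
		return devm_spi_register_controller(&pdev->dev, host);
	}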
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 141562c882f1..43d134f4b42b 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
@@ -864,7 +863,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
SPI_BPW_MASK(24) |
SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
- master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+ master->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX);
master->min_speed_hz = spicc->data->min_speed_hz;
master->max_speed_hz = spicc->data->max_speed_hz;
master->setup = meson_spicc_setup;
diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c
index b59e8a0c5b97..b451cd4860ec 100644
--- a/drivers/spi/spi-microchip-core.c
+++ b/drivers/spi/spi-microchip-core.c
@@ -530,10 +530,8 @@ static int mchp_corespi_probe(struct platform_device *pdev)
return PTR_ERR(spi->regs);
spi->irq = platform_get_irq(pdev, 0);
- if (spi->irq <= 0)
- return dev_err_probe(&pdev->dev, -ENXIO,
- "invalid IRQ %d for SPI controller\n",
- spi->irq);
+ if (spi->irq < 0)
+ return spi->irq;
ret = devm_request_irq(&pdev->dev, spi->irq, mchp_corespi_interrupt,
IRQF_SHARED, dev_name(&pdev->dev), master);
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 99aeef28a477..5cecca1bef02 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -53,7 +53,7 @@ struct mpc512x_psc_spi {
int type;
void __iomem *psc;
struct mpc512x_psc_fifo __iomem *fifo;
- unsigned int irq;
+ int irq;
u8 bits_per_word;
u32 mclk_rate;
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 9a1a080fb688..795c08594a4d 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -29,7 +29,7 @@ struct mpc52xx_psc_spi {
/* driver internal data */
struct mpc52xx_psc __iomem *psc;
struct mpc52xx_psc_fifo __iomem *fifo;
- unsigned int irq;
+ int irq;
u8 bits_per_word;
struct completion done;
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 39272ad6641b..0757985947dd 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -1142,7 +1142,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->mode_bits |= SPI_CS_HIGH;
if (mdata->dev_comp->must_tx)
- master->flags = SPI_MASTER_MUST_TX;
+ master->flags = SPI_CONTROLLER_MUST_TX;
if (mdata->dev_comp->ipm_design)
master->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD;
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index 3e9d396b33bd..91600e5c22e4 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -14,7 +14,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index baa7a5353987..cf4ee8b19e42 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -13,7 +13,8 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index bed8317cd205..4433a8a9299f 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -76,7 +76,8 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/mtd/nand-ecc-mtk.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 963a53dd680b..cd0e7ae07162 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -572,7 +572,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->num_chipselect = 3;
master->dev.of_node = np;
- master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->auto_runtime_pm = true;
spi = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index eb353561509a..0ca21ff0e9cc 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -12,7 +12,7 @@
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/regmap.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi-mem.h>
#include <linux/mfd/syscon.h>
@@ -287,7 +287,7 @@ static ssize_t npcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(desc->mem->spi->master);
+ spi_controller_get_devdata(desc->mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(desc->mem->spi, 0)];
void __iomem *src = (void __iomem *)(chip->flash_region_mapped_ptr +
offs);
@@ -314,7 +314,7 @@ static ssize_t npcm_fiu_direct_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(desc->mem->spi->master);
+ spi_controller_get_devdata(desc->mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(desc->mem->spi, 0)];
void __iomem *dst = (void __iomem *)(chip->flash_region_mapped_ptr +
offs);
@@ -335,7 +335,7 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
bool is_address_size, u8 *data, u32 data_size)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(mem->spi->master);
+ spi_controller_get_devdata(mem->spi->controller);
u32 uma_cfg = BIT(10);
u32 data_reg[4];
int ret;
@@ -390,7 +390,7 @@ static int npcm_fiu_uma_write(struct spi_mem *mem,
bool is_address_size, u8 *data, u32 data_size)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(mem->spi->master);
+ spi_controller_get_devdata(mem->spi->controller);
u32 uma_cfg = BIT(10);
u32 data_reg[4] = {0};
u32 val;
@@ -439,7 +439,7 @@ static int npcm_fiu_manualwrite(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(mem->spi->master);
+ spi_controller_get_devdata(mem->spi->controller);
u8 *data = (u8 *)op->data.buf.out;
u32 num_data_chunks;
u32 remain_data;
@@ -544,7 +544,7 @@ static void npcm_fiux_set_direct_rd(struct npcm_fiu_spi *fiu)
static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(mem->spi->master);
+ spi_controller_get_devdata(mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(mem->spi, 0)];
int ret = 0;
u8 *buf;
@@ -604,7 +604,7 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
struct npcm_fiu_spi *fiu =
- spi_controller_get_devdata(desc->mem->spi->master);
+ spi_controller_get_devdata(desc->mem->spi->controller);
struct npcm_fiu_chip *chip = &fiu->chip[spi_get_chipselect(desc->mem->spi, 0)];
struct regmap *gcr_regmap;
@@ -665,7 +665,7 @@ static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
static int npcm_fiu_setup(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct npcm_fiu_spi *fiu = spi_controller_get_devdata(ctrl);
struct npcm_fiu_chip *chip;
@@ -701,7 +701,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
void __iomem *regbase;
int id, ret;
- ctrl = devm_spi_alloc_master(dev, sizeof(*fiu));
+ ctrl = devm_spi_alloc_host(dev, sizeof(*fiu));
if (!ctrl)
return -ENOMEM;
@@ -755,7 +755,7 @@ static int npcm_fiu_probe(struct platform_device *pdev)
ctrl->num_chipselect = fiu->info->max_cs;
ctrl->dev.of_node = dev->of_node;
- ret = devm_spi_register_master(dev, ctrl);
+ ret = devm_spi_register_controller(dev, ctrl);
if (ret)
clk_disable_unprepare(fiu->clk);
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 544017655787..45a4acc95661 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -47,7 +47,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regmap.h>
@@ -1157,12 +1156,10 @@ static int nxp_fspi_probe(struct platform_device *pdev)
/* find the resources - configuration register address space */
if (is_acpi_node(dev_fwnode(f->dev)))
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ f->iobase = devm_platform_ioremap_resource(pdev, 0);
else
- res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "fspi_base");
+ f->iobase = devm_platform_ioremap_resource_byname(pdev, "fspi_base");
- f->iobase = devm_ioremap_resource(dev, res);
if (IS_ERR(f->iobase)) {
ret = PTR_ERR(f->iobase);
goto err_put_ctrl;
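
The spi-nxp-fspi.c hunk above folds the separate platform_get_resource()/devm_ioremap_resource() steps into the combined mapping helpers. A minimal sketch of the equivalence, with a hypothetical wrapper function that is not part of the commit:

	/* Illustrative only; example_map_regs() is a hypothetical helper. */
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static void __iomem *example_map_regs(struct platform_device *pdev, bool by_index)
	{
		/*
		 * devm_platform_ioremap_resource(pdev, 0) is shorthand for
		 * platform_get_resource(pdev, IORESOURCE_MEM, 0) followed by
		 * devm_ioremap_resource(&pdev->dev, res); the _byname variant
		 * looks the MEM resource up by name instead of index.
		 */
		if (by_index)
			return devm_platform_ioremap_resource(pdev, 0);

		return devm_platform_ioremap_resource_byname(pdev, "fspi_base");
	}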
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 902d2e0c1f2f..f89aa9e52c23 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -486,7 +486,7 @@ static int uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
- master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->bus_num = 2; /* "official" */
master->num_chipselect = 4;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 8331e247bf5c..e5cd82eb9e54 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1508,10 +1508,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
status = platform_get_irq(pdev, 0);
- if (status < 0) {
- dev_err_probe(&pdev->dev, status, "no irq resource found\n");
+ if (status < 0)
goto free_master;
- }
init_completion(&mcspi->txdone);
status = devm_request_irq(&pdev->dev, status,
omap2_mcspi_irq_handler, 0, pdev->name,
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index ad9e83e34297..1f10f5c8e34d 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,7 +16,6 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/sizes.h>
#include <asm/unaligned.h>
@@ -91,7 +90,7 @@ struct orion_child_options {
};
struct orion_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base;
struct clk *clk;
struct clk *axi_clk;
@@ -142,7 +141,7 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
struct orion_spi *orion_spi;
const struct orion_spi_dev *devdata;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
devdata = orion_spi->devdata;
tclk_hz = clk_get_rate(orion_spi->clk);
@@ -236,7 +235,7 @@ orion_spi_mode_set(struct spi_device *spi)
u32 reg;
struct orion_spi *orion_spi;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
reg &= ~ORION_SPI_MODE_MASK;
@@ -258,7 +257,7 @@ orion_spi_50mhz_ac_timing_erratum(struct spi_device *spi, unsigned int speed)
u32 reg;
struct orion_spi *orion_spi;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
/*
* Erratum description: (Erratum NO. FE-9144572) The device
@@ -298,7 +297,7 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
unsigned int bits_per_word = spi->bits_per_word;
int rc;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
if ((t != NULL) && t->speed_hz)
speed = t->speed_hz;
@@ -331,7 +330,7 @@ static void orion_spi_set_cs(struct spi_device *spi, bool enable)
void __iomem *ctrl_reg;
u32 val;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
ctrl_reg = spi_reg(orion_spi, ORION_SPI_IF_CTRL_REG);
val = readl(ctrl_reg);
@@ -389,7 +388,7 @@ orion_spi_write_read_8bit(struct spi_device *spi,
cs_single_byte = spi->mode & SPI_CS_WORD;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
if (cs_single_byte)
orion_spi_set_cs(spi, 0);
@@ -440,7 +439,7 @@ orion_spi_write_read_16bit(struct spi_device *spi,
return -1;
}
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
@@ -476,7 +475,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
word_len = spi->bits_per_word;
count = xfer->len;
- orion_spi = spi_master_get_devdata(spi->master);
+ orion_spi = spi_controller_get_devdata(spi->controller);
/*
* Use SPI direct write mode if base address is available
@@ -529,7 +528,7 @@ out:
return xfer->len - count;
}
-static int orion_spi_transfer_one(struct spi_master *master,
+static int orion_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *t)
{
@@ -549,7 +548,7 @@ static int orion_spi_setup(struct spi_device *spi)
{
int ret;
#ifdef CONFIG_PM
- struct orion_spi *orion_spi = spi_master_get_devdata(spi->master);
+ struct orion_spi *orion_spi = spi_controller_get_devdata(spi->controller);
struct device *dev = orion_spi->dev;
orion_spi_runtime_resume(dev);
@@ -645,44 +644,44 @@ MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
static int orion_spi_probe(struct platform_device *pdev)
{
const struct orion_spi_dev *devdata;
- struct spi_master *master;
+ struct spi_controller *host;
struct orion_spi *spi;
struct resource *r;
unsigned long tclk_hz;
int status = 0;
struct device_node *np;
- master = spi_alloc_master(&pdev->dev, sizeof(*spi));
- if (master == NULL) {
- dev_dbg(&pdev->dev, "master allocation failed\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*spi));
+ if (host == NULL) {
+ dev_dbg(&pdev->dev, "host allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
- master->bus_num = pdev->id;
+ host->bus_num = pdev->id;
if (pdev->dev.of_node) {
u32 cell_index;
if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
&cell_index))
- master->bus_num = cell_index;
+ host->bus_num = cell_index;
}
/* we support all 4 SPI modes and LSB first option */
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_CS_WORD;
- master->set_cs = orion_spi_set_cs;
- master->transfer_one = orion_spi_transfer_one;
- master->num_chipselect = ORION_NUM_CHIPSELECTS;
- master->setup = orion_spi_setup;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
- master->auto_runtime_pm = true;
- master->use_gpio_descriptors = true;
- master->flags = SPI_MASTER_GPIO_SS;
-
- platform_set_drvdata(pdev, master);
-
- spi = spi_master_get_devdata(master);
- spi->master = master;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_CS_WORD;
+ host->set_cs = orion_spi_set_cs;
+ host->transfer_one = orion_spi_transfer_one;
+ host->num_chipselect = ORION_NUM_CHIPSELECTS;
+ host->setup = orion_spi_setup;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ host->auto_runtime_pm = true;
+ host->use_gpio_descriptors = true;
+ host->flags = SPI_CONTROLLER_GPIO_SS;
+
+ platform_set_drvdata(pdev, host);
+
+ spi = spi_controller_get_devdata(host);
+ spi->host = host;
spi->dev = &pdev->dev;
devdata = device_get_match_data(&pdev->dev);
@@ -719,14 +718,14 @@ static int orion_spi_probe(struct platform_device *pdev)
*/
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-370-spi"))
- master->max_speed_hz = min(devdata->max_hz,
+ host->max_speed_hz = min(devdata->max_hz,
DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
else if (devdata->min_divisor)
- master->max_speed_hz =
+ host->max_speed_hz =
DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
else
- master->max_speed_hz = devdata->max_hz;
- master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
+ host->max_speed_hz = devdata->max_hz;
+ host->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
spi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(spi->base)) {
@@ -785,8 +784,8 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status < 0)
goto out_rel_pm;
- master->dev.of_node = pdev->dev.of_node;
- status = spi_register_master(master);
+ host->dev.of_node = pdev->dev.of_node;
+ status = spi_register_controller(host);
if (status < 0)
goto out_rel_pm;
@@ -799,21 +798,21 @@ out_rel_axi_clk:
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
- spi_master_put(master);
+ spi_controller_put(host);
return status;
}
static void orion_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct orion_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct orion_spi *spi = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
pm_runtime_disable(&pdev->dev);
}
@@ -822,8 +821,8 @@ MODULE_ALIAS("platform:" DRIVER_NAME);
#ifdef CONFIG_PM
static int orion_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct orion_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct orion_spi *spi = spi_controller_get_devdata(host);
clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
@@ -832,8 +831,8 @@ static int orion_spi_runtime_suspend(struct device *dev)
static int orion_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct orion_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct orion_spi *spi = spi_controller_get_devdata(host);
if (!IS_ERR(spi->axi_clk))
clk_prepare_enable(spi->axi_clk);
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index 4445d82409d6..3638e974f5d4 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -65,7 +65,7 @@ struct pci1xxxx_spi_internal {
bool spi_xfer_in_progress;
int irq;
struct completion spi_xfer_done;
- struct spi_master *spi_host;
+ struct spi_controller *spi_host;
struct pci1xxxx_spi *parent;
struct {
unsigned int dev_sel : 3;
@@ -250,7 +250,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
struct pci1xxxx_spi_internal *spi_sub_ptr;
struct device *dev = &pdev->dev;
struct pci1xxxx_spi *spi_bus;
- struct spi_master *spi_host;
+ struct spi_controller *spi_host;
u32 regval;
int ret;
@@ -276,7 +276,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
sizeof(struct pci1xxxx_spi_internal),
GFP_KERNEL);
spi_sub_ptr = spi_bus->spi_int[iter];
- spi_sub_ptr->spi_host = devm_spi_alloc_master(dev, sizeof(struct spi_master));
+ spi_sub_ptr->spi_host = devm_spi_alloc_host(dev, sizeof(struct spi_controller));
if (!spi_sub_ptr->spi_host)
return -ENOMEM;
@@ -365,9 +365,9 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
spi_host->bits_per_word_mask = SPI_BPW_MASK(8);
spi_host->max_speed_hz = PCI1XXXX_SPI_MAX_CLOCK_HZ;
spi_host->min_speed_hz = PCI1XXXX_SPI_MIN_CLOCK_HZ;
- spi_host->flags = SPI_MASTER_MUST_TX;
- spi_master_set_devdata(spi_host, spi_sub_ptr);
- ret = devm_spi_register_master(dev, spi_host);
+ spi_host->flags = SPI_CONTROLLER_MUST_TX;
+ spi_controller_set_devdata(spi_host, spi_sub_ptr);
+ ret = devm_spi_register_controller(dev, spi_host);
if (ret)
goto error;
}
@@ -415,7 +415,7 @@ static int pci1xxxx_spi_resume(struct device *dev)
for (iter = 0; iter < spi_ptr->total_hw_instances; iter++) {
spi_sub_ptr = spi_ptr->spi_int[iter];
- spi_master_resume(spi_sub_ptr->spi_host);
+ spi_controller_resume(spi_sub_ptr->spi_host);
writel(regval, spi_ptr->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(iter));
@@ -441,7 +441,7 @@ static int pci1xxxx_spi_suspend(struct device *dev)
/* Store existing config before suspend */
store_restore_config(spi_ptr, spi_sub_ptr, iter, 1);
- spi_master_suspend(spi_sub_ptr->spi_host);
+ spi_controller_suspend(spi_sub_ptr->spi_host);
writel(reg1, spi_ptr->reg_base +
SPI_MST_EVENT_MASK_REG_OFFSET(iter));
}
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index 51dfb49523f3..883354d0ff52 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -139,7 +139,7 @@ struct pic32_sqi {
void __iomem *regs;
struct clk *sys_clk;
struct clk *base_clk; /* drives spi clock */
- struct spi_master *master;
+ struct spi_controller *host;
int irq;
struct completion xfer_done;
struct ring_desc *ring;
@@ -316,9 +316,9 @@ static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
return 0;
}
-static int pic32_sqi_prepare_hardware(struct spi_master *master)
+static int pic32_sqi_prepare_hardware(struct spi_controller *host)
{
- struct pic32_sqi *sqi = spi_master_get_devdata(master);
+ struct pic32_sqi *sqi = spi_controller_get_devdata(host);
/* enable spi interface */
pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
@@ -328,7 +328,7 @@ static int pic32_sqi_prepare_hardware(struct spi_master *master)
return 0;
}
-static bool pic32_sqi_can_dma(struct spi_master *master,
+static bool pic32_sqi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *x)
{
@@ -336,7 +336,7 @@ static bool pic32_sqi_can_dma(struct spi_master *master,
return true;
}
-static int pic32_sqi_one_message(struct spi_master *master,
+static int pic32_sqi_one_message(struct spi_controller *host,
struct spi_message *msg)
{
struct spi_device *spi = msg->spi;
@@ -347,7 +347,7 @@ static int pic32_sqi_one_message(struct spi_master *master,
unsigned long timeout;
u32 val;
- sqi = spi_master_get_devdata(master);
+ sqi = spi_controller_get_devdata(host);
reinit_completion(&sqi->xfer_done);
msg->actual_length = 0;
@@ -412,7 +412,7 @@ static int pic32_sqi_one_message(struct spi_master *master,
/* wait for xfer completion */
timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
if (timeout == 0) {
- dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
+ dev_err(&sqi->host->dev, "wait timedout/interrupted\n");
ret = -ETIMEDOUT;
msg->status = ret;
} else {
@@ -434,14 +434,14 @@ xfer_out:
/* release ring descr */
ring_desc_put(sqi, rdesc);
}
- spi_finalize_current_message(spi->master);
+ spi_finalize_current_message(spi->controller);
return ret;
}
-static int pic32_sqi_unprepare_hardware(struct spi_master *master)
+static int pic32_sqi_unprepare_hardware(struct spi_controller *host)
{
- struct pic32_sqi *sqi = spi_master_get_devdata(master);
+ struct pic32_sqi *sqi = spi_controller_get_devdata(host);
/* disable clk */
pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
@@ -458,18 +458,18 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
int i;
/* allocate coherent DMAable memory for hardware buffer descriptors. */
- sqi->bd = dma_alloc_coherent(&sqi->master->dev,
+ sqi->bd = dma_alloc_coherent(&sqi->host->dev,
sizeof(*bd) * PESQI_BD_COUNT,
&sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
- dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
+ dev_err(&sqi->host->dev, "failed allocating dma buffer\n");
return -ENOMEM;
}
/* allocate software ring descriptors */
sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
if (!sqi->ring) {
- dma_free_coherent(&sqi->master->dev,
+ dma_free_coherent(&sqi->host->dev,
sizeof(*bd) * PESQI_BD_COUNT,
sqi->bd, sqi->bd_dma);
return -ENOMEM;
@@ -498,7 +498,7 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
static void ring_desc_ring_free(struct pic32_sqi *sqi)
{
- dma_free_coherent(&sqi->master->dev,
+ dma_free_coherent(&sqi->host->dev,
sizeof(struct buf_desc) * PESQI_BD_COUNT,
sqi->bd, sqi->bd_dma);
kfree(sqi->ring);
@@ -568,28 +568,28 @@ static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
static int pic32_sqi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct pic32_sqi *sqi;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*sqi));
+ if (!host)
return -ENOMEM;
- sqi = spi_master_get_devdata(master);
- sqi->master = master;
+ sqi = spi_controller_get_devdata(host);
+ sqi->host = host;
sqi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sqi->regs)) {
ret = PTR_ERR(sqi->regs);
- goto err_free_master;
+ goto err_free_host;
}
/* irq */
sqi->irq = platform_get_irq(pdev, 0);
if (sqi->irq < 0) {
ret = sqi->irq;
- goto err_free_master;
+ goto err_free_host;
}
/* clocks */
@@ -597,27 +597,27 @@ static int pic32_sqi_probe(struct platform_device *pdev)
if (IS_ERR(sqi->sys_clk)) {
ret = PTR_ERR(sqi->sys_clk);
dev_err(&pdev->dev, "no sys_clk ?\n");
- goto err_free_master;
+ goto err_free_host;
}
sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
if (IS_ERR(sqi->base_clk)) {
ret = PTR_ERR(sqi->base_clk);
dev_err(&pdev->dev, "no base clk ?\n");
- goto err_free_master;
+ goto err_free_host;
}
ret = clk_prepare_enable(sqi->sys_clk);
if (ret) {
dev_err(&pdev->dev, "sys clk enable failed\n");
- goto err_free_master;
+ goto err_free_host;
}
ret = clk_prepare_enable(sqi->base_clk);
if (ret) {
dev_err(&pdev->dev, "base clk enable failed\n");
clk_disable_unprepare(sqi->sys_clk);
- goto err_free_master;
+ goto err_free_host;
}
init_completion(&sqi->xfer_done);
@@ -640,24 +640,24 @@ static int pic32_sqi_probe(struct platform_device *pdev)
goto err_free_ring;
}
- /* register master */
- master->num_chipselect = 2;
- master->max_speed_hz = clk_get_rate(sqi->base_clk);
- master->dma_alignment = 32;
- master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
- master->dev.of_node = pdev->dev.of_node;
- master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
+ /* register host */
+ host->num_chipselect = 2;
+ host->max_speed_hz = clk_get_rate(sqi->base_clk);
+ host->dma_alignment = 32;
+ host->max_dma_len = PESQI_BD_BUF_LEN_MAX;
+ host->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->can_dma = pic32_sqi_can_dma;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- master->transfer_one_message = pic32_sqi_one_message;
- master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
- master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
-
- ret = devm_spi_register_master(&pdev->dev, master);
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->can_dma = pic32_sqi_can_dma;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ host->transfer_one_message = pic32_sqi_one_message;
+ host->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
+ host->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
+
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&master->dev, "failed registering spi master\n");
+ dev_err(&host->dev, "failed registering spi host\n");
free_irq(sqi->irq, sqi);
goto err_free_ring;
}
@@ -673,8 +673,8 @@ err_disable_clk:
clk_disable_unprepare(sqi->base_clk);
clk_disable_unprepare(sqi->sys_clk);
-err_free_master:
- spi_master_put(master);
+err_free_host:
+ spi_controller_put(host);
return ret;
}
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index f2af5e653f3d..52b788dac10a 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -100,7 +100,7 @@ struct pic32_spi {
int tx_irq;
u32 fifo_n_byte; /* FIFO depth in bytes */
struct clk *clk;
- struct spi_master *master;
+ struct spi_controller *host;
/* Current controller setting */
u32 speed_hz; /* spi-clk rate */
u32 mode;
@@ -224,9 +224,9 @@ static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
disable_irq_nosync(pic32s->tx_irq);
/* Show err message and abort xfer with err */
- dev_err(&pic32s->master->dev, "%s\n", msg);
- if (pic32s->master->cur_msg)
- pic32s->master->cur_msg->status = -EIO;
+ dev_err(&pic32s->host->dev, "%s\n", msg);
+ if (pic32s->host->cur_msg)
+ pic32s->host->cur_msg->status = -EIO;
complete(&pic32s->xfer_done);
}
@@ -250,7 +250,7 @@ static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
- if (!pic32s->master->cur_msg) {
+ if (!pic32s->host->cur_msg) {
pic32_err_stop(pic32s, "err_irq: no mesg");
return IRQ_NONE;
}
@@ -300,16 +300,16 @@ static void pic32_spi_dma_rx_notify(void *data)
static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
struct spi_transfer *xfer)
{
- struct spi_master *master = pic32s->master;
+ struct spi_controller *host = pic32s->host;
struct dma_async_tx_descriptor *desc_rx;
struct dma_async_tx_descriptor *desc_tx;
dma_cookie_t cookie;
int ret;
- if (!master->dma_rx || !master->dma_tx)
+ if (!host->dma_rx || !host->dma_tx)
return -ENODEV;
- desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ desc_rx = dmaengine_prep_slave_sg(host->dma_rx,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_DEV_TO_MEM,
@@ -319,7 +319,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
goto err_dma;
}
- desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ desc_tx = dmaengine_prep_slave_sg(host->dma_tx,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_MEM_TO_DEV,
@@ -343,13 +343,13 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
if (ret)
goto err_dma_tx;
- dma_async_issue_pending(master->dma_rx);
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(host->dma_rx);
+ dma_async_issue_pending(host->dma_tx);
return 0;
err_dma_tx:
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(host->dma_rx);
err_dma:
return ret;
}
@@ -357,7 +357,7 @@ err_dma:
static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
{
int buf_offset = offsetof(struct pic32_spi_regs, buf);
- struct spi_master *master = pic32s->master;
+ struct spi_controller *host = pic32s->host;
struct dma_slave_config cfg;
int ret;
@@ -371,16 +371,16 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
cfg.dst_addr_width = dma_width;
/* tx channel */
cfg.direction = DMA_MEM_TO_DEV;
- ret = dmaengine_slave_config(master->dma_tx, &cfg);
+ ret = dmaengine_slave_config(host->dma_tx, &cfg);
if (ret) {
- dev_err(&master->dev, "tx channel setup failed\n");
+ dev_err(&host->dev, "tx channel setup failed\n");
return ret;
}
/* rx channel */
cfg.direction = DMA_DEV_TO_MEM;
- ret = dmaengine_slave_config(master->dma_rx, &cfg);
+ ret = dmaengine_slave_config(host->dma_rx, &cfg);
if (ret)
- dev_err(&master->dev, "rx channel setup failed\n");
+ dev_err(&host->dev, "rx channel setup failed\n");
return ret;
}
@@ -430,19 +430,19 @@ static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
return 0;
}
-static int pic32_spi_prepare_hardware(struct spi_master *master)
+static int pic32_spi_prepare_hardware(struct spi_controller *host)
{
- struct pic32_spi *pic32s = spi_master_get_devdata(master);
+ struct pic32_spi *pic32s = spi_controller_get_devdata(host);
pic32_spi_enable(pic32s);
return 0;
}
-static int pic32_spi_prepare_message(struct spi_master *master,
+static int pic32_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct pic32_spi *pic32s = spi_master_get_devdata(master);
+ struct pic32_spi *pic32s = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
u32 val;
@@ -481,18 +481,18 @@ static int pic32_spi_prepare_message(struct spi_master *master,
return 0;
}
-static bool pic32_spi_can_dma(struct spi_master *master,
+static bool pic32_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct pic32_spi *pic32s = spi_master_get_devdata(master);
+ struct pic32_spi *pic32s = spi_controller_get_devdata(host);
/* skip using DMA on small size transfer to avoid overhead.*/
return (xfer->len >= PIC32_DMA_LEN_MIN) &&
test_bit(PIC32F_DMA_PREP, &pic32s->flags);
}
-static int pic32_spi_one_transfer(struct spi_master *master,
+static int pic32_spi_one_transfer(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *transfer)
{
@@ -501,7 +501,7 @@ static int pic32_spi_one_transfer(struct spi_master *master,
unsigned long timeout;
int ret;
- pic32s = spi_master_get_devdata(master);
+ pic32s = spi_controller_get_devdata(host);
/* handle transfer specific word size change */
if (transfer->bits_per_word &&
@@ -549,8 +549,8 @@ static int pic32_spi_one_transfer(struct spi_master *master,
if (timeout == 0) {
dev_err(&spi->dev, "wait error/timedout\n");
if (dma_issued) {
- dmaengine_terminate_all(master->dma_rx);
- dmaengine_terminate_all(master->dma_tx);
+ dmaengine_terminate_all(host->dma_rx);
+ dmaengine_terminate_all(host->dma_tx);
}
ret = -ETIMEDOUT;
} else {
@@ -560,16 +560,16 @@ static int pic32_spi_one_transfer(struct spi_master *master,
return ret;
}
-static int pic32_spi_unprepare_message(struct spi_master *master,
+static int pic32_spi_unprepare_message(struct spi_controller *host,
struct spi_message *msg)
{
/* nothing to do */
return 0;
}
-static int pic32_spi_unprepare_hardware(struct spi_master *master)
+static int pic32_spi_unprepare_hardware(struct spi_controller *host)
{
- struct pic32_spi *pic32s = spi_master_get_devdata(master);
+ struct pic32_spi *pic32s = spi_controller_get_devdata(host);
pic32_spi_disable(pic32s);
@@ -605,28 +605,28 @@ static void pic32_spi_cleanup(struct spi_device *spi)
static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
{
- struct spi_master *master = pic32s->master;
+ struct spi_controller *host = pic32s->host;
int ret = 0;
- master->dma_rx = dma_request_chan(dev, "spi-rx");
- if (IS_ERR(master->dma_rx)) {
- if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
+ host->dma_rx = dma_request_chan(dev, "spi-rx");
+ if (IS_ERR(host->dma_rx)) {
+ if (PTR_ERR(host->dma_rx) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
dev_warn(dev, "RX channel not found.\n");
- master->dma_rx = NULL;
+ host->dma_rx = NULL;
goto out_err;
}
- master->dma_tx = dma_request_chan(dev, "spi-tx");
- if (IS_ERR(master->dma_tx)) {
- if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
+ host->dma_tx = dma_request_chan(dev, "spi-tx");
+ if (IS_ERR(host->dma_tx)) {
+ if (PTR_ERR(host->dma_tx) == -EPROBE_DEFER)
ret = -EPROBE_DEFER;
else
dev_warn(dev, "TX channel not found.\n");
- master->dma_tx = NULL;
+ host->dma_tx = NULL;
goto out_err;
}
@@ -639,14 +639,14 @@ static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
return 0;
out_err:
- if (master->dma_rx) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (host->dma_rx) {
+ dma_release_channel(host->dma_rx);
+ host->dma_rx = NULL;
}
- if (master->dma_tx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (host->dma_tx) {
+ dma_release_channel(host->dma_tx);
+ host->dma_tx = NULL;
}
return ret;
@@ -658,11 +658,11 @@ static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
return;
clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
- if (pic32s->master->dma_rx)
- dma_release_channel(pic32s->master->dma_rx);
+ if (pic32s->host->dma_rx)
+ dma_release_channel(pic32s->host->dma_rx);
- if (pic32s->master->dma_tx)
- dma_release_channel(pic32s->master->dma_tx);
+ if (pic32s->host->dma_tx)
+ dma_release_channel(pic32s->host->dma_tx);
}
static void pic32_spi_hw_init(struct pic32_spi *pic32s)
@@ -680,7 +680,7 @@ static void pic32_spi_hw_init(struct pic32_spi *pic32s)
/* disable framing mode */
ctrl &= ~CTRL_FRMEN;
- /* enable master mode while disabled */
+ /* enable host mode while disabled */
ctrl |= CTRL_MSTEN;
/* set tx fifo threshold interrupt */
@@ -752,36 +752,36 @@ err_unmap_mem:
static int pic32_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct pic32_spi *pic32s;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*pic32s));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*pic32s));
+ if (!host)
return -ENOMEM;
- pic32s = spi_master_get_devdata(master);
- pic32s->master = master;
+ pic32s = spi_controller_get_devdata(host);
+ pic32s->host = host;
ret = pic32_spi_hw_probe(pdev, pic32s);
if (ret)
- goto err_master;
-
- master->dev.of_node = pdev->dev.of_node;
- master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
- master->num_chipselect = 1; /* single chip-select */
- master->max_speed_hz = clk_get_rate(pic32s->clk);
- master->setup = pic32_spi_setup;
- master->cleanup = pic32_spi_cleanup;
- master->flags = SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ goto err_host;
+
+ host->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
+ host->num_chipselect = 1; /* single chip-select */
+ host->max_speed_hz = clk_get_rate(pic32s->clk);
+ host->setup = pic32_spi_setup;
+ host->cleanup = pic32_spi_cleanup;
+ host->flags = SPI_CONTROLLER_MUST_TX | SPI_CONTROLLER_MUST_RX;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(32);
- master->transfer_one = pic32_spi_one_transfer;
- master->prepare_message = pic32_spi_prepare_message;
- master->unprepare_message = pic32_spi_unprepare_message;
- master->prepare_transfer_hardware = pic32_spi_prepare_hardware;
- master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
- master->use_gpio_descriptors = true;
+ host->transfer_one = pic32_spi_one_transfer;
+ host->prepare_message = pic32_spi_prepare_message;
+ host->unprepare_message = pic32_spi_unprepare_message;
+ host->prepare_transfer_hardware = pic32_spi_prepare_hardware;
+ host->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
+ host->use_gpio_descriptors = true;
/* optional DMA support */
ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
@@ -789,7 +789,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
goto err_bailout;
if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
- master->can_dma = pic32_spi_can_dma;
+ host->can_dma = pic32_spi_can_dma;
init_completion(&pic32s->xfer_done);
pic32s->mode = -1;
@@ -824,10 +824,10 @@ static int pic32_spi_probe(struct platform_device *pdev)
goto err_bailout;
}
- /* register master */
- ret = devm_spi_register_master(&pdev->dev, master);
+ /* register host */
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret) {
- dev_err(&master->dev, "failed registering spi master\n");
+ dev_err(&host->dev, "failed registering spi host\n");
goto err_bailout;
}
@@ -838,8 +838,8 @@ static int pic32_spi_probe(struct platform_device *pdev)
err_bailout:
pic32_spi_dma_unprep(pic32s);
clk_disable_unprepare(pic32s->clk);
-err_master:
- spi_master_put(master);
+err_host:
+ spi_controller_put(host);
return ret;
}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 1af75eff26b6..bb347b6bb6f3 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -336,8 +336,8 @@ struct vendor_data {
* @phybase: the physical memory where the SSP device resides
* @virtbase: the virtual memory where the SSP is mapped
* @clk: outgoing clock "SPICLK" for the SPI bus
- * @master: SPI framework hookup
- * @master_info: controller-specific data from machine setup
+ * @host: SPI framework hookup
+ * @host_info: controller-specific data from machine setup
* @pump_transfers: Tasklet used in Interrupt Transfer mode
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
@@ -370,8 +370,8 @@ struct pl022 {
resource_size_t phybase;
void __iomem *virtbase;
struct clk *clk;
- struct spi_master *master;
- struct pl022_ssp_controller *master_info;
+ struct spi_controller *host;
+ struct pl022_ssp_controller *host_info;
/* Message per-transfer pump */
struct tasklet_struct pump_transfers;
struct spi_message *cur_msg;
@@ -500,7 +500,7 @@ static void giveback(struct pl022 *pl022)
* could invalidate the cs_control() callback...
*/
/* get a pointer to the next message, if any */
- next_msg = spi_get_next_queued_message(pl022->master);
+ next_msg = spi_get_next_queued_message(pl022->host);
/*
* see if the next and current messages point
@@ -523,7 +523,7 @@ static void giveback(struct pl022 *pl022)
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
- spi_finalize_current_message(pl022->master);
+ spi_finalize_current_message(pl022->host);
}
/**
@@ -1110,16 +1110,16 @@ static int pl022_dma_probe(struct pl022 *pl022)
* of them.
*/
pl022->dma_rx_channel = dma_request_channel(mask,
- pl022->master_info->dma_filter,
- pl022->master_info->dma_rx_param);
+ pl022->host_info->dma_filter,
+ pl022->host_info->dma_rx_param);
if (!pl022->dma_rx_channel) {
dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
goto err_no_rxchan;
}
pl022->dma_tx_channel = dma_request_channel(mask,
- pl022->master_info->dma_filter,
- pl022->master_info->dma_tx_param);
+ pl022->host_info->dma_filter,
+ pl022->host_info->dma_tx_param);
if (!pl022->dma_tx_channel) {
dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
goto err_no_txchan;
@@ -1573,10 +1573,10 @@ out:
return;
}
-static int pl022_transfer_one_message(struct spi_master *master,
+static int pl022_transfer_one_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct pl022 *pl022 = spi_master_get_devdata(master);
+ struct pl022 *pl022 = spi_controller_get_devdata(host);
/* Initial message state */
pl022->cur_msg = msg;
@@ -1602,9 +1602,9 @@ static int pl022_transfer_one_message(struct spi_master *master,
return 0;
}
-static int pl022_unprepare_transfer_hardware(struct spi_master *master)
+static int pl022_unprepare_transfer_hardware(struct spi_controller *host)
{
- struct pl022 *pl022 = spi_master_get_devdata(master);
+ struct pl022 *pl022 = spi_controller_get_devdata(host);
/* nothing more to do - disable spi/ssp and power off */
writew((readw(SSP_CR1(pl022->virtbase)) &
@@ -1826,10 +1826,10 @@ static const struct pl022_config_chip pl022_default_chip_info = {
};
/**
- * pl022_setup - setup function registered to SPI master framework
+ * pl022_setup - setup function registered to SPI host framework
* @spi: spi device which is requesting setup
*
- * This function is registered to the SPI framework for this SPI master
+ * This function is registered to the SPI framework for this SPI host
* controller. If it is the first time when setup is called by this device,
* this function will initialize the runtime state for this chip and save
* the same in the device structure. Else it will update the runtime info
@@ -1844,7 +1844,7 @@ static int pl022_setup(struct spi_device *spi)
struct chip_data *chip;
struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
int status = 0;
- struct pl022 *pl022 = spi_master_get_devdata(spi->master);
+ struct pl022 *pl022 = spi_controller_get_devdata(spi->controller);
unsigned int bits = spi->bits_per_word;
u32 tmp;
struct device_node *np = spi->dev.of_node;
@@ -1964,7 +1964,7 @@ static int pl022_setup(struct spi_device *spi)
chip->dmacr = 0;
chip->cpsr = 0;
if ((chip_info->com_mode == DMA_TRANSFER)
- && ((pl022->master_info)->enable_dma)) {
+ && ((pl022->host_info)->enable_dma)) {
chip->enable_dma = true;
dev_dbg(&spi->dev, "DMA mode set in controller state\n");
SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
@@ -2061,10 +2061,10 @@ static int pl022_setup(struct spi_device *spi)
}
/**
- * pl022_cleanup - cleanup function registered to SPI master framework
+ * pl022_cleanup - cleanup function registered to SPI host framework
* @spi: spi device which is requesting cleanup
*
- * This function is registered to the SPI framework for this SPI master
+ * This function is registered to the SPI framework for this SPI host
* controller. It will free the runtime state of chip.
*/
static void pl022_cleanup(struct spi_device *spi)
@@ -2103,7 +2103,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info =
dev_get_platdata(&adev->dev);
- struct spi_master *master;
+ struct spi_controller *host;
struct pl022 *pl022 = NULL; /*Data for this driver */
int status = 0;
@@ -2117,16 +2117,16 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
return -ENODEV;
}
- /* Allocate master with space for data */
- master = spi_alloc_master(dev, sizeof(struct pl022));
- if (master == NULL) {
- dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
+ /* Allocate host with space for data */
+ host = spi_alloc_host(dev, sizeof(struct pl022));
+ if (host == NULL) {
+ dev_err(&adev->dev, "probe - cannot alloc SPI host\n");
return -ENOMEM;
}
- pl022 = spi_master_get_devdata(master);
- pl022->master = master;
- pl022->master_info = platform_info;
+ pl022 = spi_controller_get_devdata(host);
+ pl022->host = host;
+ pl022->host_info = platform_info;
pl022->adev = adev;
pl022->vendor = id->data;
@@ -2134,25 +2134,25 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
* Bus Number Which has been Assigned to this SSP controller
* on this board
*/
- master->bus_num = platform_info->bus_id;
- master->cleanup = pl022_cleanup;
- master->setup = pl022_setup;
- master->auto_runtime_pm = true;
- master->transfer_one_message = pl022_transfer_one_message;
- master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
- master->rt = platform_info->rt;
- master->dev.of_node = dev->of_node;
- master->use_gpio_descriptors = true;
+ host->bus_num = platform_info->bus_id;
+ host->cleanup = pl022_cleanup;
+ host->setup = pl022_setup;
+ host->auto_runtime_pm = true;
+ host->transfer_one_message = pl022_transfer_one_message;
+ host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
+ host->rt = platform_info->rt;
+ host->dev.of_node = dev->of_node;
+ host->use_gpio_descriptors = true;
/*
* Supports mode 0-3, loopback, and active low CS. Transfers are
* always MS bit first on the original pl022.
*/
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
if (pl022->vendor->extended_cr)
- master->mode_bits |= SPI_LSB_FIRST;
+ host->mode_bits |= SPI_LSB_FIRST;
- dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
+ dev_dbg(&adev->dev, "BUSNO: %d\n", host->bus_num);
status = amba_request_regions(adev, NULL);
if (status)
@@ -2215,10 +2215,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
/* Register with the SPI framework */
amba_set_drvdata(adev, pl022);
- status = devm_spi_register_master(&adev->dev, master);
+ status = devm_spi_register_controller(&adev->dev, host);
if (status != 0) {
dev_err_probe(&adev->dev, status,
- "problem registering spi master\n");
+ "problem registering spi host\n");
goto err_spi_register;
}
dev_dbg(dev, "probe succeeded\n");
@@ -2246,7 +2246,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
err_no_ioremap:
amba_release_regions(adev);
err_no_ioregion:
- spi_master_put(master);
+ spi_controller_put(host);
return status;
}
@@ -2265,7 +2265,7 @@ pl022_remove(struct amba_device *adev)
pm_runtime_get_noresume(&adev->dev);
load_ssp_default_config(pl022);
- if (pl022->master_info->enable_dma)
+ if (pl022->host_info->enable_dma)
pl022_dma_remove(pl022);
clk_disable_unprepare(pl022->clk);
@@ -2279,13 +2279,13 @@ static int pl022_suspend(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(pl022->master);
+ ret = spi_controller_suspend(pl022->host);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret) {
- spi_master_resume(pl022->master);
+ spi_controller_resume(pl022->host);
return ret;
}
@@ -2305,7 +2305,7 @@ static int pl022_resume(struct device *dev)
dev_err(dev, "problem resuming\n");
/* Start the queue running */
- ret = spi_master_resume(pl022->master);
+ ret = spi_controller_resume(pl022->host);
if (!ret)
dev_dbg(dev, "resumed\n");
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index d725e915025d..03aab661be9d 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -126,7 +126,7 @@ struct ppc4xx_spi {
unsigned char *rx;
struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */
- struct spi_master *master;
+ struct spi_controller *host;
struct device *dev;
};
@@ -143,7 +143,7 @@ static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
- hw = spi_master_get_devdata(spi->master);
+ hw = spi_controller_get_devdata(spi->controller);
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
@@ -161,7 +161,7 @@ static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
{
- struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
+ struct ppc4xx_spi *hw = spi_controller_get_devdata(spi->controller);
struct spi_ppc4xx_cs *cs = spi->controller_state;
int scr;
u8 cdm = 0;
@@ -340,7 +340,7 @@ static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
static int spi_ppc4xx_of_probe(struct platform_device *op)
{
struct ppc4xx_spi *hw;
- struct spi_master *master;
+ struct spi_controller *host;
struct spi_bitbang *bbp;
struct resource resource;
struct device_node *np = op->dev.of_node;
@@ -349,20 +349,20 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
int ret;
const unsigned int *clk;
- master = spi_alloc_master(dev, sizeof(*hw));
- if (master == NULL)
+ host = spi_alloc_host(dev, sizeof(*hw));
+ if (host == NULL)
return -ENOMEM;
- master->dev.of_node = np;
- platform_set_drvdata(op, master);
- hw = spi_master_get_devdata(master);
- hw->master = master;
+ host->dev.of_node = np;
+ platform_set_drvdata(op, host);
+ hw = spi_controller_get_devdata(host);
+ hw->host = host;
hw->dev = dev;
init_completion(&hw->done);
/* Setup the state for the bitbang driver */
bbp = &hw->bitbang;
- bbp->master = hw->master;
+ bbp->master = hw->host;
bbp->setup_transfer = spi_ppc4xx_setupxfer;
bbp->txrx_bufs = spi_ppc4xx_txrx;
bbp->use_dma = 0;
@@ -385,7 +385,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
if (opbnp == NULL) {
dev_err(dev, "OPB: cannot find node\n");
ret = -ENODEV;
- goto free_master;
+ goto free_host;
}
/* Get the clock (Hz) for the OPB */
clk = of_get_property(opbnp, "clock-frequency", NULL);
@@ -393,7 +393,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
dev_err(dev, "OPB: no clock-frequency property set\n");
of_node_put(opbnp);
ret = -ENODEV;
- goto free_master;
+ goto free_host;
}
hw->opb_freq = *clk;
hw->opb_freq >>= 2;
@@ -402,7 +402,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_err(dev, "error while parsing device node resource\n");
- goto free_master;
+ goto free_host;
}
hw->mapbase = resource.start;
hw->mapsize = resource_size(&resource);
@@ -411,7 +411,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
dev_err(dev, "too small to map registers\n");
ret = -EINVAL;
- goto free_master;
+ goto free_host;
}
/* Request IRQ */
@@ -420,7 +420,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
0, "spi_ppc4xx_of", (void *)hw);
if (ret) {
dev_err(dev, "unable to allocate interrupt\n");
- goto free_master;
+ goto free_host;
}
if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) {
@@ -443,7 +443,7 @@ static int spi_ppc4xx_of_probe(struct platform_device *op)
dev->dma_mask = 0;
ret = spi_bitbang_start(bbp);
if (ret) {
- dev_err(dev, "failed to register SPI master\n");
+ dev_err(dev, "failed to register SPI host\n");
goto unmap_regs;
}
@@ -457,8 +457,8 @@ map_io_error:
release_mem_region(hw->mapbase, hw->mapsize);
request_mem_error:
free_irq(hw->irqnum, hw);
-free_master:
- spi_master_put(master);
+free_host:
+ spi_controller_put(host);
dev_err(dev, "initialization failed\n");
return ret;
@@ -466,14 +466,14 @@ free_master:
static void spi_ppc4xx_of_remove(struct platform_device *op)
{
- struct spi_master *master = platform_get_drvdata(op);
- struct ppc4xx_spi *hw = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(op);
+ struct ppc4xx_spi *hw = spi_controller_get_devdata(host);
spi_bitbang_stop(&hw->bitbang);
release_mem_region(hw->mapbase, hw->mapsize);
free_irq(hw->irqnum, hw);
iounmap(hw->regs);
- spi_master_put(master);
+ spi_controller_put(host);
}
static const struct of_device_id spi_ppc4xx_of_match[] = {
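The ppc4xx conversion above is the generic controller/host rename: spi_alloc_master()/spi_master_get_devdata()/spi_master_put() become spi_alloc_host()/spi_controller_get_devdata()/spi_controller_put(), with no behavioural change. A minimal probe sketch of the renamed API in its device-managed form (driver name and private struct are illustrative only, not part of this patch):

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct example_spi {
	void __iomem *regs;		/* illustrative private state */
};

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct example_spi *hw;

	/* allocate a host controller with driver data attached to it */
	host = devm_spi_alloc_host(&pdev->dev, sizeof(*hw));
	if (!host)
		return -ENOMEM;

	hw = spi_controller_get_devdata(host);
	hw->regs = NULL;		/* hardware setup would fill this in */

	host->dev.of_node = pdev->dev.of_node;
	platform_set_drvdata(pdev, host);

	/* devm-registered controllers need no explicit spi_controller_put() */
	return devm_spi_register_controller(&pdev->dev, host);
}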
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 1bab18a0f262..f2a856f6a99e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1102,7 +1102,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
}
}
- if (spi_controller_is_slave(controller)) {
+ if (spi_controller_is_target(controller)) {
while (drv_data->write(drv_data))
;
if (drv_data->gpiod_ready) {
@@ -1121,7 +1121,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
return 1;
}
-static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
+static int pxa2xx_spi_target_abort(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
@@ -1199,7 +1199,7 @@ static int setup(struct spi_device *spi)
break;
default:
tx_hi_thres = 0;
- if (spi_controller_is_slave(drv_data->controller)) {
+ if (spi_controller_is_target(drv_data->controller)) {
tx_thres = 1;
rx_thres = 2;
} else {
@@ -1248,7 +1248,7 @@ static int setup(struct spi_device *spi)
}
chip->cr1 = 0;
- if (spi_controller_is_slave(drv_data->controller)) {
+ if (spi_controller_is_target(drv_data->controller)) {
chip->cr1 |= SSCR1_SCFR;
chip->cr1 |= SSCR1_SCLKDIR;
chip->cr1 |= SSCR1_SFRMDIR;
@@ -1344,7 +1344,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
match = device_get_match_data(dev);
if (match)
- type = (enum pxa_ssp_type)match;
+ type = (uintptr_t)match;
else if (is_lpss_priv) {
u32 value;
@@ -1395,7 +1395,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
else
ssp->port_id = uid;
- pdata->is_slave = device_property_read_bool(dev, "spi-slave");
+ pdata->is_target = device_property_read_bool(dev, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
pdata->dma_burst_size = 1;
@@ -1461,10 +1461,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (platform_info->is_slave)
- controller = devm_spi_alloc_slave(dev, sizeof(*drv_data));
+ if (platform_info->is_target)
+ controller = devm_spi_alloc_target(dev, sizeof(*drv_data));
else
- controller = devm_spi_alloc_master(dev, sizeof(*drv_data));
+ controller = devm_spi_alloc_host(dev, sizeof(*drv_data));
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
@@ -1487,7 +1487,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
controller->setup = setup;
controller->set_cs = pxa2xx_spi_set_cs;
controller->transfer_one = pxa2xx_spi_transfer_one;
- controller->slave_abort = pxa2xx_spi_slave_abort;
+ controller->target_abort = pxa2xx_spi_target_abort;
controller->handle_err = pxa2xx_spi_handle_err;
controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
@@ -1579,7 +1579,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
break;
default:
- if (spi_controller_is_slave(controller)) {
+ if (spi_controller_is_target(controller)) {
tmp = SSCR1_SCFR |
SSCR1_SCLKDIR |
SSCR1_SFRMDIR |
@@ -1592,7 +1592,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_Motorola | SSCR0_DataSize(8);
- if (!spi_controller_is_slave(controller))
+ if (!spi_controller_is_target(controller))
tmp |= SSCR0_SCR(2);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@@ -1620,7 +1620,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
controller->num_chipselect = platform_info->num_chipselect;
controller->use_gpio_descriptors = true;
- if (platform_info->is_slave) {
+ if (platform_info->is_target) {
drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
"ready", GPIOD_OUT_LOW);
if (IS_ERR(drv_data->gpiod_ready)) {
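Besides the host/target renames, the pxa2xx hunks switch the match-data cast from (enum pxa_ssp_type) to (uintptr_t): device_get_match_data() returns a const void * whose value encodes a small integer here, and casting through uintptr_t is the warning-free way to recover it. A hedged sketch of that idiom (the enum and function names are made up for illustration):

#include <linux/device.h>
#include <linux/property.h>
#include <linux/types.h>

enum example_ssp_type { EXAMPLE_SSP_V1 = 1, EXAMPLE_SSP_V2 = 2 };

static enum example_ssp_type example_match_type(struct device *dev)
{
	const void *match = device_get_match_data(dev);

	/* the match table stores an integer in the pointer value;
	 * go through uintptr_t rather than casting a pointer to an enum */
	return (enum example_ssp_type)(uintptr_t)match;
}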
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index 1954c39b3d08..49b775134485 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
@@ -247,11 +247,11 @@ static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
qcom_qspi_pio_xfer_ctrl(ctrl);
}
-static void qcom_qspi_handle_err(struct spi_master *master,
+static void qcom_qspi_handle_err(struct spi_controller *host,
struct spi_message *msg)
{
u32 int_status;
- struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
unsigned long flags;
int i;
@@ -411,11 +411,11 @@ static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
return xfer->len > QSPI_MAX_BYTES_FIFO;
}
-static int qcom_qspi_transfer_one(struct spi_master *master,
+static int qcom_qspi_transfer_one(struct spi_controller *host,
struct spi_device *slv,
struct spi_transfer *xfer)
{
- struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
int ret;
unsigned long speed_hz;
unsigned long flags;
@@ -443,7 +443,7 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
ctrl->xfer.tx_buf = xfer->tx_buf;
}
ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
- &master->cur_msg->transfers);
+ &host->cur_msg->transfers);
ctrl->xfer.rem_bytes = xfer->len;
if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
@@ -481,7 +481,7 @@ exit:
return 1;
}
-static int qcom_qspi_prepare_message(struct spi_master *master,
+static int qcom_qspi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
u32 mstr_cfg;
@@ -490,7 +490,7 @@ static int qcom_qspi_prepare_message(struct spi_master *master,
int tx_data_delay = 1;
unsigned long flags;
- ctrl = spi_master_get_devdata(master);
+ ctrl = spi_controller_get_devdata(host);
spin_lock_irqsave(&ctrl->lock, flags);
mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
@@ -694,18 +694,18 @@ static int qcom_qspi_probe(struct platform_device *pdev)
{
int ret;
struct device *dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct qcom_qspi *ctrl;
dev = &pdev->dev;
- master = devm_spi_alloc_master(dev, sizeof(*ctrl));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(*ctrl));
+ if (!host)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- ctrl = spi_master_get_devdata(master);
+ ctrl = spi_controller_get_devdata(host);
spin_lock_init(&ctrl->lock);
ctrl->dev = dev;
@@ -758,23 +758,23 @@ static int qcom_qspi_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "could not set DMA mask\n");
- master->max_speed_hz = 300000000;
- master->max_dma_len = 65536; /* as per HPG */
- master->dma_alignment = QSPI_ALIGN_REQ;
- master->num_chipselect = QSPI_NUM_CS;
- master->bus_num = -1;
- master->dev.of_node = pdev->dev.of_node;
- master->mode_bits = SPI_MODE_0 |
- SPI_TX_DUAL | SPI_RX_DUAL |
- SPI_TX_QUAD | SPI_RX_QUAD;
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->prepare_message = qcom_qspi_prepare_message;
- master->transfer_one = qcom_qspi_transfer_one;
- master->handle_err = qcom_qspi_handle_err;
+ host->max_speed_hz = 300000000;
+ host->max_dma_len = 65536; /* as per HPG */
+ host->dma_alignment = QSPI_ALIGN_REQ;
+ host->num_chipselect = QSPI_NUM_CS;
+ host->bus_num = -1;
+ host->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_MODE_0 |
+ SPI_TX_DUAL | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_RX_QUAD;
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->prepare_message = qcom_qspi_prepare_message;
+ host->transfer_one = qcom_qspi_transfer_one;
+ host->handle_err = qcom_qspi_handle_err;
if (of_property_read_bool(pdev->dev.of_node, "iommus"))
- master->can_dma = qcom_qspi_can_dma;
- master->auto_runtime_pm = true;
- master->mem_ops = &qcom_qspi_mem_ops;
+ host->can_dma = qcom_qspi_can_dma;
+ host->auto_runtime_pm = true;
+ host->mem_ops = &qcom_qspi_mem_ops;
ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
if (ret)
@@ -794,7 +794,7 @@ static int qcom_qspi_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, 250);
pm_runtime_enable(dev);
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (!ret)
return 0;
@@ -805,18 +805,18 @@ static int qcom_qspi_probe(struct platform_device *pdev)
static void qcom_qspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_controller *host = platform_get_drvdata(pdev);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
- spi_unregister_master(master);
+ spi_unregister_controller(host);
pm_runtime_disable(&pdev->dev);
}
static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
int ret;
/* Drop the performance state vote */
@@ -837,8 +837,8 @@ static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct qcom_qspi *ctrl = spi_controller_get_devdata(host);
int ret;
pinctrl_pm_select_default_state(dev);
@@ -859,30 +859,30 @@ static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
- spi_master_resume(master);
+ spi_controller_resume(host);
return ret;
}
static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret)
pm_runtime_force_suspend(dev);
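The system-sleep hooks above keep their ordering after the rename: quiesce the SPI core first, then force runtime suspend, and resume the controller again if that step fails so the bus is not left half-suspended. Restated as a stand-alone sketch (the function name is illustrative):

#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int example_spi_sys_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(host);	/* stop queueing new transfers */
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);	/* power the block down */
	if (ret)
		spi_controller_resume(host);	/* undo on failure */

	return ret;
}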
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 00e5e88e72c4..4b6f6b25219b 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -11,7 +11,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -386,20 +385,20 @@ static void spi_qup_write(struct spi_qup *controller)
} while (remainder);
}
-static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
+static int spi_qup_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
unsigned int nents, enum dma_transfer_direction dir,
dma_async_tx_callback callback)
{
- struct spi_qup *qup = spi_master_get_devdata(master);
+ struct spi_qup *qup = spi_controller_get_devdata(host);
unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan;
dma_cookie_t cookie;
if (dir == DMA_MEM_TO_DEV)
- chan = master->dma_tx;
+ chan = host->dma_tx;
else
- chan = master->dma_rx;
+ chan = host->dma_rx;
desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
if (IS_ERR_OR_NULL(desc))
@@ -413,13 +412,13 @@ static int spi_qup_prep_sg(struct spi_master *master, struct scatterlist *sgl,
return dma_submit_error(cookie);
}
-static void spi_qup_dma_terminate(struct spi_master *master,
+static void spi_qup_dma_terminate(struct spi_controller *host,
struct spi_transfer *xfer)
{
if (xfer->tx_buf)
- dmaengine_terminate_all(master->dma_tx);
+ dmaengine_terminate_all(host->dma_tx);
if (xfer->rx_buf)
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(host->dma_rx);
}
static u32 spi_qup_sgl_get_nents_len(struct scatterlist *sgl, u32 max,
@@ -446,8 +445,8 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
unsigned long timeout)
{
dma_async_tx_callback rx_done = NULL, tx_done = NULL;
- struct spi_master *master = spi->master;
- struct spi_qup *qup = spi_master_get_devdata(master);
+ struct spi_controller *host = spi->controller;
+ struct spi_qup *qup = spi_controller_get_devdata(host);
struct scatterlist *tx_sgl, *rx_sgl;
int ret;
@@ -482,20 +481,20 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
return ret;
}
if (rx_sgl) {
- ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+ ret = spi_qup_prep_sg(host, rx_sgl, rx_nents,
DMA_DEV_TO_MEM, rx_done);
if (ret)
return ret;
- dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(host->dma_rx);
}
if (tx_sgl) {
- ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+ ret = spi_qup_prep_sg(host, tx_sgl, tx_nents,
DMA_MEM_TO_DEV, tx_done);
if (ret)
return ret;
- dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(host->dma_tx);
}
if (!wait_for_completion_timeout(&qup->done, timeout))
@@ -514,8 +513,8 @@ static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
unsigned long timeout)
{
- struct spi_master *master = spi->master;
- struct spi_qup *qup = spi_master_get_devdata(master);
+ struct spi_controller *host = spi->controller;
+ struct spi_qup *qup = spi_controller_get_devdata(host);
int ret, n_words, iterations, offset = 0;
n_words = qup->n_words;
@@ -659,7 +658,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
/* set clock freq ... bits per word, determine mode */
static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
int ret;
if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -680,9 +679,9 @@ static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
controller->mode = QUP_IO_M_MODE_FIFO;
- else if (spi->master->can_dma &&
- spi->master->can_dma(spi->master, spi, xfer) &&
- spi->master->cur_msg_mapped)
+ else if (spi->controller->can_dma &&
+ spi->controller->can_dma(spi->controller, spi, xfer) &&
+ spi->controller->cur_msg_mapped)
controller->mode = QUP_IO_M_MODE_BAM;
else
controller->mode = QUP_IO_M_MODE_BLOCK;
@@ -693,7 +692,7 @@ static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
/* prep qup for another spi transaction of specific type */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ struct spi_qup *controller = spi_controller_get_devdata(spi->controller);
u32 config, iomode, control;
unsigned long flags;
@@ -841,11 +840,11 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
return 0;
}
-static int spi_qup_transfer_one(struct spi_master *master,
+static int spi_qup_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct spi_qup *controller = spi_master_get_devdata(master);
+ struct spi_qup *controller = spi_controller_get_devdata(host);
unsigned long timeout, flags;
int ret;
@@ -879,21 +878,21 @@ static int spi_qup_transfer_one(struct spi_master *master,
spin_unlock_irqrestore(&controller->lock, flags);
if (ret && spi_qup_is_dma_xfer(controller->mode))
- spi_qup_dma_terminate(master, xfer);
+ spi_qup_dma_terminate(host, xfer);
return ret;
}
-static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
+static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct spi_qup *qup = spi_master_get_devdata(master);
+ struct spi_qup *qup = spi_controller_get_devdata(host);
size_t dma_align = dma_get_cache_alignment();
int n_words;
if (xfer->rx_buf) {
if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
- IS_ERR_OR_NULL(master->dma_rx))
+ IS_ERR_OR_NULL(host->dma_rx))
return false;
if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
return false;
@@ -901,7 +900,7 @@ static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
if (xfer->tx_buf) {
if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
- IS_ERR_OR_NULL(master->dma_tx))
+ IS_ERR_OR_NULL(host->dma_tx))
return false;
if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
return false;
@@ -914,30 +913,30 @@ static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
return true;
}
-static void spi_qup_release_dma(struct spi_master *master)
+static void spi_qup_release_dma(struct spi_controller *host)
{
- if (!IS_ERR_OR_NULL(master->dma_rx))
- dma_release_channel(master->dma_rx);
- if (!IS_ERR_OR_NULL(master->dma_tx))
- dma_release_channel(master->dma_tx);
+ if (!IS_ERR_OR_NULL(host->dma_rx))
+ dma_release_channel(host->dma_rx);
+ if (!IS_ERR_OR_NULL(host->dma_tx))
+ dma_release_channel(host->dma_tx);
}
-static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
+static int spi_qup_init_dma(struct spi_controller *host, resource_size_t base)
{
- struct spi_qup *spi = spi_master_get_devdata(master);
+ struct spi_qup *spi = spi_controller_get_devdata(host);
struct dma_slave_config *rx_conf = &spi->rx_conf,
*tx_conf = &spi->tx_conf;
struct device *dev = spi->dev;
int ret;
/* allocate dma resources, if available */
- master->dma_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(master->dma_rx))
- return PTR_ERR(master->dma_rx);
+ host->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_rx))
+ return PTR_ERR(host->dma_rx);
- master->dma_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(master->dma_tx)) {
- ret = PTR_ERR(master->dma_tx);
+ host->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
+ ret = PTR_ERR(host->dma_tx);
goto err_tx;
}
@@ -952,13 +951,13 @@ static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
tx_conf->dst_maxburst = spi->out_blk_sz;
- ret = dmaengine_slave_config(master->dma_rx, rx_conf);
+ ret = dmaengine_slave_config(host->dma_rx, rx_conf);
if (ret) {
dev_err(dev, "failed to configure RX channel\n");
goto err;
}
- ret = dmaengine_slave_config(master->dma_tx, tx_conf);
+ ret = dmaengine_slave_config(host->dma_tx, tx_conf);
if (ret) {
dev_err(dev, "failed to configure TX channel\n");
goto err;
@@ -967,9 +966,9 @@ static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
return 0;
err:
- dma_release_channel(master->dma_tx);
+ dma_release_channel(host->dma_tx);
err_tx:
- dma_release_channel(master->dma_rx);
+ dma_release_channel(host->dma_rx);
return ret;
}
@@ -979,7 +978,7 @@ static void spi_qup_set_cs(struct spi_device *spi, bool val)
u32 spi_ioc;
u32 spi_ioc_orig;
- controller = spi_master_get_devdata(spi->master);
+ controller = spi_controller_get_devdata(spi->controller);
spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL);
spi_ioc_orig = spi_ioc;
if (!val)
@@ -993,7 +992,7 @@ static void spi_qup_set_cs(struct spi_device *spi, bool val)
static int spi_qup_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct clk *iclk, *cclk;
struct spi_qup *controller;
struct resource *res;
@@ -1028,34 +1027,34 @@ static int spi_qup_probe(struct platform_device *pdev)
return -ENXIO;
}
- master = spi_alloc_master(dev, sizeof(struct spi_qup));
- if (!master) {
- dev_err(dev, "cannot allocate master\n");
+ host = spi_alloc_host(dev, sizeof(struct spi_qup));
+ if (!host) {
+ dev_err(dev, "cannot allocate host\n");
return -ENOMEM;
}
/* use num-cs unless not present or out of range */
if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
num_cs > SPI_NUM_CHIPSELECTS)
- master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ host->num_chipselect = SPI_NUM_CHIPSELECTS;
else
- master->num_chipselect = num_cs;
+ host->num_chipselect = num_cs;
- master->use_gpio_descriptors = true;
- master->max_native_cs = SPI_NUM_CHIPSELECTS;
- master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->max_speed_hz = max_freq;
- master->transfer_one = spi_qup_transfer_one;
- master->dev.of_node = pdev->dev.of_node;
- master->auto_runtime_pm = true;
- master->dma_alignment = dma_get_cache_alignment();
- master->max_dma_len = SPI_MAX_XFER;
+ host->use_gpio_descriptors = true;
+ host->max_native_cs = SPI_NUM_CHIPSELECTS;
+ host->bus_num = pdev->id;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ host->max_speed_hz = max_freq;
+ host->transfer_one = spi_qup_transfer_one;
+ host->dev.of_node = pdev->dev.of_node;
+ host->auto_runtime_pm = true;
+ host->dma_alignment = dma_get_cache_alignment();
+ host->max_dma_len = SPI_MAX_XFER;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- controller = spi_master_get_devdata(master);
+ controller = spi_controller_get_devdata(host);
controller->dev = dev;
controller->base = base;
@@ -1063,16 +1062,16 @@ static int spi_qup_probe(struct platform_device *pdev)
controller->cclk = cclk;
controller->irq = irq;
- ret = spi_qup_init_dma(master, res->start);
+ ret = spi_qup_init_dma(host, res->start);
if (ret == -EPROBE_DEFER)
goto error;
else if (!ret)
- master->can_dma = spi_qup_can_dma;
+ host->can_dma = spi_qup_can_dma;
controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev);
if (!controller->qup_v1)
- master->set_cs = spi_qup_set_cs;
+ host->set_cs = spi_qup_set_cs;
spin_lock_init(&controller->lock);
init_completion(&controller->done);
@@ -1150,7 +1149,7 @@ static int spi_qup_probe(struct platform_device *pdev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret)
goto disable_pm;
@@ -1162,17 +1161,17 @@ error_clk:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
error_dma:
- spi_qup_release_dma(master);
+ spi_qup_release_dma(host);
error:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
- struct spi_master *master = dev_get_drvdata(device);
- struct spi_qup *controller = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_controller_get_devdata(host);
u32 config;
/* Enable clocks auto gaiting */
@@ -1188,8 +1187,8 @@ static int spi_qup_pm_suspend_runtime(struct device *device)
static int spi_qup_pm_resume_runtime(struct device *device)
{
- struct spi_master *master = dev_get_drvdata(device);
- struct spi_qup *controller = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_controller_get_devdata(host);
u32 config;
int ret;
@@ -1214,8 +1213,8 @@ static int spi_qup_pm_resume_runtime(struct device *device)
#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
- struct spi_master *master = dev_get_drvdata(device);
- struct spi_qup *controller = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_controller_get_devdata(host);
int ret;
if (pm_runtime_suspended(device)) {
@@ -1223,7 +1222,7 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
}
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -1238,8 +1237,8 @@ static int spi_qup_suspend(struct device *device)
static int spi_qup_resume(struct device *device)
{
- struct spi_master *master = dev_get_drvdata(device);
- struct spi_qup *controller = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(controller->iclk);
@@ -1256,7 +1255,7 @@ static int spi_qup_resume(struct device *device)
if (ret)
goto disable_clk;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret)
goto disable_clk;
@@ -1271,8 +1270,8 @@ disable_clk:
static void spi_qup_remove(struct platform_device *pdev)
{
- struct spi_master *master = dev_get_drvdata(&pdev->dev);
- struct spi_qup *controller = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_controller_get_devdata(host);
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
@@ -1290,7 +1289,7 @@ static void spi_qup_remove(struct platform_device *pdev)
ERR_PTR(ret));
}
- spi_qup_release_dma(master);
+ spi_qup_release_dma(host);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
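spi_qup_prep_sg() above picks the host's TX or RX dmaengine channel by transfer direction, prepares a slave scatter-gather descriptor, attaches the completion callback and submits it; the caller then kicks each channel with dma_async_issue_pending(). A condensed sketch of that sequence, assuming the same error handling as the driver (the function name is illustrative):

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>

static int example_prep_sg(struct spi_controller *host, struct scatterlist *sgl,
			   unsigned int nents, enum dma_transfer_direction dir,
			   dma_async_tx_callback done)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ? host->dma_tx : host->dma_rx;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
				       DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = done;			/* completion notification */
	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* the driver above issues this from the caller */

	return dma_submit_error(cookie);
}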
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
index 5073736d3d1f..225f75550780 100644
--- a/drivers/spi/spi-rb4xx.c
+++ b/drivers/spi/spi-rb4xx.c
@@ -80,7 +80,7 @@ static void do_spi_byte_two(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
static void rb4xx_set_cs(struct spi_device *spi, bool enable)
{
- struct rb4xx_spi *rbspi = spi_master_get_devdata(spi->master);
+ struct rb4xx_spi *rbspi = spi_controller_get_devdata(spi->controller);
/*
* Setting CS is done along with bitbanging the actual values,
@@ -92,10 +92,10 @@ static void rb4xx_set_cs(struct spi_device *spi, bool enable)
AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1);
}
-static int rb4xx_transfer_one(struct spi_master *master,
+static int rb4xx_transfer_one(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
- struct rb4xx_spi *rbspi = spi_master_get_devdata(master);
+ struct rb4xx_spi *rbspi = spi_controller_get_devdata(host);
int i;
u32 spi_ioc;
u8 *rx_buf;
@@ -126,14 +126,14 @@ static int rb4xx_transfer_one(struct spi_master *master,
continue;
rx_buf[i] = rb4xx_read(rbspi, AR71XX_SPI_REG_RDS);
}
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return 0;
}
static int rb4xx_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct clk *ahb_clk;
struct rb4xx_spi *rbspi;
int err;
@@ -143,31 +143,31 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(spi_base))
return PTR_ERR(spi_base);
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*rbspi));
+ if (!host)
return -ENOMEM;
ahb_clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(ahb_clk))
return PTR_ERR(ahb_clk);
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = 0;
- master->num_chipselect = 3;
- master->mode_bits = SPI_TX_DUAL;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->flags = SPI_MASTER_MUST_TX;
- master->transfer_one = rb4xx_transfer_one;
- master->set_cs = rb4xx_set_cs;
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = 0;
+ host->num_chipselect = 3;
+ host->mode_bits = SPI_TX_DUAL;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->flags = SPI_CONTROLLER_MUST_TX;
+ host->transfer_one = rb4xx_transfer_one;
+ host->set_cs = rb4xx_set_cs;
- rbspi = spi_master_get_devdata(master);
+ rbspi = spi_controller_get_devdata(host);
rbspi->base = spi_base;
rbspi->clk = ahb_clk;
platform_set_drvdata(pdev, rbspi);
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
- dev_err(&pdev->dev, "failed to register SPI master\n");
+ dev_err(&pdev->dev, "failed to register SPI host\n");
return err;
}
diff --git a/drivers/spi/spi-realtek-rtl.c b/drivers/spi/spi-realtek-rtl.c
index 866b0477dbd7..0b0123e20b54 100644
--- a/drivers/spi/spi-realtek-rtl.c
+++ b/drivers/spi/spi-realtek-rtl.c
@@ -153,7 +153,7 @@ static int realtek_rtl_spi_probe(struct platform_device *pdev)
struct rtspi *rtspi;
int err;
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*rtspi));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*rtspi));
if (!ctrl) {
dev_err(&pdev->dev, "Error allocating SPI controller\n");
return -ENOMEM;
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 583f4187f030..0d7fadcd4ed3 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -487,7 +487,7 @@ static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+ struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
u32 len = op->data.nbytes;
int ret;
@@ -523,7 +523,7 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op
static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
- struct rockchip_sfc *sfc = spi_master_get_devdata(mem->spi->master);
+ struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller);
op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
@@ -557,22 +557,22 @@ static irqreturn_t rockchip_sfc_irq_handler(int irq, void *dev_id)
static int rockchip_sfc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct rockchip_sfc *sfc;
int ret;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*sfc));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc));
+ if (!host)
return -ENOMEM;
- master->flags = SPI_MASTER_HALF_DUPLEX;
- master->mem_ops = &rockchip_sfc_mem_ops;
- master->dev.of_node = pdev->dev.of_node;
- master->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
- master->max_speed_hz = SFC_MAX_SPEED;
- master->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
+ host->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ host->mem_ops = &rockchip_sfc_mem_ops;
+ host->dev.of_node = pdev->dev.of_node;
+ host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL;
+ host->max_speed_hz = SFC_MAX_SPEED;
+ host->num_chipselect = SFC_MAX_CHIPSELECT_NUM;
- sfc = spi_master_get_devdata(master);
+ sfc = spi_controller_get_devdata(host);
sfc->dev = dev;
sfc->regbase = devm_platform_ioremap_resource(pdev, 0);
@@ -640,7 +640,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
sfc->version = rockchip_sfc_get_version(sfc);
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret)
goto err_irq;
@@ -656,10 +656,10 @@ err_hclk:
static void rockchip_sfc_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_controller *host = platform_get_drvdata(pdev);
struct rockchip_sfc *sfc = platform_get_drvdata(pdev);
- spi_unregister_master(master);
+ spi_unregister_controller(host);
clk_disable_unprepare(sfc->clk);
clk_disable_unprepare(sfc->hclk);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 143ede958ac1..5b010094dace 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -104,8 +104,8 @@
#define CR0_XFM_RO 0x2
#define CR0_OPM_OFFSET 20
-#define CR0_OPM_MASTER 0x0
-#define CR0_OPM_SLAVE 0x1
+#define CR0_OPM_HOST 0x0
+#define CR0_OPM_TARGET 0x1
#define CR0_SOI_OFFSET 23
@@ -125,7 +125,7 @@
#define SR_TF_EMPTY (1 << 2)
#define SR_RF_EMPTY (1 << 3)
#define SR_RF_FULL (1 << 4)
-#define SR_SLAVE_TX_BUSY (1 << 5)
+#define SR_TARGET_TX_BUSY (1 << 5)
/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK 0x1f
@@ -151,7 +151,7 @@
#define RXDMA (1 << 0)
#define TXDMA (1 << 1)
-/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
+/* sclk_out: spi host internal logic in rk3x can support 50MHz */
#define MAX_SCLK_OUT 50000000U
/*
@@ -194,8 +194,8 @@ struct rockchip_spi {
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
- bool slave_abort;
- bool cs_inactive; /* spi slave tansmition stop when cs inactive */
+ bool target_abort;
+ bool cs_inactive; /* spi target transmission stops when cs inactive */
bool cs_high_supported; /* native CS supports active-high polarity */
struct spi_transfer *xfer; /* Store xfer temporarily */
@@ -206,13 +206,13 @@ static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
-static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
+static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool target_mode)
{
unsigned long timeout = jiffies + msecs_to_jiffies(5);
do {
- if (slave_mode) {
- if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_SLAVE_TX_BUSY) &&
+ if (target_mode) {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_TARGET_TX_BUSY) &&
!((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY)))
return;
} else {
@@ -351,9 +351,9 @@ static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
struct spi_controller *ctlr = dev_id;
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
- /* When int_cs_inactive comes, spi slave abort */
+ /* When int_cs_inactive comes, spi target abort */
if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
- ctlr->slave_abort(ctlr);
+ ctlr->target_abort(ctlr);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
@@ -405,7 +405,7 @@ static void rockchip_spi_dma_rxcb(void *data)
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int state = atomic_fetch_andnot(RXDMA, &rs->state);
- if (state & TXDMA && !rs->slave_abort)
+ if (state & TXDMA && !rs->target_abort)
return;
if (rs->cs_inactive)
@@ -421,11 +421,11 @@ static void rockchip_spi_dma_txcb(void *data)
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
int state = atomic_fetch_andnot(TXDMA, &rs->state);
- if (state & RXDMA && !rs->slave_abort)
+ if (state & RXDMA && !rs->target_abort)
return;
/* Wait until the FIFO data completely. */
- wait_for_tx_idle(rs, ctlr->slave);
+ wait_for_tx_idle(rs, ctlr->target);
spi_enable_chip(rs, false);
spi_finalize_current_transfer(ctlr);
@@ -525,7 +525,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
static int rockchip_spi_config(struct rockchip_spi *rs,
struct spi_device *spi, struct spi_transfer *xfer,
- bool use_dma, bool slave_mode)
+ bool use_dma, bool target_mode)
{
u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
| CR0_BHT_8BIT << CR0_BHT_OFFSET
@@ -534,9 +534,9 @@ static int rockchip_spi_config(struct rockchip_spi *rs,
u32 cr1;
u32 dmacr = 0;
- if (slave_mode)
- cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
- rs->slave_abort = false;
+ if (target_mode)
+ cr0 |= CR0_OPM_TARGET << CR0_OPM_OFFSET;
+ rs->target_abort = false;
cr0 |= rs->rsd << CR0_RSD_OFFSET;
cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
@@ -614,7 +614,7 @@ static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
return ROCKCHIP_SPI_MAX_TRANLEN;
}
-static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
+static int rockchip_spi_target_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
u32 rx_fifo_left;
@@ -659,7 +659,7 @@ out:
dmaengine_terminate_sync(ctlr->dma_tx);
atomic_set(&rs->state, 0);
spi_enable_chip(rs, false);
- rs->slave_abort = true;
+ rs->target_abort = true;
spi_finalize_current_transfer(ctlr);
return 0;
@@ -697,7 +697,7 @@ static int rockchip_spi_transfer_one(
rs->xfer = xfer;
use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
- ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
+ ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->target);
if (ret)
return ret;
@@ -757,15 +757,15 @@ static int rockchip_spi_probe(struct platform_device *pdev)
struct resource *mem;
struct device_node *np = pdev->dev.of_node;
u32 rsd_nsecs, num_cs;
- bool slave_mode;
+ bool target_mode;
- slave_mode = of_property_read_bool(np, "spi-slave");
+ target_mode = of_property_read_bool(np, "spi-slave");
- if (slave_mode)
- ctlr = spi_alloc_slave(&pdev->dev,
+ if (target_mode)
+ ctlr = spi_alloc_target(&pdev->dev,
sizeof(struct rockchip_spi));
else
- ctlr = spi_alloc_master(&pdev->dev,
+ ctlr = spi_alloc_host(&pdev->dev,
sizeof(struct rockchip_spi));
if (!ctlr)
@@ -854,11 +854,11 @@ static int rockchip_spi_probe(struct platform_device *pdev)
ctlr->auto_runtime_pm = true;
ctlr->bus_num = pdev->id;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
- if (slave_mode) {
+ if (target_mode) {
ctlr->mode_bits |= SPI_NO_CS;
- ctlr->slave_abort = rockchip_spi_slave_abort;
+ ctlr->target_abort = rockchip_spi_target_abort;
} else {
- ctlr->flags = SPI_MASTER_GPIO_SS;
+ ctlr->flags = SPI_CONTROLLER_GPIO_SS;
ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
/*
* rk spi0 has two native cs, spi1..5 one cs only
@@ -911,7 +911,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
case ROCKCHIP_SPI_VER2_TYPE2:
rs->cs_high_supported = true;
ctlr->mode_bits |= SPI_CS_HIGH;
- if (ctlr->can_dma && slave_mode)
+ if (ctlr->can_dma && target_mode)
rs->cs_inactive = true;
else
rs->cs_inactive = false;
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index 2f78124a1b59..e11146932828 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -134,7 +134,7 @@ static int rpcif_spi_probe(struct platform_device *pdev)
struct rpcif *rpc;
int error;
- ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*rpc));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*rpc));
if (!ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 08ceebbaf69b..8e81f1a8623f 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -19,7 +19,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sh_dma.h>
@@ -1294,7 +1294,7 @@ static int rspi_probe(struct platform_device *pdev)
const struct spi_ops *ops;
unsigned long clksrc;
- ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(struct rspi_data));
if (ctlr == NULL)
return -ENOMEM;
@@ -1317,8 +1317,7 @@ static int rspi_probe(struct platform_device *pdev)
rspi->ops = ops;
rspi->ctlr = ctlr;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+ rspi->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(rspi->addr)) {
ret = PTR_ERR(rspi->addr);
goto error1;
diff --git a/drivers/spi/spi-rzv2m-csi.c b/drivers/spi/spi-rzv2m-csi.c
index 14ad65da930d..d0f51b17aa7c 100644
--- a/drivers/spi/spi-rzv2m-csi.c
+++ b/drivers/spi/spi-rzv2m-csi.c
@@ -5,13 +5,17 @@
* Copyright (C) 2023 Renesas Electronics Corporation
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/count_zeros.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/log2.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
+#include <linux/units.h>
/* Registers */
#define CSI_MODE 0x00 /* CSI mode control */
@@ -36,6 +40,7 @@
/* CSI_CLKSEL */
#define CSI_CLKSEL_CKP BIT(17)
#define CSI_CLKSEL_DAP BIT(16)
+#define CSI_CLKSEL_MODE (CSI_CLKSEL_CKP|CSI_CLKSEL_DAP)
#define CSI_CLKSEL_SLAVE BIT(15)
#define CSI_CLKSEL_CKS GENMASK(14, 1)
@@ -60,17 +65,22 @@
/* CSI_FIFOTRG */
#define CSI_FIFOTRG_R_TRG GENMASK(2, 0)
-#define CSI_FIFO_SIZE_BYTES 32
-#define CSI_FIFO_HALF_SIZE 16
+#define CSI_FIFO_SIZE_BYTES 32U
+#define CSI_FIFO_HALF_SIZE 16U
#define CSI_EN_DIS_TIMEOUT_US 100
-#define CSI_CKS_MAX 0x3FFF
+/*
+ * Clock "csiclk" gets divided by 2 * CSI_CLKSEL_CKS in order to generate the
+ * serial clock (output from master), with CSI_CLKSEL_CKS ranging from 0x1 (that
+ * means "csiclk" is divided by 2) to 0x3FFF ("csiclk" is divided by 32766).
+ */
+#define CSI_CKS_MAX GENMASK(13, 0)
#define UNDERRUN_ERROR BIT(0)
#define OVERFLOW_ERROR BIT(1)
#define TX_TIMEOUT_ERROR BIT(2)
#define RX_TIMEOUT_ERROR BIT(3)
-#define CSI_MAX_SPI_SCKO 8000000
+#define CSI_MAX_SPI_SCKO (8 * HZ_PER_MHZ)
struct rzv2m_csi_priv {
void __iomem *base;
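The new CSI_CKS_MAX comment gives the divider relation: the serial clock is csiclk / (2 * CSI_CLKSEL_CKS), with CKS between 0x1 and 0x3FFF. A minimal sketch of deriving CKS from a requested rate, using hypothetical numbers only to illustrate the arithmetic (a 48 MHz csiclk and an 8 MHz request give CKS = 3):

#include <linux/kernel.h>
#include <linux/minmax.h>

static u32 example_csi_cks(unsigned long csiclk_hz, unsigned long sclk_hz)
{
	/* sclk = csiclk / (2 * cks)  =>  cks = csiclk / (2 * sclk);
	 * round up so the generated clock never exceeds the request */
	u32 cks = DIV_ROUND_UP(csiclk_hz, 2 * sclk_hz);

	return clamp_t(u32, cks, 0x1, 0x3FFF);	/* CSI_CKS_MAX */
}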
@@ -78,33 +88,19 @@ struct rzv2m_csi_priv {
struct clk *pclk;
struct device *dev;
struct spi_controller *controller;
- const u8 *txbuf;
- u8 *rxbuf;
- int buffer_len;
- int bytes_sent;
- int bytes_received;
- int bytes_to_transfer;
- int words_to_transfer;
- unsigned char bytes_per_word;
+ const void *txbuf;
+ void *rxbuf;
+ unsigned int buffer_len;
+ unsigned int bytes_sent;
+ unsigned int bytes_received;
+ unsigned int bytes_to_transfer;
+ unsigned int words_to_transfer;
+ unsigned int bytes_per_word;
wait_queue_head_t wait;
- u8 errors;
+ u32 errors;
u32 status;
};
-static const unsigned char x_trg[] = {
- 0, 1, 1, 2, 2, 2, 2, 3,
- 3, 3, 3, 3, 3, 3, 3, 4,
- 4, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 4, 4, 5
-};
-
-static const unsigned char x_trg_words[] = {
- 1, 2, 2, 4, 4, 4, 4, 8,
- 8, 8, 8, 8, 8, 8, 8, 16,
- 16, 16, 16, 16, 16, 16, 16, 16,
- 16, 16, 16, 16, 16, 16, 16, 32
-};
-
static void rzv2m_csi_reg_write_bit(const struct rzv2m_csi_priv *csi,
int reg_offs, int bit_mask, u32 value)
{
@@ -124,13 +120,12 @@ static int rzv2m_csi_sw_reset(struct rzv2m_csi_priv *csi, int assert)
rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_CSIRST, assert);
- if (assert) {
- return readl_poll_timeout(csi->base + CSI_MODE, reg,
- !(reg & CSI_MODE_CSOT), 0,
- CSI_EN_DIS_TIMEOUT_US);
- }
+ if (!assert)
+ return 0;
- return 0;
+ return readl_poll_timeout(csi->base + CSI_MODE, reg,
+ !(reg & CSI_MODE_CSOT), 0,
+ CSI_EN_DIS_TIMEOUT_US);
}
static int rzv2m_csi_start_stop_operation(const struct rzv2m_csi_priv *csi,
@@ -140,28 +135,28 @@ static int rzv2m_csi_start_stop_operation(const struct rzv2m_csi_priv *csi,
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CSIE, enable);
- if (!enable && wait)
- return readl_poll_timeout(csi->base + CSI_MODE, reg,
- !(reg & CSI_MODE_CSOT), 0,
- CSI_EN_DIS_TIMEOUT_US);
+ if (enable || !wait)
+ return 0;
- return 0;
+ return readl_poll_timeout(csi->base + CSI_MODE, reg,
+ !(reg & CSI_MODE_CSOT), 0,
+ CSI_EN_DIS_TIMEOUT_US);
}
static int rzv2m_csi_fill_txfifo(struct rzv2m_csi_priv *csi)
{
- int i;
+ unsigned int i;
if (readl(csi->base + CSI_OFIFOL))
return -EIO;
if (csi->bytes_per_word == 2) {
- u16 *buf = (u16 *)csi->txbuf;
+ const u16 *buf = csi->txbuf;
for (i = 0; i < csi->words_to_transfer; i++)
writel(buf[i], csi->base + CSI_OFIFO);
} else {
- u8 *buf = (u8 *)csi->txbuf;
+ const u8 *buf = csi->txbuf;
for (i = 0; i < csi->words_to_transfer; i++)
writel(buf[i], csi->base + CSI_OFIFO);
@@ -175,18 +170,18 @@ static int rzv2m_csi_fill_txfifo(struct rzv2m_csi_priv *csi)
static int rzv2m_csi_read_rxfifo(struct rzv2m_csi_priv *csi)
{
- int i;
+ unsigned int i;
if (readl(csi->base + CSI_IFIFOL) != csi->bytes_to_transfer)
return -EIO;
if (csi->bytes_per_word == 2) {
- u16 *buf = (u16 *)csi->rxbuf;
+ u16 *buf = csi->rxbuf;
for (i = 0; i < csi->words_to_transfer; i++)
buf[i] = (u16)readl(csi->base + CSI_IFIFO);
} else {
- u8 *buf = (u8 *)csi->rxbuf;
+ u8 *buf = csi->rxbuf;
for (i = 0; i < csi->words_to_transfer; i++)
buf[i] = (u8)readl(csi->base + CSI_IFIFO);
@@ -200,9 +195,9 @@ static int rzv2m_csi_read_rxfifo(struct rzv2m_csi_priv *csi)
static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
{
- int bytes_transferred = max_t(int, csi->bytes_received, csi->bytes_sent);
- int bytes_remaining = csi->buffer_len - bytes_transferred;
- int to_transfer;
+ unsigned int bytes_transferred = max(csi->bytes_received, csi->bytes_sent);
+ unsigned int bytes_remaining = csi->buffer_len - bytes_transferred;
+ unsigned int to_transfer;
if (csi->txbuf)
/*
@@ -210,9 +205,9 @@ static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
* hard to raise an overflow error (which is only possible
* when IP transmits and receives at the same time).
*/
- to_transfer = min_t(int, CSI_FIFO_HALF_SIZE, bytes_remaining);
+ to_transfer = min(CSI_FIFO_HALF_SIZE, bytes_remaining);
else
- to_transfer = min_t(int, CSI_FIFO_SIZE_BYTES, bytes_remaining);
+ to_transfer = min(CSI_FIFO_SIZE_BYTES, bytes_remaining);
if (csi->bytes_per_word == 2)
to_transfer >>= 1;
@@ -223,7 +218,7 @@ static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
* less than or equal to the number of bytes we need to transfer.
* This may result in multiple smaller transfers.
*/
- csi->words_to_transfer = x_trg_words[to_transfer - 1];
+ csi->words_to_transfer = rounddown_pow_of_two(to_transfer);
if (csi->bytes_per_word == 2)
csi->bytes_to_transfer = csi->words_to_transfer << 1;
@@ -234,7 +229,7 @@ static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
static inline void rzv2m_csi_set_rx_fifo_trigger_level(struct rzv2m_csi_priv *csi)
{
rzv2m_csi_reg_write_bit(csi, CSI_FIFOTRG, CSI_FIFOTRG_R_TRG,
- x_trg[csi->words_to_transfer - 1]);
+ ilog2(csi->words_to_transfer));
}
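The two hunks above replace the deleted x_trg_words[] and x_trg[] lookup tables with direct computation: for a transfer of n entries (1..32) the tables held the largest power of two not exceeding n and its base-2 logarithm, which rounddown_pow_of_two() and ilog2() produce directly. A small equivalence sketch (the helper name is illustrative):

#include <linux/log2.h>

/* x_trg_words[n - 1] == rounddown_pow_of_two(n)
 * x_trg[n - 1]       == ilog2(n)
 * e.g. n = 20: words_to_transfer = 16, R_TRG trigger code = 4.
 */
static void example_fifo_trigger(unsigned int n, unsigned int *words, unsigned int *r_trg)
{
	*words = rounddown_pow_of_two(n);	/* was x_trg_words[n - 1] */
	*r_trg = ilog2(*words);			/* was x_trg[n - 1] */
}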
static inline void rzv2m_csi_enable_rx_trigger(struct rzv2m_csi_priv *csi,
@@ -307,7 +302,6 @@ static int rzv2m_csi_wait_for_tx_empty(struct rzv2m_csi_priv *csi)
return 0;
ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_TREND, CSI_CNT_TREND_E);
-
if (ret == -ETIMEDOUT)
csi->errors |= TX_TIMEOUT_ERROR;
@@ -323,7 +317,6 @@ static inline int rzv2m_csi_wait_for_rx_ready(struct rzv2m_csi_priv *csi)
ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_R_TRGR,
CSI_CNT_R_TRGR_E);
-
if (ret == -ETIMEDOUT)
csi->errors |= RX_TIMEOUT_ERROR;
@@ -332,7 +325,7 @@ static inline int rzv2m_csi_wait_for_rx_ready(struct rzv2m_csi_priv *csi)
static irqreturn_t rzv2m_csi_irq_handler(int irq, void *data)
{
- struct rzv2m_csi_priv *csi = (struct rzv2m_csi_priv *)data;
+ struct rzv2m_csi_priv *csi = data;
csi->status = readl(csi->base + CSI_INT);
rzv2m_csi_disable_irqs(csi, csi->status);
@@ -402,10 +395,8 @@ static int rzv2m_csi_setup(struct spi_device *spi)
writel(CSI_MODE_SETUP, csi->base + CSI_MODE);
/* Setup clock polarity and phase timing */
- rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKP,
- !(spi->mode & SPI_CPOL));
- rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_DAP,
- !(spi->mode & SPI_CPHA));
+ rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_MODE,
+ ~spi->mode & SPI_MODE_X_MASK);
/* Setup serial data order */
rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_DIR,
@@ -433,8 +424,8 @@ static int rzv2m_csi_setup(struct spi_device *spi)
static int rzv2m_csi_pio_transfer(struct rzv2m_csi_priv *csi)
{
- bool tx_completed = csi->txbuf ? false : true;
- bool rx_completed = csi->rxbuf ? false : true;
+ bool tx_completed = !csi->txbuf;
+ bool rx_completed = !csi->rxbuf;
int ret = 0;
/* Make sure the TX FIFO is empty */
@@ -564,7 +555,7 @@ static int rzv2m_csi_probe(struct platform_device *pdev)
int irq;
int ret;
- controller = devm_spi_alloc_master(dev, sizeof(*csi));
+ controller = devm_spi_alloc_host(dev, sizeof(*csi));
if (!controller)
return -ENOMEM;
@@ -599,12 +590,13 @@ static int rzv2m_csi_probe(struct platform_device *pdev)
init_waitqueue_head(&csi->wait);
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
- controller->dev.of_node = pdev->dev.of_node;
controller->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
controller->setup = rzv2m_csi_setup;
controller->transfer_one = rzv2m_csi_transfer_one;
controller->use_gpio_descriptors = true;
+ device_set_node(&controller->dev, dev_fwnode(dev));
+
ret = devm_request_irq(dev, irq, rzv2m_csi_irq_handler, 0,
dev_name(dev), csi);
if (ret)
@@ -635,15 +627,13 @@ static int rzv2m_csi_probe(struct platform_device *pdev)
return 0;
}
-static int rzv2m_csi_remove(struct platform_device *pdev)
+static void rzv2m_csi_remove(struct platform_device *pdev)
{
struct rzv2m_csi_priv *csi = platform_get_drvdata(pdev);
spi_unregister_controller(csi->controller);
rzv2m_csi_sw_reset(csi, 1);
clk_disable_unprepare(csi->csiclk);
-
- return 0;
}
static const struct of_device_id rzv2m_csi_match[] = {
@@ -654,7 +644,7 @@ MODULE_DEVICE_TABLE(of, rzv2m_csi_match);
static struct platform_driver rzv2m_csi_drv = {
.probe = rzv2m_csi_probe,
- .remove = rzv2m_csi_remove,
+ .remove_new = rzv2m_csi_remove,
.driver = {
.name = "rzv2m_csi",
.of_match_table = rzv2m_csi_match,
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b6c2659a66ca..0e48ffd499b9 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -14,7 +14,6 @@
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_data/spi-s3c64xx.h>
@@ -164,9 +163,9 @@ struct s3c64xx_spi_port_config {
* struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
* @clk: Pointer to the spi clock.
* @src_clk: Pointer to the clock used to generate SPI signals.
- * @ioclk: Pointer to the i/o clock between master and slave
+ * @ioclk: Pointer to the i/o clock between host and target
* @pdev: Pointer to device's platform device data
- * @master: Pointer to the SPI Protocol master.
+ * @host: Pointer to the SPI Protocol host.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
@@ -187,7 +186,7 @@ struct s3c64xx_spi_driver_data {
struct clk *src_clk;
struct clk *ioclk;
struct platform_device *pdev;
- struct spi_master *master;
+ struct spi_controller *host;
struct s3c64xx_spi_info *cntrlr_info;
spinlock_t lock;
unsigned long sfr_start;
@@ -330,7 +329,7 @@ static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
{
struct s3c64xx_spi_driver_data *sdd =
- spi_master_get_devdata(spi->master);
+ spi_controller_get_devdata(spi->controller);
if (sdd->cntrlr_info->no_cs)
return;
@@ -352,9 +351,9 @@ static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
}
}
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
+static int s3c64xx_spi_prepare_transfer(struct spi_controller *spi)
{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(spi);
if (is_polling(sdd))
return 0;
@@ -382,9 +381,9 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
return 0;
}
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+static int s3c64xx_spi_unprepare_transfer(struct spi_controller *spi)
{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(spi);
if (is_polling(sdd))
return 0;
@@ -400,11 +399,11 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
return 0;
}
-static bool s3c64xx_spi_can_dma(struct spi_master *master,
+static bool s3c64xx_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
if (sdd->rx_dma.ch && sdd->tx_dma.ch) {
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
@@ -713,10 +712,10 @@ static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
-static int s3c64xx_spi_prepare_message(struct spi_master *master,
+static int s3c64xx_spi_prepare_message(struct spi_controller *host,
struct spi_message *msg)
{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
@@ -737,11 +736,11 @@ static size_t s3c64xx_spi_max_transfer_size(struct spi_device *spi)
return ctlr->can_dma ? S3C64XX_SPI_PACKET_CNT_MASK : SIZE_MAX;
}
-static int s3c64xx_spi_transfer_one(struct spi_master *master,
+static int s3c64xx_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
const void *tx_buf = NULL;
void *rx_buf = NULL;
@@ -891,15 +890,15 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
return status;
}
-static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
+static struct s3c64xx_spi_csinfo *s3c64xx_get_target_ctrldata(
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
- struct device_node *slave_np, *data_np = NULL;
+ struct device_node *target_np, *data_np = NULL;
u32 fb_delay = 0;
- slave_np = spi->dev.of_node;
- if (!slave_np) {
+ target_np = spi->dev.of_node;
+ if (!target_np) {
dev_err(&spi->dev, "device node not found\n");
return ERR_PTR(-EINVAL);
}
@@ -908,7 +907,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
if (!cs)
return ERR_PTR(-ENOMEM);
- data_np = of_get_child_by_name(slave_np, "controller-data");
+ data_np = of_get_child_by_name(target_np, "controller-data");
if (!data_np) {
dev_info(&spi->dev, "feedback delay set to default (0)\n");
return cs;
@@ -933,9 +932,9 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
int err;
int div;
- sdd = spi_master_get_devdata(spi->master);
+ sdd = spi_controller_get_devdata(spi->controller);
if (spi->dev.of_node) {
- cs = s3c64xx_get_slave_ctrldata(spi);
+ cs = s3c64xx_get_target_ctrldata(spi);
spi->controller_data = cs;
}
@@ -1023,7 +1022,7 @@ static void s3c64xx_spi_cleanup(struct spi_device *spi)
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
- struct spi_master *spi = sdd->master;
+ struct spi_controller *spi = sdd->host;
unsigned int val, clr = 0;
val = readl(sdd->regs + S3C64XX_SPI_STATUS);
@@ -1152,7 +1151,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
struct resource *mem_res;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
- struct spi_master *master;
+ struct spi_controller *host;
int ret, irq;
char clk_name[16];
@@ -1166,28 +1165,22 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"Platform_data missing!\n");
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res)
- return dev_err_probe(&pdev->dev, -ENXIO,
- "Unable to get SPI MEM resource\n");
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(&pdev->dev, irq, "Failed to get IRQ\n");
+ return irq;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(*sdd));
- if (!master)
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(*sdd));
+ if (!host)
return dev_err_probe(&pdev->dev, -ENOMEM,
- "Unable to allocate SPI Master\n");
+ "Unable to allocate SPI Host\n");
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- sdd = spi_master_get_devdata(master);
+ sdd = spi_controller_get_devdata(host);
sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
- sdd->master = master;
+ sdd->host = host;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
- sdd->sfr_start = mem_res->start;
if (pdev->dev.of_node) {
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0)
@@ -1203,31 +1196,32 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
sdd->rx_dma.direction = DMA_DEV_TO_MEM;
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = sdd->port_id;
- master->setup = s3c64xx_spi_setup;
- master->cleanup = s3c64xx_spi_cleanup;
- master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
- master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
- master->prepare_message = s3c64xx_spi_prepare_message;
- master->transfer_one = s3c64xx_spi_transfer_one;
- master->max_transfer_size = s3c64xx_spi_max_transfer_size;
- master->num_chipselect = sci->num_cs;
- master->use_gpio_descriptors = true;
- master->dma_alignment = 8;
- master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
- SPI_BPW_MASK(8);
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = sdd->port_id;
+ host->setup = s3c64xx_spi_setup;
+ host->cleanup = s3c64xx_spi_cleanup;
+ host->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
+ host->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
+ host->prepare_message = s3c64xx_spi_prepare_message;
+ host->transfer_one = s3c64xx_spi_transfer_one;
+ host->max_transfer_size = s3c64xx_spi_max_transfer_size;
+ host->num_chipselect = sci->num_cs;
+ host->use_gpio_descriptors = true;
+ host->dma_alignment = 8;
+ host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
if (sdd->port_conf->has_loopback)
- master->mode_bits |= SPI_LOOP;
- master->auto_runtime_pm = true;
+ host->mode_bits |= SPI_LOOP;
+ host->auto_runtime_pm = true;
if (!is_polling(sdd))
- master->can_dma = s3c64xx_spi_can_dma;
+ host->can_dma = s3c64xx_spi_can_dma;
- sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ sdd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(sdd->regs))
return PTR_ERR(sdd->regs);
+ sdd->sfr_start = mem_res->start;
if (sci->cfg_gpio && sci->cfg_gpio())
return dev_err_probe(&pdev->dev, -EBUSY,
@@ -1277,14 +1271,14 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
sdd->regs + S3C64XX_SPI_INT_EN);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret != 0) {
- dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
+ dev_err(&pdev->dev, "cannot register SPI host: %d\n", ret);
goto err_pm_put;
}
- dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
- sdd->port_id, master->num_chipselect);
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Targets attached\n",
+ sdd->port_id, host->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);
@@ -1303,8 +1297,8 @@ err_pm_put:
static void s3c64xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
@@ -1323,10 +1317,10 @@ static void s3c64xx_spi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
- int ret = spi_master_suspend(master);
+ int ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -1341,8 +1335,8 @@ static int s3c64xx_spi_suspend(struct device *dev)
static int s3c64xx_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
int ret;
@@ -1353,15 +1347,15 @@ static int s3c64xx_spi_resume(struct device *dev)
if (ret < 0)
return ret;
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
clk_disable_unprepare(sdd->clk);
clk_disable_unprepare(sdd->src_clk);
@@ -1372,8 +1366,8 @@ static int s3c64xx_spi_runtime_suspend(struct device *dev)
static int s3c64xx_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_controller_get_devdata(host);
int ret;
if (sdd->port_conf->clk_ioclk) {
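The s3c64xx hunks above are one instance of the tree-wide spi_master -> spi_controller rename: allocate with devm_spi_alloc_host(), fetch the driver state with spi_controller_get_devdata(), and register with devm_spi_register_controller(). A minimal probe sketch of that shape follows; struct demo_spi, demo_transfer_one() and the single chip select are illustrative placeholders, not taken from the patch.

    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct demo_spi {                       /* hypothetical driver state */
        void __iomem *regs;
    };

    static int demo_transfer_one(struct spi_controller *host,
                                 struct spi_device *spi,
                                 struct spi_transfer *t)
    {
        /* shift t->tx_buf out / clock t->rx_buf in here */
        return 0;
    }

    static int demo_probe(struct platform_device *pdev)
    {
        struct spi_controller *host;
        struct demo_spi *ds;

        /* devm_ variant: the core releases the controller on probe failure */
        host = devm_spi_alloc_host(&pdev->dev, sizeof(*ds));
        if (!host)
            return -ENOMEM;

        ds = spi_controller_get_devdata(host);
        platform_set_drvdata(pdev, host);

        host->dev.of_node = pdev->dev.of_node;
        host->num_chipselect = 1;
        host->transfer_one = demo_transfer_one;

        return devm_spi_register_controller(&pdev->dev, host);
    }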
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index d52ed67243f7..eecf9ea95ae3 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -12,7 +12,6 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_data/sc18is602.h>
#include <linux/gpio/consumer.h>
@@ -31,7 +30,7 @@ enum chips { sc18is602, sc18is602b, sc18is603 };
#define SC18IS602_MODE_CLOCK_DIV_128 0x3
struct sc18is602 {
- struct spi_master *master;
+ struct spi_controller *host;
struct device *dev;
u8 ctrl;
u32 freq;
@@ -180,10 +179,10 @@ static int sc18is602_check_transfer(struct spi_device *spi,
return 0;
}
-static int sc18is602_transfer_one(struct spi_master *master,
+static int sc18is602_transfer_one(struct spi_controller *host,
struct spi_message *m)
{
- struct sc18is602 *hw = spi_master_get_devdata(master);
+ struct sc18is602 *hw = spi_controller_get_devdata(host);
struct spi_device *spi = m->spi;
struct spi_transfer *t;
int status = 0;
@@ -214,7 +213,7 @@ static int sc18is602_transfer_one(struct spi_master *master,
spi_transfer_delay_exec(t);
}
m->status = status;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(host);
return status;
}
@@ -226,7 +225,7 @@ static size_t sc18is602_max_transfer_size(struct spi_device *spi)
static int sc18is602_setup(struct spi_device *spi)
{
- struct sc18is602 *hw = spi_master_get_devdata(spi->master);
+ struct sc18is602 *hw = spi_controller_get_devdata(spi->controller);
/* SC18IS602 does not support CS2 */
if (hw->id == sc18is602 && (spi_get_chipselect(spi, 0) == 2))
@@ -242,17 +241,17 @@ static int sc18is602_probe(struct i2c_client *client)
struct device_node *np = dev->of_node;
struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
struct sc18is602 *hw;
- struct spi_master *master;
+ struct spi_controller *host;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return -EINVAL;
- master = devm_spi_alloc_master(dev, sizeof(struct sc18is602));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(struct sc18is602));
+ if (!host)
return -ENOMEM;
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
i2c_set_clientdata(client, hw);
/* assert reset and then release */
@@ -261,24 +260,24 @@ static int sc18is602_probe(struct i2c_client *client)
return PTR_ERR(hw->reset);
gpiod_set_value_cansleep(hw->reset, 0);
- hw->master = master;
+ hw->host = host;
hw->client = client;
hw->dev = dev;
hw->ctrl = 0xff;
if (client->dev.of_node)
- hw->id = (enum chips)of_device_get_match_data(&client->dev);
+ hw->id = (uintptr_t)of_device_get_match_data(&client->dev);
else
hw->id = id->driver_data;
switch (hw->id) {
case sc18is602:
case sc18is602b:
- master->num_chipselect = 4;
+ host->num_chipselect = 4;
hw->freq = SC18IS602_CLOCK;
break;
case sc18is603:
- master->num_chipselect = 2;
+ host->num_chipselect = 2;
if (pdata) {
hw->freq = pdata->clock_frequency;
} else {
@@ -293,18 +292,18 @@ static int sc18is602_probe(struct i2c_client *client)
hw->freq = SC18IS602_CLOCK;
break;
}
- master->bus_num = np ? -1 : client->adapter->nr;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->setup = sc18is602_setup;
- master->transfer_one_message = sc18is602_transfer_one;
- master->max_transfer_size = sc18is602_max_transfer_size;
- master->max_message_size = sc18is602_max_transfer_size;
- master->dev.of_node = np;
- master->min_speed_hz = hw->freq / 128;
- master->max_speed_hz = hw->freq / 4;
-
- return devm_spi_register_master(dev, master);
+ host->bus_num = np ? -1 : client->adapter->nr;
+ host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->setup = sc18is602_setup;
+ host->transfer_one_message = sc18is602_transfer_one;
+ host->max_transfer_size = sc18is602_max_transfer_size;
+ host->max_message_size = sc18is602_max_transfer_size;
+ host->dev.of_node = np;
+ host->min_speed_hz = hw->freq / 128;
+ host->max_speed_hz = hw->freq / 4;
+
+ return devm_spi_register_controller(dev, host);
}
static const struct i2c_device_id sc18is602_id[] = {
@@ -343,6 +342,6 @@ static struct i2c_driver sc18is602_driver = {
module_i2c_driver(sc18is602_driver);
-MODULE_DESCRIPTION("SC18IS602/603 SPI Master Driver");
+MODULE_DESCRIPTION("SC18IS602/603 SPI Host Driver");
MODULE_AUTHOR("Guenter Roeck");
MODULE_LICENSE("GPL");
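One subtle fix in the sc18is602 hunk is the match-data cast: of_device_get_match_data() returns a void pointer, and casting that straight to an enum narrower than a pointer trips a pointer-to-integer-of-different-size warning on 64-bit builds. Going through uintptr_t first keeps the integer round-trip well defined. A hedged fragment of the idiom (the compatible string and helper name are illustrative, headers omitted):

    enum chips { sc18is602, sc18is602b, sc18is603 };

    static const struct of_device_id demo_of_match[] = {
        /* .data smuggles the enum value through a pointer-sized slot */
        { .compatible = "nxp,sc18is602b", .data = (void *)sc18is602b },
        { /* sentinel */ }
    };

    static enum chips demo_chip_id(struct device *dev)
    {
        /* widen to uintptr_t, then let the return value narrow to the enum */
        return (uintptr_t)of_device_get_match_data(dev);
    }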
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index d6ffeae66ed3..5d63aa1d28e2 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -82,7 +82,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
}
/*
- * spi master function
+ * spi host function
*/
#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
@@ -224,7 +224,7 @@ static int hspi_probe(struct platform_device *pdev)
return -EINVAL;
}
- ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi));
+ ctlr = spi_alloc_host(&pdev->dev, sizeof(*hspi));
if (!ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 9e90b4f8b357..fb452bc78372 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -55,7 +54,7 @@ struct sh_msiof_spi_priv {
dma_addr_t rx_dma_addr;
bool native_cs_inited;
bool native_cs_high;
- bool slave_aborted;
+ bool target_aborted;
};
#define MAX_SS 3 /* Maximum number of native chip selects */
@@ -362,7 +361,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT;
tmp |= lsb_first << SIMDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->ctlr)) {
+ if (spi_controller_is_target(p->ctlr)) {
sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON);
} else {
sh_msiof_write(p, SITMDR1,
@@ -554,7 +553,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
- if (spi_get_csgpiod(spi, 0) || spi_controller_is_slave(p->ctlr))
+ if (spi_get_csgpiod(spi, 0) || spi_controller_is_target(p->ctlr))
return 0;
if (p->native_cs_inited &&
@@ -603,11 +602,11 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr,
static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- bool slave = spi_controller_is_slave(p->ctlr);
+ bool target = spi_controller_is_target(p->ctlr);
int ret = 0;
/* setup clock and rx/tx signals */
- if (!slave)
+ if (!target)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE);
@@ -615,7 +614,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE);
/* start by setting frame bit */
- if (!ret && !slave)
+ if (!ret && !target)
ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE);
return ret;
@@ -623,27 +622,27 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
- bool slave = spi_controller_is_slave(p->ctlr);
+ bool target = spi_controller_is_target(p->ctlr);
int ret = 0;
/* shut down frame, rx/tx and clock signals */
- if (!slave)
+ if (!target)
ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0);
if (!ret)
ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0);
if (rx_buf && !ret)
ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0);
- if (!ret && !slave)
+ if (!ret && !target)
ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0);
return ret;
}
-static int sh_msiof_slave_abort(struct spi_controller *ctlr)
+static int sh_msiof_target_abort(struct spi_controller *ctlr)
{
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
- p->slave_aborted = true;
+ p->target_aborted = true;
complete(&p->done);
complete(&p->done_txdma);
return 0;
@@ -652,9 +651,9 @@ static int sh_msiof_slave_abort(struct spi_controller *ctlr)
static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
struct completion *x)
{
- if (spi_controller_is_slave(p->ctlr)) {
+ if (spi_controller_is_target(p->ctlr)) {
if (wait_for_completion_interruptible(x) ||
- p->slave_aborted) {
+ p->target_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
return -EINTR;
}
@@ -700,7 +699,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
tx_fifo(p, tx_buf, words, fifo_shift);
reinit_completion(&p->done);
- p->slave_aborted = false;
+ p->target_aborted = false;
ret = sh_msiof_spi_start(p, rx_buf);
if (ret) {
@@ -797,7 +796,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
reinit_completion(&p->done);
if (tx)
reinit_completion(&p->done_txdma);
- p->slave_aborted = false;
+ p->target_aborted = false;
/* Now start DMA */
if (rx)
@@ -926,7 +925,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr,
sh_msiof_spi_reset_regs(p);
/* setup clocks (clock already enabled in chipselect()) */
- if (!spi_controller_is_slave(p->ctlr))
+ if (!spi_controller_is_target(p->ctlr))
sh_msiof_spi_set_clk_regs(p, t);
while (ctlr->dma_tx && len > 15) {
@@ -1102,11 +1101,11 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
if (!info)
return NULL;
- info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_SLAVE
- : MSIOF_SPI_MASTER;
+ info->mode = of_property_read_bool(np, "spi-slave") ? MSIOF_SPI_TARGET
+ : MSIOF_SPI_HOST;
/* Parse the MSIOF properties */
- if (info->mode == MSIOF_SPI_MASTER)
+ if (info->mode == MSIOF_SPI_HOST)
of_property_read_u32(np, "num-cs", &num_cs);
of_property_read_u32(np, "renesas,tx-fifo-size",
&info->tx_fifo_override);
@@ -1280,12 +1279,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
return -ENXIO;
}
- if (info->mode == MSIOF_SPI_SLAVE)
- ctlr = spi_alloc_slave(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ if (info->mode == MSIOF_SPI_TARGET)
+ ctlr = spi_alloc_target(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
else
- ctlr = spi_alloc_master(&pdev->dev,
- sizeof(struct sh_msiof_spi_priv));
+ ctlr = spi_alloc_host(&pdev->dev,
+ sizeof(struct sh_msiof_spi_priv));
if (ctlr == NULL)
return -ENOMEM;
@@ -1348,7 +1347,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
- ctlr->slave_abort = sh_msiof_slave_abort;
+ ctlr->target_abort = sh_msiof_target_abort;
ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
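sh-msiof also shows the role-naming half of the rename: spi_alloc_target()/spi_alloc_host() at allocation time, spi_controller_is_target() at run time, and the ->target_abort hook replacing ->slave_abort. Note the device-tree property stays "spi-slave" for binding compatibility. A short sketch of the allocation branch (struct demo_priv and demo_target_abort are placeholders):

    if (of_property_read_bool(np, "spi-slave"))
        ctlr = spi_alloc_target(&pdev->dev, sizeof(struct demo_priv));
    else
        ctlr = spi_alloc_host(&pdev->dev, sizeof(struct demo_priv));
    if (!ctlr)
        return -ENOMEM;

    /* abort hook is only invoked when the controller acts as a target */
    ctlr->target_abort = demo_target_abort;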
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 92ca3f2d61ba..148d615d2f38 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -56,17 +56,17 @@ static inline void setbits(struct sh_sci_spi *sp, int bits, int on)
static inline void setsck(struct spi_device *dev, int on)
{
- setbits(spi_master_get_devdata(dev->master), PIN_SCK, on);
+ setbits(spi_controller_get_devdata(dev->controller), PIN_SCK, on);
}
static inline void setmosi(struct spi_device *dev, int on)
{
- setbits(spi_master_get_devdata(dev->master), PIN_TXD, on);
+ setbits(spi_controller_get_devdata(dev->controller), PIN_TXD, on);
}
static inline u32 getmiso(struct spi_device *dev)
{
- struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+ struct sh_sci_spi *sp = spi_controller_get_devdata(dev->controller);
return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 1 : 0;
}
@@ -105,7 +105,7 @@ static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
{
- struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+ struct sh_sci_spi *sp = spi_controller_get_devdata(dev->controller);
if (sp->info->chip_select)
(sp->info->chip_select)(sp->info, spi_get_chipselect(dev, 0), value);
@@ -114,18 +114,18 @@ static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
static int sh_sci_spi_probe(struct platform_device *dev)
{
struct resource *r;
- struct spi_master *master;
+ struct spi_controller *host;
struct sh_sci_spi *sp;
int ret;
- master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi));
- if (master == NULL) {
- dev_err(&dev->dev, "failed to allocate spi master\n");
+ host = spi_alloc_host(&dev->dev, sizeof(struct sh_sci_spi));
+ if (host == NULL) {
+ dev_err(&dev->dev, "failed to allocate spi host\n");
ret = -ENOMEM;
goto err0;
}
- sp = spi_master_get_devdata(master);
+ sp = spi_controller_get_devdata(host);
platform_set_drvdata(dev, sp);
sp->info = dev_get_platdata(&dev->dev);
@@ -136,7 +136,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
}
/* setup spi bitbang adaptor */
- sp->bitbang.master = master;
+ sp->bitbang.master = host;
sp->bitbang.master->bus_num = sp->info->bus_num;
sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
sp->bitbang.chipselect = sh_sci_spi_chipselect;
@@ -166,7 +166,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
setbits(sp, PIN_INIT, 0);
iounmap(sp->membase);
err1:
- spi_master_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.master);
err0:
return ret;
}
@@ -178,7 +178,7 @@ static void sh_sci_spi_remove(struct platform_device *dev)
spi_bitbang_stop(&sp->bitbang);
setbits(sp, PIN_INIT, 0);
iounmap(sp->membase);
- spi_master_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.master);
}
static struct platform_driver sh_sci_spi_drv = {
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index d358a2a9c3f5..4b873d9a7602 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -72,7 +72,7 @@
struct spi_sh_data {
void __iomem *addr;
int irq;
- struct spi_master *master;
+ struct spi_controller *host;
unsigned long cr1;
wait_queue_head_t wait;
int width;
@@ -327,7 +327,7 @@ static int spi_sh_transfer_one_message(struct spi_controller *ctlr,
static int spi_sh_setup(struct spi_device *spi)
{
- struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+ struct spi_sh_data *ss = spi_controller_get_devdata(spi->controller);
pr_debug("%s: enter\n", __func__);
@@ -346,7 +346,7 @@ static int spi_sh_setup(struct spi_device *spi)
static void spi_sh_cleanup(struct spi_device *spi)
{
- struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+ struct spi_sh_data *ss = spi_controller_get_devdata(spi->controller);
pr_debug("%s: enter\n", __func__);
@@ -381,14 +381,14 @@ static void spi_sh_remove(struct platform_device *pdev)
{
struct spi_sh_data *ss = platform_get_drvdata(pdev);
- spi_unregister_master(ss->master);
+ spi_unregister_controller(ss->host);
free_irq(ss->irq, ss);
}
static int spi_sh_probe(struct platform_device *pdev)
{
struct resource *res;
- struct spi_master *master;
+ struct spi_controller *host;
struct spi_sh_data *ss;
int ret, irq;
@@ -403,13 +403,13 @@ static int spi_sh_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
- if (master == NULL) {
- dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ host = devm_spi_alloc_host(&pdev->dev, sizeof(struct spi_sh_data));
+ if (host == NULL) {
+ dev_err(&pdev->dev, "devm_spi_alloc_host error.\n");
return -ENOMEM;
}
- ss = spi_master_get_devdata(master);
+ ss = spi_controller_get_devdata(host);
platform_set_drvdata(pdev, ss);
switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
@@ -424,7 +424,7 @@ static int spi_sh_probe(struct platform_device *pdev)
return -ENODEV;
}
ss->irq = irq;
- ss->master = master;
+ ss->host = host;
ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (ss->addr == NULL) {
dev_err(&pdev->dev, "ioremap error.\n");
@@ -438,15 +438,15 @@ static int spi_sh_probe(struct platform_device *pdev)
return ret;
}
- master->num_chipselect = 2;
- master->bus_num = pdev->id;
- master->setup = spi_sh_setup;
- master->transfer_one_message = spi_sh_transfer_one_message;
- master->cleanup = spi_sh_cleanup;
+ host->num_chipselect = 2;
+ host->bus_num = pdev->id;
+ host->setup = spi_sh_setup;
+ host->transfer_one_message = spi_sh_transfer_one_message;
+ host->cleanup = spi_sh_cleanup;
- ret = spi_register_master(master);
+ ret = spi_register_controller(host);
if (ret < 0) {
- printk(KERN_ERR "spi_register_master error.\n");
+ printk(KERN_ERR "spi_register_controller error.\n");
goto error3;
}
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
index dae9e097c333..cfd17bbb2202 100644
--- a/drivers/spi/spi-sifive.c
+++ b/drivers/spi/spi-sifive.c
@@ -128,9 +128,9 @@ static void sifive_spi_init(struct sifive_spi *spi)
}
static int
-sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
+sifive_spi_prepare_message(struct spi_controller *host, struct spi_message *msg)
{
- struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct sifive_spi *spi = spi_controller_get_devdata(host);
struct spi_device *device = msg->spi;
/* Update the chip select polarity */
@@ -152,7 +152,7 @@ sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
{
- struct sifive_spi *spi = spi_master_get_devdata(device->master);
+ struct sifive_spi *spi = spi_controller_get_devdata(device->controller);
/* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
if (device->mode & SPI_CS_HIGH)
@@ -252,10 +252,10 @@ static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
}
static int
-sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
+sifive_spi_transfer_one(struct spi_controller *host, struct spi_device *device,
struct spi_transfer *t)
{
- struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct sifive_spi *spi = spi_controller_get_devdata(host);
int poll = sifive_spi_prep_transfer(spi, device, t);
const u8 *tx_ptr = t->tx_buf;
u8 *rx_ptr = t->rx_buf;
@@ -294,35 +294,35 @@ static int sifive_spi_probe(struct platform_device *pdev)
struct sifive_spi *spi;
int ret, irq, num_cs;
u32 cs_bits, max_bits_per_word;
- struct spi_master *master;
+ struct spi_controller *host;
- master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi));
- if (!master) {
+ host = spi_alloc_host(&pdev->dev, sizeof(struct sifive_spi));
+ if (!host) {
dev_err(&pdev->dev, "out of memory\n");
return -ENOMEM;
}
- spi = spi_master_get_devdata(master);
+ spi = spi_controller_get_devdata(host);
init_completion(&spi->done);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
spi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->regs)) {
ret = PTR_ERR(spi->regs);
- goto put_master;
+ goto put_host;
}
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
dev_err(&pdev->dev, "Unable to find bus clock\n");
ret = PTR_ERR(spi->clk);
- goto put_master;
+ goto put_host;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto put_master;
+ goto put_host;
}
/* Optional parameters */
@@ -339,14 +339,14 @@ static int sifive_spi_probe(struct platform_device *pdev)
if (!ret && max_bits_per_word < 8) {
dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
ret = -EINVAL;
- goto put_master;
+ goto put_host;
}
/* Spin up the bus clock before hitting registers */
ret = clk_prepare_enable(spi->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable bus clock\n");
- goto put_master;
+ goto put_host;
}
/* probe the number of CS lines */
@@ -362,30 +362,30 @@ static int sifive_spi_probe(struct platform_device *pdev)
num_cs = ilog2(cs_bits) + 1;
if (num_cs > SIFIVE_SPI_MAX_CS) {
- dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ dev_err(&pdev->dev, "Invalid number of spi targets\n");
ret = -EINVAL;
goto disable_clk;
}
- /* Define our master */
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
- master->num_chipselect = num_cs;
- master->mode_bits = SPI_CPHA | SPI_CPOL
+ /* Define our host */
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->num_chipselect = num_cs;
+ host->mode_bits = SPI_CPHA | SPI_CPOL
| SPI_CS_HIGH | SPI_LSB_FIRST
| SPI_TX_DUAL | SPI_TX_QUAD
| SPI_RX_DUAL | SPI_RX_QUAD;
/* TODO: add driver support for bits_per_word < 8
* we need to "left-align" the bits (unless SPI_LSB_FIRST)
*/
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS;
- master->prepare_message = sifive_spi_prepare_message;
- master->set_cs = sifive_spi_set_cs;
- master->transfer_one = sifive_spi_transfer_one;
+ host->bits_per_word_mask = SPI_BPW_MASK(8);
+ host->flags = SPI_CONTROLLER_MUST_TX | SPI_CONTROLLER_GPIO_SS;
+ host->prepare_message = sifive_spi_prepare_message;
+ host->set_cs = sifive_spi_set_cs;
+ host->transfer_one = sifive_spi_transfer_one;
pdev->dev.dma_mask = NULL;
- /* Configure the SPI master hardware */
+ /* Configure the SPI host hardware */
sifive_spi_init(spi);
/* Register for SPI Interrupt */
@@ -397,11 +397,11 @@ static int sifive_spi_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
- irq, master->num_chipselect);
+ irq, host->num_chipselect);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master failed\n");
+ dev_err(&pdev->dev, "spi_register_host failed\n");
goto disable_clk;
}
@@ -409,16 +409,16 @@ static int sifive_spi_probe(struct platform_device *pdev)
disable_clk:
clk_disable_unprepare(spi->clk);
-put_master:
- spi_master_put(master);
+put_host:
+ spi_controller_put(host);
return ret;
}
static void sifive_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct sifive_spi *spi = spi_controller_get_devdata(host);
/* Disable all the interrupts just in case */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
@@ -427,11 +427,11 @@ static void sifive_spi_remove(struct platform_device *pdev)
static int sifive_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sifive_spi *spi = spi_controller_get_devdata(host);
int ret;
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -445,14 +445,14 @@ static int sifive_spi_suspend(struct device *dev)
static int sifive_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct sifive_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct sifive_spi *spi = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(spi->clk);
if (ret)
return ret;
- ret = spi_master_resume(master);
+ ret = spi_controller_resume(host);
if (ret)
clk_disable_unprepare(spi->clk);
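The sifive suspend/resume hunks keep the usual ordering around the renamed helpers: spi_controller_suspend() runs first so the message queue is quiesced before the clock is gated, and on resume the clock comes back before spi_controller_resume() restarts the queue (undoing the clock again if that fails). A condensed sketch, with demo_spi and its clk member as placeholder driver state and the clk.h helpers assumed:

    static int demo_suspend(struct device *dev)
    {
        struct spi_controller *host = dev_get_drvdata(dev);
        struct demo_spi *ds = spi_controller_get_devdata(host);
        int ret;

        ret = spi_controller_suspend(host);   /* stop the message queue */
        if (ret)
            return ret;

        clk_disable_unprepare(ds->clk);
        return 0;
    }

    static int demo_resume(struct device *dev)
    {
        struct spi_controller *host = dev_get_drvdata(dev);
        struct demo_spi *ds = spi_controller_get_devdata(host);
        int ret;

        ret = clk_prepare_enable(ds->clk);
        if (ret)
            return ret;

        ret = spi_controller_resume(host);    /* restart the queue */
        if (ret)
            clk_disable_unprepare(ds->clk);
        return ret;
    }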
diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c
index 4e4d426bfb43..6d6772974783 100644
--- a/drivers/spi/spi-slave-mt27xx.c
+++ b/drivers/spi/spi-slave-mt27xx.c
@@ -414,7 +414,7 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
mdata->dev_comp = of_id->data;
if (mdata->dev_comp->must_rx)
- ctlr->flags = SPI_MASTER_MUST_RX;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX;
platform_set_drvdata(pdev, ctlr);
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index d64d3f75c726..a7c3b3923b4a 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -10,7 +10,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
@@ -501,7 +501,7 @@ out:
static int f_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct f_ospi *ospi = spi_controller_get_devdata(mem->spi->master);
+ struct f_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
int err = 0;
switch (op->data.dir) {
@@ -606,7 +606,7 @@ static int f_ospi_probe(struct platform_device *pdev)
u32 num_cs = OSPI_NUM_CS;
int ret;
- ctlr = spi_alloc_master(dev, sizeof(*ospi));
+ ctlr = spi_alloc_host(dev, sizeof(*ospi));
if (!ctlr)
return -ENOMEM;
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 22e39c4c12c4..bf01feedbf93 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/spi/spi.h>
@@ -580,7 +579,7 @@ static int sprd_adi_probe(struct platform_device *pdev)
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = num_chipselect;
- ctlr->flags = SPI_MASTER_HALF_DUPLEX;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
ctlr->bits_per_word_mask = 0;
ctlr->transfer_one = sprd_adi_transfer_one;
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index 518c7eaca84e..95377cf748c0 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 2b6804aa6901..def74ae9b5f6 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 6d10fa4ab783..b6d66caba4c0 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -238,6 +238,7 @@ struct stm32_spi;
* @baud_rate_div_min: minimum baud rate divisor
* @baud_rate_div_max: maximum baud rate divisor
* @has_fifo: boolean to know if fifo is used for driver
+ * @has_device_mode: is this compatible capable to switch on device mode
* @flags: compatible specific SPI controller flags used at registration time
*/
struct stm32_spi_cfg {
@@ -259,6 +260,7 @@ struct stm32_spi_cfg {
unsigned int baud_rate_div_min;
unsigned int baud_rate_div_max;
bool has_fifo;
+ bool has_device_mode;
u16 flags;
};
@@ -1001,9 +1003,9 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
if (spi->cfg->set_number_of_data) {
int ret;
- ret = spi_split_transfers_maxsize(ctrl, msg,
- STM32H7_SPI_TSIZE_MAX,
- GFP_KERNEL | GFP_DMA);
+ ret = spi_split_transfers_maxwords(ctrl, msg,
+ STM32H7_SPI_TSIZE_MAX,
+ GFP_KERNEL | GFP_DMA);
if (ret)
return ret;
}
@@ -1750,7 +1752,8 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
.baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
.baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
.has_fifo = false,
- .flags = SPI_MASTER_MUST_TX,
+ .has_device_mode = false,
+ .flags = SPI_CONTROLLER_MUST_TX,
};
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
@@ -1774,6 +1777,7 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = {
.baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
.baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
.has_fifo = true,
+ .has_device_mode = true,
};
static const struct of_device_id stm32_spi_of_match[] = {
@@ -1798,8 +1802,13 @@ static int stm32_spi_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
bool device_mode;
int ret;
+ const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev);
device_mode = of_property_read_bool(np, "spi-slave");
+ if (!cfg->has_device_mode && device_mode) {
+ dev_err(&pdev->dev, "spi-slave not supported\n");
+ return -EPERM;
+ }
if (device_mode)
ctrl = devm_spi_alloc_slave(&pdev->dev, sizeof(struct stm32_spi));
@@ -1817,9 +1826,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->device_mode = device_mode;
spin_lock_init(&spi->lock);
- spi->cfg = (const struct stm32_spi_cfg *)
- of_match_device(pdev->dev.driver->of_match_table,
- &pdev->dev)->data;
+ spi->cfg = cfg;
spi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(spi->base))
@@ -1829,8 +1836,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
spi->irq = platform_get_irq(pdev, 0);
if (spi->irq <= 0)
- return dev_err_probe(&pdev->dev, spi->irq,
- "failed to get irq\n");
+ return spi->irq;
ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
spi->cfg->irq_handler_event,
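Two things happen in the stm32 probe hunk: the two-step of_match_device() lookup collapses into of_device_get_match_data(), and compatibles whose config lacks device mode now reject a "spi-slave" node up front instead of probing a mode the IP cannot provide. The lookup simplification in isolation (the probe only runs for a matched OF entry, so the match data is taken as non-NULL, exactly as in the patch):

    /* before: fetch the whole match entry, then dereference ->data */
    cfg = (const struct stm32_spi_cfg *)
        of_match_device(pdev->dev.driver->of_match_table, &pdev->dev)->data;

    /* after: one call that returns the ->data pointer directly */
    cfg = of_device_get_match_data(&pdev->dev);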
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 30d541612253..3f5b1556ece0 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -83,6 +83,9 @@
#define SUN6I_XMIT_CNT_REG 0x34
#define SUN6I_BURST_CTL_CNT_REG 0x38
+#define SUN6I_BURST_CTL_CNT_STC_MASK GENMASK(23, 0)
+#define SUN6I_BURST_CTL_CNT_DRM BIT(28)
+#define SUN6I_BURST_CTL_CNT_QUAD_EN BIT(29)
#define SUN6I_TXDATA_REG 0x200
#define SUN6I_RXDATA_REG 0x300
@@ -90,6 +93,7 @@
struct sun6i_spi_cfg {
unsigned long fifo_depth;
bool has_clk_ctl;
+ u32 mode_bits;
};
struct sun6i_spi {
@@ -266,7 +270,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
unsigned int div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
- unsigned int tx_len = 0, rx_len = 0;
+ unsigned int tx_len = 0, rx_len = 0, nbits = 0;
bool use_dma;
int ret = 0;
u32 reg;
@@ -418,13 +422,29 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG, reg);
/* Setup the transfer now... */
- if (sspi->tx_buf)
+ if (sspi->tx_buf) {
tx_len = tfr->len;
+ nbits = tfr->tx_nbits;
+ } else if (tfr->rx_buf) {
+ nbits = tfr->rx_nbits;
+ }
+
+ switch (nbits) {
+ case SPI_NBITS_DUAL:
+ reg = SUN6I_BURST_CTL_CNT_DRM;
+ break;
+ case SPI_NBITS_QUAD:
+ reg = SUN6I_BURST_CTL_CNT_QUAD_EN;
+ break;
+ case SPI_NBITS_SINGLE:
+ default:
+ reg = FIELD_PREP(SUN6I_BURST_CTL_CNT_STC_MASK, tx_len);
+ }
/* Setup the counters */
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, reg);
sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, tfr->len);
sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, tx_len);
- sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, tx_len);
if (!use_dma) {
/* Fill the TX FIFO */
@@ -623,7 +643,8 @@ static int sun6i_spi_probe(struct platform_device *pdev)
master->set_cs = sun6i_spi_set_cs;
master->transfer_one = sun6i_spi_transfer_one;
master->num_chipselect = 4;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
+ sspi->cfg->mode_bits;
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
@@ -740,6 +761,7 @@ static const struct sun6i_spi_cfg sun8i_h3_spi_cfg = {
static const struct sun6i_spi_cfg sun50i_r329_spi_cfg = {
.fifo_depth = SUN8I_FIFO_DEPTH,
+ .mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD,
};
static const struct of_device_id sun6i_spi_match[] = {
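The sun6i change is a small feature addition: the burst-control register now encodes the transfer width taken from tx_nbits/rx_nbits, and dual/quad mode bits are only advertised on compatibles that set cfg->mode_bits. Restating the selection with comments (register and field names as defined earlier in this hunk; tfr, tx_len and sspi come from the surrounding transfer function, FIELD_PREP from linux/bitfield.h):

    unsigned int nbits = tfr->tx_buf ? tfr->tx_nbits : tfr->rx_nbits;
    u32 reg;

    switch (nbits) {
    case SPI_NBITS_DUAL:
        reg = SUN6I_BURST_CTL_CNT_DRM;          /* dual I/O */
        break;
    case SPI_NBITS_QUAD:
        reg = SUN6I_BURST_CTL_CNT_QUAD_EN;      /* quad I/O */
        break;
    default:                                    /* single bit */
        reg = FIELD_PREP(SUN6I_BURST_CTL_CNT_STC_MASK, tx_len);
    }
    sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG, reg);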
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 488df681eaef..460f232dad50 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -20,7 +20,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
@@ -723,27 +722,23 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
struct spi_delay *setup = &spi->cs_setup;
struct spi_delay *hold = &spi->cs_hold;
struct spi_delay *inactive = &spi->cs_inactive;
- u8 setup_dly, hold_dly, inactive_dly;
+ u8 setup_dly, hold_dly;
u32 setup_hold;
u32 spi_cs_timing;
u32 inactive_cycles;
u8 cs_state;
- if ((setup && setup->unit != SPI_DELAY_UNIT_SCK) ||
- (hold && hold->unit != SPI_DELAY_UNIT_SCK) ||
- (inactive && inactive->unit != SPI_DELAY_UNIT_SCK)) {
+ if (setup->unit != SPI_DELAY_UNIT_SCK ||
+ hold->unit != SPI_DELAY_UNIT_SCK ||
+ inactive->unit != SPI_DELAY_UNIT_SCK) {
dev_err(&spi->dev,
"Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n",
SPI_DELAY_UNIT_SCK);
return -EINVAL;
}
- setup_dly = setup ? setup->value : 0;
- hold_dly = hold ? hold->value : 0;
- inactive_dly = inactive ? inactive->value : 0;
-
- setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
- hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
+ setup_dly = min_t(u8, setup->value, MAX_SETUP_HOLD_CYCLES);
+ hold_dly = min_t(u8, hold->value, MAX_SETUP_HOLD_CYCLES);
if (setup_dly && hold_dly) {
setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
@@ -755,7 +750,7 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi)
}
}
- inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
+ inactive_cycles = min_t(u8, inactive->value, MAX_INACTIVE_CYCLES);
if (inactive_cycles)
inactive_cycles--;
cs_state = inactive_cycles ? 0 : 1;
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 4286310628a2..0c5507473f97 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -455,7 +455,11 @@ static int tegra_sflash_probe(struct platform_device *pdev)
goto exit_free_master;
}
- tsd->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto exit_free_master;
+ tsd->irq = ret;
+
ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
dev_name(&pdev->dev), tsd);
if (ret < 0) {
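The tegra20-sflash fix is the standard platform_get_irq() pattern: the call returns either a valid (non-zero) IRQ number or a negative errno, and it already prints its own error message, so the caller only checks for < 0 and propagates the code instead of storing a failure value blindly. The zynq and zynqmp QSPI hunks further down make the matching <= 0 to < 0 correction. Distilled (demo_isr stands in for the driver's handler, and error paths are simplified to plain returns):

    ret = platform_get_irq(pdev, 0);
    if (ret < 0)
        return ret;             /* negative errno; 0 is never returned */
    tsd->irq = ret;

    ret = request_irq(tsd->irq, demo_isr, 0, dev_name(&pdev->dev), tsd);
    if (ret < 0)
        return ret;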
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index c2915f7672cc..4d6db6182c5e 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -21,7 +21,6 @@
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
@@ -1034,18 +1033,12 @@ static int tegra_slink_probe(struct platform_device *pdev)
&master->max_speed_hz))
master->max_speed_hz = 25000000; /* 25MHz */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "No IO memory resource\n");
- ret = -ENODEV;
- goto exit_free_master;
- }
- tspi->phys = r->start;
- tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
goto exit_free_master;
}
+ tspi->phys = r->start;
/* disabled clock may cause interrupt storm upon request */
tspi->clk = devm_clk_get(&pdev->dev, NULL);
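tegra20-slink folds platform_get_resource() plus devm_ioremap_resource() into devm_platform_get_and_ioremap_resource(), which hands the struct resource back through an out-parameter so the physical start address is still available after mapping. The shape of the call (error handling simplified to a plain return for the sketch):

    struct resource *r;

    tspi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
    if (IS_ERR(tspi->base))
        return PTR_ERR(tspi->base);
    tspi->phys = r->start;      /* physical base kept for later use */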
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index fbd14dd7be44..e9ad9b0b598b 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 5914335ff63d..4c81516b67db 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -770,7 +769,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
- master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->setup = ti_qspi_setup;
master->auto_runtime_pm = true;
master->transfer_one_message = ti_qspi_start_transfer_one;
diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c
index f15312fdcdaf..852ffe013d32 100644
--- a/drivers/spi/spi-wpcm-fiu.c
+++ b/drivers/spi/spi-wpcm-fiu.c
@@ -3,9 +3,8 @@
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi-mem.h>
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index ae6218bcd02a..a3d57554f5ba 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -218,7 +218,7 @@ static int spi_xcomm_probe(struct i2c_client *i2c)
master->num_chipselect = 16;
master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->flags = SPI_CONTROLLER_HALF_DUPLEX;
master->transfer_one_message = spi_xcomm_transfer_one;
master->dev.of_node = i2c->dev.of_node;
i2c_set_clientdata(i2c, master);
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 24dc845b940e..dbd85d7a1526 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -87,7 +87,7 @@ static int xtfpga_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
- master->flags = SPI_MASTER_NO_RX;
+ master->flags = SPI_CONTROLLER_NO_RX;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->bus_num = pdev->dev.id;
master->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index ee1995b91287..0db69a2a72ff 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -679,8 +679,8 @@ static int zynq_qspi_probe(struct platform_device *pdev)
}
xqspi->irq = platform_get_irq(pdev, 0);
- if (xqspi->irq <= 0) {
- ret = -ENXIO;
+ if (xqspi->irq < 0) {
+ ret = xqspi->irq;
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index fb2ca9b90eab..94d9a33d9af5 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -14,9 +14,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -1295,8 +1293,8 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
zynqmp_qspi_init_hw(xqspi);
xqspi->irq = platform_get_irq(pdev, 0);
- if (xqspi->irq <= 0) {
- ret = -ENXIO;
+ if (xqspi->irq < 0) {
+ ret = xqspi->irq;
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9291b2a0e887..8d6304cb061e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4,36 +4,36 @@
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/init.h>
+#include <linux/acpi.h>
#include <linux/cache.h>
-#include <linux/dma-mapping.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/highmem.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/slab.h>
-#include <linux/mod_devicetable.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
-#include <linux/gpio/consumer.h>
-#include <linux/pm_runtime.h>
+#include <linux/percpu.h>
+#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <linux/export.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/platform_data/x86/apple.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/percpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
@@ -64,7 +64,7 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
if (len != -ENODEV)
return len;
- return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
+ return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
@@ -89,7 +89,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
+ len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
device_unlock(dev);
return len;
}
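The sysfs changes in spi.c swap open-coded sprintf()/snprintf() for sysfs_emit(), which bounds the write to PAGE_SIZE and warns when the buffer is not a proper sysfs page buffer. Typical shape of a converted show() callback (the attribute name is illustrative):

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
        const struct spi_device *spi = to_spi_device(dev);

        /* sysfs_emit() caps output at PAGE_SIZE and returns bytes written */
        return sysfs_emit(buf, "%s\n", spi->modalias);
    }
    static DEVICE_ATTR_RO(demo);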
@@ -631,6 +631,16 @@ static int __spi_add_device(struct spi_device *spi)
struct device *dev = ctlr->dev.parent;
int status;
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
+ ctlr->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
/*
* We need to make sure there's no other device with this
* chipselect **BEFORE** we call setup(), else we'll trash
@@ -682,26 +692,15 @@ static int __spi_add_device(struct spi_device *spi)
* @spi: spi_device to register
*
* Companion function to spi_alloc_device. Devices allocated with
- * spi_alloc_device can be added onto the spi bus with this function.
+ * spi_alloc_device can be added onto the SPI bus with this function.
*
* Return: 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
- struct device *dev = ctlr->dev.parent;
int status;
- /* Chipselects are numbered 0..max; validate. */
- if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
- ctlr->num_chipselect);
- return -EINVAL;
- }
-
- /* Set the bus ID string */
- spi_dev_set_name(spi);
-
mutex_lock(&ctlr->add_lock);
status = __spi_add_device(spi);
mutex_unlock(&ctlr->add_lock);
@@ -709,25 +708,6 @@ int spi_add_device(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_add_device);
-static int spi_add_device_locked(struct spi_device *spi)
-{
- struct spi_controller *ctlr = spi->controller;
- struct device *dev = ctlr->dev.parent;
-
- /* Chipselects are numbered 0..max; validate. */
- if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
- ctlr->num_chipselect);
- return -EINVAL;
- }
-
- /* Set the bus ID string */
- spi_dev_set_name(spi);
-
- WARN_ON(!mutex_is_locked(&ctlr->add_lock));
- return __spi_add_device(spi);
-}
-
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@@ -889,7 +869,7 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
* spi_res_alloc - allocate a spi resource that is life-cycle managed
* during the processing of a spi_message while using
* spi_transfer_one
- * @spi: the spi device for which we allocate memory
+ * @spi: the SPI device for which we allocate memory
* @release: the release code to execute for this resource
* @size: size to alloc and return
* @gfp: GFP allocation flags
@@ -915,7 +895,7 @@ static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
}
/**
- * spi_res_free - free an spi resource
+ * spi_res_free - free an SPI resource
* @res: pointer to the custom data of a resource
*/
static void spi_res_free(void *res)
@@ -931,7 +911,7 @@ static void spi_res_free(void *res)
/**
* spi_res_add - add a spi_res to the spi_message
- * @message: the spi message
+ * @message: the SPI message
* @res: the spi_resource
*/
static void spi_res_add(struct spi_message *message, void *res)
@@ -943,7 +923,7 @@ static void spi_res_add(struct spi_message *message, void *res)
}
/**
- * spi_res_release - release all spi resources for this message
+ * spi_res_release - release all SPI resources for this message
* @ctlr: the @spi_controller
* @message: the @spi_message
*/
@@ -1006,7 +986,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
}
/* Some SPI masters need both GPIO CS & slave_select */
- if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
+ if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
} else if (spi->controller->set_cs) {
@@ -1424,7 +1404,7 @@ int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
return -EINVAL;
/*
* If there is unknown effective speed, approximate it
- * by underestimating with half of the requested hz.
+ * by underestimating with half of the requested Hz.
*/
hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
if (!hz)
@@ -1739,11 +1719,11 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
}
/**
- * __spi_pump_messages - function which processes spi message queue
+ * __spi_pump_messages - function which processes SPI message queue
* @ctlr: controller to process queue for
* @in_kthread: true if we are in the context of the message pump thread
*
- * This function checks if there is any spi message in the queue that
+ * This function checks if there is any SPI message in the queue that
* needs processing and if so call out to the driver to initialize hardware
* and transfer each message.
*
@@ -1758,7 +1738,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
unsigned long flags;
int ret;
- /* Take the IO mutex */
+ /* Take the I/O mutex */
mutex_lock(&ctlr->io_mutex);
/* Lock queue */
@@ -2169,8 +2149,8 @@ static int __spi_queued_transfer(struct spi_device *spi,
/**
* spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
+ * @spi: SPI device which is requesting transfer
+ * @msg: SPI message which is to handled is queued to driver queue
*
* Return: zero on success, else a negative error code.
*/
@@ -2399,9 +2379,6 @@ static void of_register_spi_devices(struct spi_controller *ctlr)
struct spi_device *spi;
struct device_node *nc;
- if (!ctlr->dev.of_node)
- return;
-
for_each_available_child_of_node(ctlr->dev.of_node, nc) {
if (of_node_test_and_set_flag(nc, OF_POPULATED))
continue;
@@ -2432,11 +2409,12 @@ static void of_register_spi_devices(struct spi_controller *ctlr) { }
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
u8 chip_select)
{
+ struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
int rc = 0;
/* Alloc an spi_device */
- ancillary = spi_alloc_device(spi->controller);
+ ancillary = spi_alloc_device(ctlr);
if (!ancillary) {
rc = -ENOMEM;
goto err_out;
@@ -2451,8 +2429,10 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
+ WARN_ON(!mutex_is_locked(&ctlr->add_lock));
+
/* Register the new device */
- rc = spi_add_device_locked(ancillary);
+ rc = __spi_add_device(ancillary);
if (rc) {
dev_err(&spi->dev, "failed to register ancillary device\n");
goto err_out;
@@ -2499,7 +2479,7 @@ static int acpi_spi_count(struct acpi_resource *ares, void *data)
* acpi_spi_count_resources - Count the number of SpiSerialBus resources
* @adev: ACPI device
*
- * Returns the number of SpiSerialBus resources in the ACPI-device's
+ * Return: the number of SpiSerialBus resources in the ACPI-device's
* resource-list; or a negative error code.
*/
int acpi_spi_count_resources(struct acpi_device *adev)
@@ -2633,10 +2613,10 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
* @adev: ACPI Device for the spi device
* @index: Index of the spi resource inside the ACPI Node
*
- * This should be used to allocate a new spi device from and ACPI Node.
- * The caller is responsible for calling spi_add_device to register the spi device.
+ * This should be used to allocate a new SPI device from an ACPI Device node.
+ * The caller is responsible for calling spi_add_device to register the SPI device.
*
- * If ctlr is set to NULL, the Controller for the spi device will be looked up
+ * If ctlr is set to NULL, the Controller for the SPI device will be looked up
* using the resource.
* If index is set to -1, index is not used.
* Note: If index is -1, ctlr must be set.
@@ -2817,8 +2797,7 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
struct device *child;
child = device_find_any_child(&ctlr->dev);
- return sprintf(buf, "%s\n",
- child ? to_spi_device(child)->modalias : NULL);
+ return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
}
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
@@ -3056,7 +3035,7 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr)
ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
- if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
+ if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
dev_err(dev, "No unused native chip select available\n");
return -EINVAL;
@@ -3084,6 +3063,20 @@ static int spi_controller_check_ops(struct spi_controller *ctlr)
return 0;
}
+/* Allocate dynamic bus number using Linux idr */
+static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
+{
+ int id;
+
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+ ctlr->bus_num = id;
+ return 0;
+}
+
/**
* spi_register_controller - register SPI master or slave controller
* @ctlr: initialized master, originally from spi_alloc_master() or
@@ -3111,8 +3104,8 @@ int spi_register_controller(struct spi_controller *ctlr)
{
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
+ int first_dynamic;
int status;
- int id, first_dynamic;
if (!dev)
return -ENODEV;
@@ -3125,27 +3118,13 @@ int spi_register_controller(struct spi_controller *ctlr)
if (status)
return status;
+ if (ctlr->bus_num < 0)
+ ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
if (ctlr->bus_num >= 0) {
/* Devices with a fixed bus num must check-in with the num */
- mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
- ctlr->bus_num + 1, GFP_KERNEL);
- mutex_unlock(&board_lock);
- if (WARN(id < 0, "couldn't get idr"))
- return id == -ENOSPC ? -EBUSY : id;
- ctlr->bus_num = id;
- } else if (ctlr->dev.of_node) {
- /* Allocate dynamic bus number using Linux idr */
- id = of_alias_get_id(ctlr->dev.of_node, "spi");
- if (id >= 0) {
- ctlr->bus_num = id;
- mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
- ctlr->bus_num + 1, GFP_KERNEL);
- mutex_unlock(&board_lock);
- if (WARN(id < 0, "couldn't get idr"))
- return id == -ENOSPC ? -EBUSY : id;
- }
+ status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
+ if (status)
+ return status;
}
if (ctlr->bus_num < 0) {
first_dynamic = of_alias_get_highest_id("spi");
@@ -3154,13 +3133,9 @@ int spi_register_controller(struct spi_controller *ctlr)
else
first_dynamic++;
- mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
- 0, GFP_KERNEL);
- mutex_unlock(&board_lock);
- if (WARN(id < 0, "couldn't get idr"))
- return id;
- ctlr->bus_num = id;
+ status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
+ if (status)
+ return status;
}
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
@@ -3339,7 +3314,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
mutex_unlock(&ctlr->add_lock);
- /* Release the last reference on the controller if its driver
+ /*
+ * Release the last reference on the controller if its driver
* has not yet been converted to devm_spi_alloc_master/slave().
*/
if (!ctlr->devm_allocated)
@@ -3552,7 +3528,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
/* All the others need rx_buf/tx_buf also set */
for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
- /* Update rx_buf, tx_buf and dma */
+ /* Update rx_buf, tx_buf and DMA */
if (xfers[i].rx_buf)
xfers[i].rx_buf += offset;
if (xfers[i].rx_dma)
@@ -3622,7 +3598,7 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
/**
- * spi_split_transfers_maxwords - split spi transfers into multiple transfers
+ * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
* when an individual transfer exceeds a
* certain number of SPI words
* @ctlr: the @spi_controller for this transfer
@@ -3650,13 +3626,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr,
size_t maxsize;
int ret;
- if (xfer->bits_per_word <= 8)
- maxsize = maxwords;
- else if (xfer->bits_per_word <= 16)
- maxsize = 2 * maxwords;
- else
- maxsize = 4 * maxwords;
-
+ maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
maxsize, gfp);
@@ -3671,7 +3641,8 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
/*-------------------------------------------------------------------------*/
-/* Core methods for SPI controller protocol drivers. Some of the
+/*
+ * Core methods for SPI controller protocol drivers. Some of the
* other core methods are currently defined as inline functions.
*/
@@ -3731,7 +3702,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
* changes those settings, and must be called from a context that can sleep.
* Except for SPI_CS_HIGH, which takes effect immediately, the changes take
* effect the next time the device is selected and data is transferred to
- * or from it. When this function returns, the spi device is deselected.
+ * or from it. When this function returns, the SPI device is deselected.
*
* Note that this call will fail if the protocol driver specifies an option
* that the underlying controller or its driver does not support. For
@@ -3906,11 +3877,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
spi_get_csgpiod(spi, 0))) {
- size_t maxsize;
+ size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
int ret;
- maxsize = (spi->bits_per_word + 7) / 8;
-
/* spi_split_transfers_maxsize() requires message->spi */
message->spi = spi;
@@ -4071,7 +4040,7 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
* spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
* @message: describes the data transfers, including completion callback
- * Context: any (irqs may be blocked, etc)
+ * Context: any (IRQs may be blocked, etc)
*
* This call may be used in_irq and other contexts which can't sleep,
* as well as from task contexts which can sleep.
@@ -4125,7 +4094,7 @@ EXPORT_SYMBOL_GPL(spi_async);
* spi_async_locked - version of spi_async with exclusive bus usage
* @spi: device with which data will be exchanged
* @message: describes the data transfers, including completion callback
- * Context: any (irqs may be blocked, etc)
+ * Context: any (IRQs may be blocked, etc)
*
* This call may be used in_irq and other contexts which can't sleep,
* as well as from task contexts which can sleep.
@@ -4388,9 +4357,9 @@ static u8 *buf;
/**
* spi_write_then_read - SPI synchronous write followed by read
* @spi: device with which data will be exchanged
- * @txbuf: data to be written (need not be dma-safe)
+ * @txbuf: data to be written (need not be DMA-safe)
* @n_tx: size of txbuf, in bytes
- * @rxbuf: buffer into which data will be read (need not be dma-safe)
+ * @rxbuf: buffer into which data will be read (need not be DMA-safe)
* @n_rx: size of rxbuf, in bytes
* Context: can sleep
*
@@ -4401,7 +4370,7 @@ static u8 *buf;
*
* Parameters to this routine are always copied using a small buffer.
* Performance-sensitive or bulk transfer code should instead use
- * spi_{async,sync}() calls with dma-safe buffers.
+ * spi_{async,sync}() calls with DMA-safe buffers.
*
* Return: zero on success, else a negative error code.
*/
@@ -4446,7 +4415,7 @@ int spi_write_then_read(struct spi_device *spi,
x[0].tx_buf = local_buf;
x[1].rx_buf = local_buf + n_tx;
- /* Do the i/o */
+ /* Do the I/O */
status = spi_sync(spi, &message);
if (status == 0)
memcpy(rxbuf, x[1].rx_buf, n_rx);
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index 756b218880a7..81ebbf6de0de 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -22,8 +22,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c
index c243a76a3471..03ac2d02e9d4 100644
--- a/drivers/thermal/broadcom/bcm2711_thermal.c
+++ b/drivers/thermal/broadcom/bcm2711_thermal.c
@@ -15,8 +15,8 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 72d1dbe60b8f..0b73abdaa792 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -17,8 +17,8 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/thermal.h>
#define AVS_TMON_STATUS 0x00
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 3f09ef8be41a..fb54ed4bf6f0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -13,9 +13,9 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/of_device.h>
#include <linux/thermal.h>
#define HI6220_TEMP0_LAG (0x0)
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index d4b40869c7d7..e89b11b3f2b9 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 8d6b4ef23746..7224f8d21db9 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -8,7 +8,6 @@
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a94ec0a0c9dd..826358cbe810 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -11,7 +11,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
#include <linux/nvmem-consumer.h>
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 5e1164226ada..ddd600820f68 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -15,6 +15,7 @@
#define INT3400_THERMAL_TABLE_CHANGED 0x83
#define INT3400_ODVP_CHANGED 0x88
#define INT3400_KEEP_ALIVE 0xA0
+#define INT3400_FAKE_TEMP (20 * 1000) /* faked temp sensor with 20C */
enum int3400_thermal_uuid {
INT3400_THERMAL_ACTIVE = 0,
@@ -453,6 +454,7 @@ static void int3400_notify(acpi_handle handle,
void *data)
{
struct int3400_thermal_priv *priv = data;
+ struct device *dev;
char *thermal_prop[5];
int therm_event;
@@ -475,12 +477,14 @@ static void int3400_notify(acpi_handle handle,
return;
}
- thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", priv->thermal->type);
- thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", priv->thermal->temperature);
+ dev = thermal_zone_device(priv->thermal);
+
+ thermal_prop[0] = kasprintf(GFP_KERNEL, "NAME=%s", thermal_zone_device_type(priv->thermal));
+ thermal_prop[1] = kasprintf(GFP_KERNEL, "TEMP=%d", INT3400_FAKE_TEMP);
thermal_prop[2] = kasprintf(GFP_KERNEL, "TRIP=");
thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
thermal_prop[4] = NULL;
- kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, thermal_prop);
kfree(thermal_prop[0]);
kfree(thermal_prop[1]);
kfree(thermal_prop[2]);
@@ -490,7 +494,7 @@ static void int3400_notify(acpi_handle handle,
static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
int *temp)
{
- *temp = 20 * 1000; /* faked temp sensor with 20C */
+ *temp = INT3400_FAKE_TEMP;
return 0;
}
@@ -499,32 +503,28 @@ static int int3400_thermal_change_mode(struct thermal_zone_device *thermal,
{
struct int3400_thermal_priv *priv = thermal_zone_device_priv(thermal);
int result = 0;
+ int enabled;
if (!priv)
return -EINVAL;
- if (mode != thermal->mode) {
- int enabled;
-
- enabled = mode == THERMAL_DEVICE_ENABLED;
+ enabled = mode == THERMAL_DEVICE_ENABLED;
- if (priv->os_uuid_mask) {
- if (!enabled) {
- priv->os_uuid_mask = 0;
- result = set_os_uuid_mask(priv, priv->os_uuid_mask);
- }
- goto eval_odvp;
+ if (priv->os_uuid_mask) {
+ if (!enabled) {
+ priv->os_uuid_mask = 0;
+ result = set_os_uuid_mask(priv, priv->os_uuid_mask);
}
-
- if (priv->current_uuid_index < 0 ||
- priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
- return -EINVAL;
-
- result = int3400_thermal_run_osc(priv->adev->handle,
- int3400_thermal_uuids[priv->current_uuid_index],
- &enabled);
+ goto eval_odvp;
}
+ if (priv->current_uuid_index < 0 ||
+ priv->current_uuid_index >= INT3400_THERMAL_MAXIMUM_UUID)
+ return -EINVAL;
+
+ result = int3400_thermal_run_osc(priv->adev->handle,
+ int3400_thermal_uuids[priv->current_uuid_index],
+ &enabled);
eval_odvp:
evaluate_odvp(priv);
diff --git a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
index 217786fba185..c93a28eec4db 100644
--- a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
@@ -70,18 +70,7 @@ static struct platform_driver int3401_driver = {
},
};
-static int __init proc_thermal_init(void)
-{
- return platform_driver_register(&int3401_driver);
-}
-
-static void __exit proc_thermal_exit(void)
-{
- platform_driver_unregister(&int3401_driver);
-}
-
-module_init(proc_thermal_init);
-module_exit(proc_thermal_exit);
+module_platform_driver(int3401_driver);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
index 09e032f822f3..16fd9df5f36d 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
@@ -59,7 +59,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
* ACPI/MSR. So we don't want to fail for auxiliary DTSs.
*/
proc_priv->soc_dts = intel_soc_dts_iosf_init(
- INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
+ INTEL_SOC_DTS_INTERRUPT_MSI, false, 0);
if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
ret = pci_enable_msi(pdev);
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index db97499f4f0a..d00def3c4703 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -37,44 +37,11 @@
/* DTS encoding for TJ MAX temperature */
#define SOC_DTS_TJMAX_ENCODING 0x7F
-/* Only 2 out of 4 is allowed for OSPM */
-#define SOC_MAX_DTS_TRIPS 2
-
/* Mask for two trips in status bits */
#define SOC_DTS_TRIP_MASK 0x03
-/* DTS0 and DTS 1 */
-#define SOC_MAX_DTS_SENSORS 2
-
-static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
- int *temp)
-{
- int status;
- u32 out;
- struct intel_soc_dts_sensor_entry *dts;
- struct intel_soc_dts_sensors *sensors;
-
- dts = thermal_zone_device_priv(tzd);
- sensors = dts->sensors;
- mutex_lock(&sensors->dts_update_lock);
- status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
- SOC_DTS_OFFSET_PTPS, &out);
- mutex_unlock(&sensors->dts_update_lock);
- if (status)
- return status;
-
- out = (out >> (trip * 8)) & SOC_DTS_TJMAX_ENCODING;
- if (!out)
- *temp = 0;
- else
- *temp = sensors->tj_max - out * 1000;
-
- return 0;
-}
-
-static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
- int thres_index, int temp,
- enum thermal_trip_type trip_type)
+static int update_trip_temp(struct intel_soc_dts_sensors *sensors,
+ int thres_index, int temp)
{
int status;
u32 temp_out;
@@ -85,7 +52,6 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
u32 store_te_out;
u32 te_out;
u32 int_enable_bit = SOC_DTS_TE_APICA_ENABLE;
- struct intel_soc_dts_sensors *sensors = dts->sensors;
if (sensors->intr_type == INTEL_SOC_DTS_INTERRUPT_MSI)
int_enable_bit |= SOC_DTS_TE_MSI_ENABLE;
@@ -148,8 +114,6 @@ static int update_trip_temp(struct intel_soc_dts_sensor_entry *dts,
if (status)
goto err_restore_te_out;
- dts->trip_types[thres_index] = trip_type;
-
return 0;
err_restore_te_out:
iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
@@ -165,6 +129,22 @@ err_restore_ptps:
return status;
}
+static int configure_trip(struct intel_soc_dts_sensor_entry *dts,
+ int thres_index, enum thermal_trip_type trip_type,
+ int temp)
+{
+ int ret;
+
+ ret = update_trip_temp(dts->sensors, thres_index, temp);
+ if (ret)
+ return ret;
+
+ dts->trips[thres_index].temperature = temp;
+ dts->trips[thres_index].type = trip_type;
+
+ return 0;
+}
+
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
int temp)
{
@@ -176,23 +156,12 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
return -EINVAL;
mutex_lock(&sensors->dts_update_lock);
- status = update_trip_temp(dts, trip, temp,
- dts->trip_types[trip]);
+ status = update_trip_temp(sensors, trip, temp);
mutex_unlock(&sensors->dts_update_lock);
return status;
}
-static int sys_get_trip_type(struct thermal_zone_device *tzd,
- int trip, enum thermal_trip_type *type)
-{
- struct intel_soc_dts_sensor_entry *dts = thermal_zone_device_priv(tzd);
-
- *type = dts->trip_types[trip];
-
- return 0;
-}
-
static int sys_get_curr_temp(struct thermal_zone_device *tzd,
int *temp)
{
@@ -217,8 +186,6 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd,
static struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
- .get_trip_temp = sys_get_trip_temp,
- .get_trip_type = sys_get_trip_type,
.set_trip_temp = sys_set_trip_temp,
};
@@ -245,22 +212,18 @@ static int soc_dts_enable(int id)
static void remove_dts_thermal_zone(struct intel_soc_dts_sensor_entry *dts)
{
- if (dts) {
- iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
- SOC_DTS_OFFSET_ENABLE, dts->store_status);
- thermal_zone_device_unregister(dts->tzone);
- }
+ iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ SOC_DTS_OFFSET_ENABLE, dts->store_status);
+ thermal_zone_device_unregister(dts->tzone);
}
static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
- bool notification_support, int trip_cnt,
- int read_only_trip_cnt)
+ bool critical_trip)
{
+ int writable_trip_cnt = SOC_MAX_DTS_TRIPS;
char name[10];
unsigned long trip;
- int trip_count = 0;
- int trip_mask = 0;
- int writable_trip_cnt = 0;
+ int trip_mask;
unsigned long ptps;
u32 store_ptps;
unsigned long i;
@@ -273,11 +236,11 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
goto err_ret;
dts->id = id;
- if (notification_support) {
- trip_count = min(SOC_MAX_DTS_TRIPS, trip_cnt);
- writable_trip_cnt = trip_count - read_only_trip_cnt;
- trip_mask = GENMASK(writable_trip_cnt - 1, 0);
- }
+
+ if (critical_trip)
+ writable_trip_cnt--;
+
+ trip_mask = GENMASK(writable_trip_cnt - 1, 0);
/* Check if the writable trip we provide is not used by BIOS */
ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
@@ -290,13 +253,12 @@ static int add_dts_thermal_zone(int id, struct intel_soc_dts_sensor_entry *dts,
trip_mask &= ~BIT(i / 8);
}
dts->trip_mask = trip_mask;
- dts->trip_count = trip_count;
snprintf(name, sizeof(name), "soc_dts%d", id);
- dts->tzone = thermal_zone_device_register(name,
- trip_count,
- trip_mask,
- dts, &tzone_ops,
- NULL, 0, 0);
+ dts->tzone = thermal_zone_device_register_with_trips(name, dts->trips,
+ SOC_MAX_DTS_TRIPS,
+ trip_mask,
+ dts, &tzone_ops,
+ NULL, 0, 0);
if (IS_ERR(dts->tzone)) {
ret = PTR_ERR(dts->tzone);
goto err_ret;
@@ -316,26 +278,6 @@ err_ret:
return ret;
}
-int intel_soc_dts_iosf_add_read_only_critical_trip(
- struct intel_soc_dts_sensors *sensors, int critical_offset)
-{
- int i, j;
-
- for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- struct intel_soc_dts_sensor_entry *entry = &sensors->soc_dts[i];
- int temp = sensors->tj_max - critical_offset;
- unsigned long count = entry->trip_count;
- unsigned long mask = entry->trip_mask;
-
- j = find_first_zero_bit(&mask, count);
- if (j < count)
- return update_trip_temp(entry, j, temp, THERMAL_TRIP_CRITICAL);
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_add_read_only_critical_trip);
-
void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors)
{
u32 sticky_out;
@@ -371,12 +313,17 @@ void intel_soc_dts_iosf_interrupt_handler(struct intel_soc_dts_sensors *sensors)
}
EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_interrupt_handler);
-struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
- enum intel_soc_dts_interrupt_type intr_type, int trip_count,
- int read_only_trip_count)
+static void dts_trips_reset(struct intel_soc_dts_sensors *sensors, int dts_index)
+{
+ configure_trip(&sensors->soc_dts[dts_index], 0, 0, 0);
+ configure_trip(&sensors->soc_dts[dts_index], 1, 0, 0);
+}
+
+struct intel_soc_dts_sensors *
+intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type,
+ bool critical_trip, int crit_offset)
{
struct intel_soc_dts_sensors *sensors;
- bool notification;
int tj_max;
int ret;
int i;
@@ -384,9 +331,6 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
if (!iosf_mbi_available())
return ERR_PTR(-ENODEV);
- if (!trip_count || read_only_trip_count > trip_count)
- return ERR_PTR(-EINVAL);
-
tj_max = intel_tcc_get_tjmax(-1);
if (tj_max < 0)
return ERR_PTR(tj_max);
@@ -399,37 +343,46 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
mutex_init(&sensors->dts_update_lock);
sensors->intr_type = intr_type;
sensors->tj_max = tj_max * 1000;
- if (intr_type == INTEL_SOC_DTS_INTERRUPT_NONE)
- notification = false;
- else
- notification = true;
+
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
+ enum thermal_trip_type trip_type;
+ int temp;
+
sensors->soc_dts[i].sensors = sensors;
- ret = add_dts_thermal_zone(i, &sensors->soc_dts[i],
- notification, trip_count,
- read_only_trip_count);
+
+ ret = configure_trip(&sensors->soc_dts[i], 0,
+ THERMAL_TRIP_PASSIVE, 0);
+ if (ret)
+ goto err_reset_trips;
+
+ if (critical_trip) {
+ trip_type = THERMAL_TRIP_CRITICAL;
+ temp = sensors->tj_max - crit_offset;
+ } else {
+ trip_type = THERMAL_TRIP_PASSIVE;
+ temp = 0;
+ }
+ ret = configure_trip(&sensors->soc_dts[i], 1, trip_type, temp);
if (ret)
- goto err_free;
+ goto err_reset_trips;
}
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- ret = update_trip_temp(&sensors->soc_dts[i], 0, 0,
- THERMAL_TRIP_PASSIVE);
- if (ret)
- goto err_remove_zone;
-
- ret = update_trip_temp(&sensors->soc_dts[i], 1, 0,
- THERMAL_TRIP_PASSIVE);
+ ret = add_dts_thermal_zone(i, &sensors->soc_dts[i], critical_trip);
if (ret)
goto err_remove_zone;
}
return sensors;
+
err_remove_zone:
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
remove_dts_thermal_zone(&sensors->soc_dts[i]);
-err_free:
+err_reset_trips:
+ for (i = 0; i < SOC_MAX_DTS_SENSORS; i++)
+ dts_trips_reset(sensors, i);
+
kfree(sensors);
return ERR_PTR(ret);
}
@@ -440,9 +393,8 @@ void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors)
int i;
for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
- update_trip_temp(&sensors->soc_dts[i], 0, 0, 0);
- update_trip_temp(&sensors->soc_dts[i], 1, 0, 0);
remove_dts_thermal_zone(&sensors->soc_dts[i]);
+ dts_trips_reset(sensors, i);
}
kfree(sensors);
}
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.h b/drivers/thermal/intel/intel_soc_dts_iosf.h
index c54945748200..162841df0ebe 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.h
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.h
@@ -12,6 +12,9 @@
/* DTS0 and DTS 1 */
#define SOC_MAX_DTS_SENSORS 2
+/* Only 2 out of 4 is allowed for OSPM */
+#define SOC_MAX_DTS_TRIPS 2
+
enum intel_soc_dts_interrupt_type {
INTEL_SOC_DTS_INTERRUPT_NONE,
INTEL_SOC_DTS_INTERRUPT_APIC,
@@ -26,8 +29,7 @@ struct intel_soc_dts_sensor_entry {
int id;
u32 store_status;
u32 trip_mask;
- u32 trip_count;
- enum thermal_trip_type trip_types[2];
+ struct thermal_trip trips[SOC_MAX_DTS_TRIPS];
struct thermal_zone_device *tzone;
struct intel_soc_dts_sensors *sensors;
};
@@ -40,12 +42,11 @@ struct intel_soc_dts_sensors {
struct intel_soc_dts_sensor_entry soc_dts[SOC_MAX_DTS_SENSORS];
};
-struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
- enum intel_soc_dts_interrupt_type intr_type, int trip_count,
- int read_only_trip_count);
+
+struct intel_soc_dts_sensors *
+intel_soc_dts_iosf_init(enum intel_soc_dts_interrupt_type intr_type,
+ bool critical_trip, int crit_offset);
void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors);
void intel_soc_dts_iosf_interrupt_handler(
struct intel_soc_dts_sensors *sensors);
-int intel_soc_dts_iosf_add_read_only_critical_trip(
- struct intel_soc_dts_sensors *sensors, int critical_offset);
#endif
diff --git a/drivers/thermal/intel/intel_soc_dts_thermal.c b/drivers/thermal/intel/intel_soc_dts_thermal.c
index 92e5c19d03f6..9c825c6e1f38 100644
--- a/drivers/thermal/intel/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel/intel_soc_dts_thermal.c
@@ -51,7 +51,8 @@ static int __init intel_soc_thermal_init(void)
return -ENODEV;
/* Create a zone with 2 trips with marked as read only */
- soc_dts = intel_soc_dts_iosf_init(INTEL_SOC_DTS_INTERRUPT_APIC, 2, 1);
+ soc_dts = intel_soc_dts_iosf_init(INTEL_SOC_DTS_INTERRUPT_APIC, true,
+ crit_offset);
if (IS_ERR(soc_dts)) {
err = PTR_ERR(soc_dts);
return err;
@@ -88,21 +89,7 @@ static int __init intel_soc_thermal_init(void)
}
}
- err = intel_soc_dts_iosf_add_read_only_critical_trip(soc_dts,
- crit_offset);
- if (err)
- goto error_trips;
-
return 0;
-
-error_trips:
- if (soc_dts_thres_irq) {
- free_irq(soc_dts_thres_irq, soc_dts);
- acpi_unregister_gsi(soc_dts_thres_gsi);
- }
- intel_soc_dts_iosf_exit(soc_dts);
-
- return err;
}
static void __exit intel_soc_thermal_exit(void)
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index e95f799454fe..6c392147e6d1 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -60,7 +60,7 @@ static const struct x86_cpu_id tcc_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index 1c3e590157ec..68f59b3735d3 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -11,7 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/thermal.h>
#include <linux/types.h>
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index 5be1f09eeb2c..a5a0fc9b9356 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -10,10 +10,10 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/types.h>
-#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/thermal.h>
#include <linux/of.h>
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index f59d36de20a0..c537aed71017 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -15,7 +15,6 @@
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index b693fac2d677..054c965ae5e1 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 5ddc39b2be32..756ac6842ff9 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index 0e8ebfcd84c5..78c5cfe6a0c0 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 9029d01e029b..bd2fb8c2e968 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index b8571f7090aa..293f8dd9fe0a 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
diff --git a/drivers/thermal/rzg2l_thermal.c b/drivers/thermal/rzg2l_thermal.c
index b56981f85306..6b2bf3426f52 100644
--- a/drivers/thermal/rzg2l_thermal.c
+++ b/drivers/thermal/rzg2l_thermal.c
@@ -9,8 +9,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 45e5c840d130..58f4d8f7a3fd 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -15,7 +15,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
index 2fb90fdad76e..e27c4bdc8912 100644
--- a/drivers/thermal/sprd_thermal.c
+++ b/drivers/thermal/sprd_thermal.c
@@ -6,7 +6,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 903fcf1763f1..142a7e5d12f4 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -14,8 +14,6 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 195f3c5d0b38..cca16d632d9f 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index c243e9d76d3c..d911fa60f100 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -18,7 +18,7 @@
#include <linux/iopoll.h>
#include <linux/math.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reset.h>
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index cc2b5e81c620..a59700593d32 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -348,7 +348,8 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip_id)
struct thermal_trip trip;
/* Ignore disabled trip points */
- if (test_bit(trip_id, &tz->trips_disabled))
+ if (test_bit(trip_id, &tz->trips_disabled) ||
+ trip.temperature == THERMAL_TEMP_INVALID)
return;
__thermal_zone_get_trip(tz, trip_id, &trip);
@@ -496,6 +497,25 @@ void thermal_zone_device_update(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL_GPL(thermal_zone_device_update);
+/**
+ * thermal_zone_device_exec - Run a callback under the zone lock.
+ * @tz: Thermal zone.
+ * @cb: Callback to run.
+ * @data: Data to pass to the callback.
+ */
+void thermal_zone_device_exec(struct thermal_zone_device *tz,
+ void (*cb)(struct thermal_zone_device *,
+ unsigned long),
+ unsigned long data)
+{
+ mutex_lock(&tz->lock);
+
+ cb(tz, data);
+
+ mutex_unlock(&tz->lock);
+}
+EXPORT_SYMBOL_GPL(thermal_zone_device_exec);
+
static void thermal_zone_device_check(struct work_struct *work)
{
struct thermal_zone_device *tz = container_of(work, struct
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 17c1bbed734d..04513f9fbfa1 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -54,10 +54,6 @@ int for_each_thermal_cooling_device(int (*cb)(struct thermal_cooling_device *,
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *thermal_governor);
-int __for_each_thermal_trip(struct thermal_zone_device *,
- int (*cb)(struct thermal_trip *, void *),
- void *);
-
struct thermal_zone_device *thermal_zone_get_by_id(int id);
struct thermal_attr {
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index bc07ae1c284c..4ca905723429 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -10,8 +10,7 @@
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/thermal.h>
#include <linux/types.h>
@@ -292,13 +291,13 @@ static int __thermal_of_unbind(struct device_node *map_np, int index, int trip_i
ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
index, &cooling_spec);
- of_node_put(cooling_spec.np);
-
if (ret < 0) {
pr_err("Invalid cooling-device entry\n");
return ret;
}
+ of_node_put(cooling_spec.np);
+
if (cooling_spec.args_count < 2) {
pr_err("wrong reference to cooling device, missing limits\n");
return -EINVAL;
@@ -325,13 +324,13 @@ static int __thermal_of_bind(struct device_node *map_np, int index, int trip_id,
ret = of_parse_phandle_with_args(map_np, "cooling-device", "#cooling-cells",
index, &cooling_spec);
- of_node_put(cooling_spec.np);
-
if (ret < 0) {
pr_err("Invalid cooling-device entry\n");
return ret;
}
+ of_node_put(cooling_spec.np);
+
if (cooling_spec.args_count < 2) {
pr_err("wrong reference to cooling device, missing limits\n");
return -EINVAL;
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 907f3a4d7bc8..53115cfdfd42 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -9,28 +9,26 @@
*/
#include "thermal_core.h"
-int __for_each_thermal_trip(struct thermal_zone_device *tz,
- int (*cb)(struct thermal_trip *, void *),
- void *data)
+int for_each_thermal_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data)
{
int i, ret;
- struct thermal_trip trip;
lockdep_assert_held(&tz->lock);
- for (i = 0; i < tz->num_trips; i++) {
-
- ret = __thermal_zone_get_trip(tz, i, &trip);
- if (ret)
- return ret;
+ if (!tz->trips)
+ return -ENODATA;
- ret = cb(&trip, data);
+ for (i = 0; i < tz->num_trips; i++) {
+ ret = cb(&tz->trips[i], data);
if (ret)
return ret;
}
return 0;
}
+EXPORT_SYMBOL_GPL(for_each_thermal_trip);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz)
{
diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c
index aef6119cc004..6f32ab61d174 100644
--- a/drivers/thermal/uniphier_thermal.c
+++ b/drivers/thermal/uniphier_thermal.c
@@ -12,7 +12,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 6fb0e007af63..386674ead7f0 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -580,7 +580,6 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
struct utp_transfer_req_desc *utrd;
- u32 mask = hwq->max_entries - 1;
__le64 cmd_desc_base_addr;
bool ret = false;
u64 addr, match;
@@ -608,7 +607,10 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
ret = true;
goto out;
}
- sq_head_slot = (sq_head_slot + 1) & mask;
+
+ sq_head_slot++;
+ if (sq_head_slot == hwq->max_entries)
+ sq_head_slot = 0;
}
out:
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8d6fd4c3324f..c1557d21b027 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -321,7 +321,7 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
REG_UFS_CFG1);
- if (host->hw_ver.major == 0x05)
+ if (host->hw_ver.major >= 0x05)
ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
/* make sure above configuration is applied before we return */
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 1a16a8bdea60..4f68f6ef3cc1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2642,21 +2642,21 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL\n", __func__);
ret = proc_control(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_BULK:
snoop(&dev->dev, "%s: BULK\n", __func__);
ret = proc_bulk(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_RESETEP:
snoop(&dev->dev, "%s: RESETEP\n", __func__);
ret = proc_resetep(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_RESET:
@@ -2668,7 +2668,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
ret = proc_clearhalt(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_GETDRIVER:
@@ -2695,7 +2695,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
ret = proc_submiturb(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
#ifdef CONFIG_COMPAT
@@ -2703,14 +2703,14 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL32\n", __func__);
ret = proc_control_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_BULK32:
snoop(&dev->dev, "%s: BULK32\n", __func__);
ret = proc_bulk_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_DISCSIGNAL32:
@@ -2722,7 +2722,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
ret = proc_submiturb_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
break;
case USBDEVFS_IOCTL32:
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index f41a385a5c42..6e9ef35a43a7 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1377,7 +1377,7 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode = new_inode(sb);
if (inode) {
- struct timespec64 ts = current_time(inode);
+ struct timespec64 ts = inode_set_ctime_current(inode);
inode->i_ino = get_next_ino();
inode->i_mode = perms->mode;
@@ -1385,7 +1385,6 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode->i_gid = perms->gid;
inode->i_atime = ts;
inode->i_mtime = ts;
- inode->i_ctime = ts;
inode->i_private = data;
if (fops)
inode->i_fop = fops;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 28249d0bf062..ce9e31f3d26b 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1969,8 +1969,7 @@ gadgetfs_make_inode (struct super_block *sb,
inode->i_mode = mode;
inode->i_uid = make_kuid(&init_user_ns, default_uid);
inode->i_gid = make_kgid(&init_user_ns, default_gid);
- inode->i_atime = inode->i_mtime = inode->i_ctime
- = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_private = data;
inode->i_fop = fops;
}
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d5d7c402b651..d43153fec18e 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,6 +269,13 @@ config XEN_PRIVCMD
disaggregated Xen setups this driver might be needed for other
domains, too.
+config XEN_PRIVCMD_IRQFD
+ bool "Xen irqfd support"
+ depends on XEN_PRIVCMD && XEN_VIRTIO && EVENTFD
+ help
+ Using the irqfd mechanism a virtio backend running in a daemon can
+ speed up interrupt injection into a guest.
+
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f13c3b76ad1e..35659bf70746 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
/**
* gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
- * @nr_pages; number of pages to free
+ * @nr_pages: number of pages to free
* @pages: the pages
*/
void gnttab_free_pages(int nr_pages, struct page **pages)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index f447cd37cc4c..f00ad5f5f1d4 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -9,11 +9,16 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/eventfd.h>
+#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
@@ -833,6 +838,263 @@ out:
return rc;
}
+#ifdef CONFIG_XEN_PRIVCMD_IRQFD
+/* Irqfd support */
+static struct workqueue_struct *irqfd_cleanup_wq;
+static DEFINE_MUTEX(irqfds_lock);
+static LIST_HEAD(irqfds_list);
+
+struct privcmd_kernel_irqfd {
+ struct xen_dm_op_buf xbufs;
+ domid_t dom;
+ bool error;
+ struct eventfd_ctx *eventfd;
+ struct work_struct shutdown;
+ wait_queue_entry_t wait;
+ struct list_head list;
+ poll_table pt;
+};
+
+static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
+{
+ lockdep_assert_held(&irqfds_lock);
+
+ list_del_init(&kirqfd->list);
+ queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
+}
+
+static void irqfd_shutdown(struct work_struct *work)
+{
+ struct privcmd_kernel_irqfd *kirqfd =
+ container_of(work, struct privcmd_kernel_irqfd, shutdown);
+ u64 cnt;
+
+ eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
+ eventfd_ctx_put(kirqfd->eventfd);
+ kfree(kirqfd);
+}
+
+static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
+{
+ u64 cnt;
+ long rc;
+
+ eventfd_ctx_do_read(kirqfd->eventfd, &cnt);
+
+ xen_preemptible_hcall_begin();
+ rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
+ xen_preemptible_hcall_end();
+
+ /* Don't repeat the error message for consecutive failures */
+ if (rc && !kirqfd->error) {
+ pr_err("Failed to configure irq for guest domain: %d\n",
+ kirqfd->dom);
+ }
+
+ kirqfd->error = rc;
+}
+
+static int
+irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
+{
+ struct privcmd_kernel_irqfd *kirqfd =
+ container_of(wait, struct privcmd_kernel_irqfd, wait);
+ __poll_t flags = key_to_poll(key);
+
+ if (flags & EPOLLIN)
+ irqfd_inject(kirqfd);
+
+ if (flags & EPOLLHUP) {
+ mutex_lock(&irqfds_lock);
+ irqfd_deactivate(kirqfd);
+ mutex_unlock(&irqfds_lock);
+ }
+
+ return 0;
+}
+
+static void
+irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
+{
+ struct privcmd_kernel_irqfd *kirqfd =
+ container_of(pt, struct privcmd_kernel_irqfd, pt);
+
+ add_wait_queue_priority(wqh, &kirqfd->wait);
+}
+
+static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
+{
+ struct privcmd_kernel_irqfd *kirqfd, *tmp;
+ __poll_t events;
+ struct fd f;
+ void *dm_op;
+ int ret;
+
+ kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
+ if (!kirqfd)
+ return -ENOMEM;
+ dm_op = kirqfd + 1;
+
+ if (copy_from_user(dm_op, irqfd->dm_op, irqfd->size)) {
+ ret = -EFAULT;
+ goto error_kfree;
+ }
+
+ kirqfd->xbufs.size = irqfd->size;
+ set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
+ kirqfd->dom = irqfd->dom;
+ INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);
+
+ f = fdget(irqfd->fd);
+ if (!f.file) {
+ ret = -EBADF;
+ goto error_kfree;
+ }
+
+ kirqfd->eventfd = eventfd_ctx_fileget(f.file);
+ if (IS_ERR(kirqfd->eventfd)) {
+ ret = PTR_ERR(kirqfd->eventfd);
+ goto error_fd_put;
+ }
+
+ /*
+ * Install our own custom wake-up handling so we are notified via a
+ * callback whenever someone signals the underlying eventfd.
+ */
+ init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
+ init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);
+
+ mutex_lock(&irqfds_lock);
+
+ list_for_each_entry(tmp, &irqfds_list, list) {
+ if (kirqfd->eventfd == tmp->eventfd) {
+ ret = -EBUSY;
+ mutex_unlock(&irqfds_lock);
+ goto error_eventfd;
+ }
+ }
+
+ list_add_tail(&kirqfd->list, &irqfds_list);
+ mutex_unlock(&irqfds_lock);
+
+ /*
+ * Check if there was an event already pending on the eventfd before we
+ * registered, and trigger it as if we didn't miss it.
+ */
+ events = vfs_poll(f.file, &kirqfd->pt);
+ if (events & EPOLLIN)
+ irqfd_inject(kirqfd);
+
+ /*
+ * Do not drop the file until the kirqfd is fully initialized, otherwise
+ * we might race against the EPOLLHUP.
+ */
+ fdput(f);
+ return 0;
+
+error_eventfd:
+ eventfd_ctx_put(kirqfd->eventfd);
+
+error_fd_put:
+ fdput(f);
+
+error_kfree:
+ kfree(kirqfd);
+ return ret;
+}
+
+static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
+{
+ struct privcmd_kernel_irqfd *kirqfd;
+ struct eventfd_ctx *eventfd;
+
+ eventfd = eventfd_ctx_fdget(irqfd->fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ mutex_lock(&irqfds_lock);
+
+ list_for_each_entry(kirqfd, &irqfds_list, list) {
+ if (kirqfd->eventfd == eventfd) {
+ irqfd_deactivate(kirqfd);
+ break;
+ }
+ }
+
+ mutex_unlock(&irqfds_lock);
+
+ eventfd_ctx_put(eventfd);
+
+ /*
+ * Block until we know all outstanding shutdown jobs have completed so
+ * that we guarantee there will not be any more interrupts once this
+ * deassign function returns.
+ */
+ flush_workqueue(irqfd_cleanup_wq);
+
+ return 0;
+}
+
+static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
+{
+ struct privcmd_data *data = file->private_data;
+ struct privcmd_irqfd irqfd;
+
+ if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
+ return -EFAULT;
+
+ /* No other flags should be set */
+ if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
+ return -EINVAL;
+
+ /* If restriction is in place, check the domid matches */
+ if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
+ return -EPERM;
+
+ if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
+ return privcmd_irqfd_deassign(&irqfd);
+
+ return privcmd_irqfd_assign(&irqfd);
+}
+
+static int privcmd_irqfd_init(void)
+{
+ irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
+ if (!irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void privcmd_irqfd_exit(void)
+{
+ struct privcmd_kernel_irqfd *kirqfd, *tmp;
+
+ mutex_lock(&irqfds_lock);
+
+ list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
+ irqfd_deactivate(kirqfd);
+
+ mutex_unlock(&irqfds_lock);
+
+ destroy_workqueue(irqfd_cleanup_wq);
+}
+#else
+static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int privcmd_irqfd_init(void)
+{
+ return 0;
+}
+
+static inline void privcmd_irqfd_exit(void)
+{
+}
+#endif /* CONFIG_XEN_PRIVCMD_IRQFD */
+
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
{
@@ -868,6 +1130,10 @@ static long privcmd_ioctl(struct file *file,
ret = privcmd_ioctl_mmap_resource(file, udata);
break;
+ case IOCTL_PRIVCMD_IRQFD:
+ ret = privcmd_ioctl_irqfd(file, udata);
+ break;
+
default:
break;
}
@@ -992,15 +1258,27 @@ static int __init privcmd_init(void)
err = misc_register(&xen_privcmdbuf_dev);
if (err != 0) {
pr_err("Could not register Xen hypercall-buf device\n");
- misc_deregister(&privcmd_dev);
- return err;
+ goto err_privcmdbuf;
+ }
+
+ err = privcmd_irqfd_init();
+ if (err != 0) {
+ pr_err("irqfd init failed\n");
+ goto err_irqfd;
}
return 0;
+
+err_irqfd:
+ misc_deregister(&xen_privcmdbuf_dev);
+err_privcmdbuf:
+ misc_deregister(&privcmd_dev);
+ return err;
}
static void __exit privcmd_exit(void)
{
+ privcmd_irqfd_exit();
misc_deregister(&privcmd_dev);
misc_deregister(&xen_privcmdbuf_dev);
}
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 9cb61db67efd..296703939846 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -473,11 +473,8 @@ static int xen_upload_processor_pm_data(void)
if (!_pr)
continue;
- if (!pr_backup) {
- pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
- if (pr_backup)
- memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
- }
+ if (!pr_backup)
+ pr_backup = kmemdup(_pr, sizeof(*_pr), GFP_KERNEL);
(void)upload_pm_data(_pr);
}
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.h b/drivers/xen/xen-pciback/conf_space_quirks.h
index d873abe35bf6..fc1557dfef49 100644
--- a/drivers/xen/xen-pciback/conf_space_quirks.h
+++ b/drivers/xen/xen-pciback/conf_space_quirks.h
@@ -21,8 +21,6 @@ struct xen_pcibk_config_quirk {
int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
*field);
-int xen_pcibk_config_quirks_remove_field(struct pci_dev *dev, int reg);
-
int xen_pcibk_config_quirks_init(struct pci_dev *dev);
void xen_pcibk_config_field_free(struct config_field *field);
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 9a64196e831d..f9599ed2f2e2 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -201,6 +201,3 @@ static inline void xen_pcibk_lateeoi(struct xen_pcibk_device *pdev,
int xen_pcibk_xenbus_register(void);
void xen_pcibk_xenbus_unregister(void);
#endif
-
-/* Handles shared IRQs that can to device domain and control domain. */
-void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 3f3836cb7279..fcb335bb7b18 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -429,7 +429,7 @@ static void xenbus_check_frontend(char *class, char *dev)
printk(KERN_DEBUG "XENBUS: frontend %s %s\n",
frontend, xenbus_strstate(fe_state));
backend = xenbus_read(XBT_NIL, frontend, "backend", NULL);
- if (!backend || IS_ERR(backend))
+ if (IS_ERR_OR_NULL(backend))
goto out;
err = xenbus_scanf(XBT_NIL, backend, "state", "%i", &be_state);
if (err == 1)
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 12e02eb01f59..028a182bcc9e 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -840,8 +840,8 @@ void xs_suspend(void)
{
xs_suspend_enter();
- down_write(&xs_watch_rwsem);
mutex_lock(&xs_response_mutex);
+ down_write(&xs_watch_rwsem);
}
void xs_resume(void)
@@ -866,8 +866,8 @@ void xs_resume(void)
void xs_suspend_cancel(void)
{
- mutex_unlock(&xs_response_mutex);
up_write(&xs_watch_rwsem);
+ mutex_unlock(&xs_response_mutex);
xs_suspend_exit();
}
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index fa3c83dbe843..077114ccc840 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/zorro.h>
+#include "zorro.h"
struct zorro_prod_info {
__u16 prod;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 950cf61f118b..0d28ecf668d0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -260,7 +260,7 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_blocks = 0;
inode->i_rdev = rdev;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_mapping->a_ops = &v9fs_addr_operations;
inode->i_private = NULL;
@@ -1011,7 +1011,7 @@ v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct path *path,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
} else if (v9ses->cache & CACHE_WRITEBACK) {
if (S_ISREG(inode->i_mode)) {
@@ -1032,7 +1032,7 @@ v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct path *path,
return PTR_ERR(st);
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
- generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
p9stat_free(st);
kfree(st);
@@ -1152,7 +1152,7 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
inode->i_atime.tv_sec = stat->atime;
inode->i_mtime.tv_sec = stat->mtime;
- inode->i_ctime.tv_sec = stat->mtime;
+ inode_set_ctime(inode, stat->mtime, 0);
inode->i_uid = v9ses->dfltuid;
inode->i_gid = v9ses->dfltgid;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 14510872ecc3..1312f68965ac 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -450,7 +450,7 @@ v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
} else if (v9ses->cache) {
if (S_ISREG(inode->i_mode)) {
@@ -475,7 +475,7 @@ v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap,
return PTR_ERR(st);
v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
- generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@@ -645,8 +645,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
inode->i_atime.tv_nsec = stat->st_atime_nsec;
inode->i_mtime.tv_sec = stat->st_mtime_sec;
inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
- inode->i_ctime.tv_sec = stat->st_ctime_sec;
- inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ inode_set_ctime(inode, stat->st_ctime_sec,
+ stat->st_ctime_nsec);
inode->i_uid = stat->st_uid;
inode->i_gid = stat->st_gid;
set_nlink(inode, stat->st_nlink);
@@ -668,8 +668,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
}
if (stat->st_result_mask & P9_STATS_CTIME) {
- inode->i_ctime.tv_sec = stat->st_ctime_sec;
- inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+ inode_set_ctime(inode, stat->st_ctime_sec,
+ stat->st_ctime_nsec);
}
if (stat->st_result_mask & P9_STATS_UID)
inode->i_uid = stat->st_uid;
diff --git a/fs/Kconfig b/fs/Kconfig
index 18d034ec7953..7da21f563192 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -205,8 +205,8 @@ config TMPFS_XATTR
Extended attributes are name:value pairs associated with inodes by
the kernel or by users (see the attr(5) manual page for details).
- Currently this enables support for the trusted.* and
- security.* namespaces.
+ This enables support for the trusted.*, security.* and user.*
+ namespaces.
You need this for POSIX ACL support on tmpfs.
@@ -233,6 +233,18 @@ config TMPFS_INODE64
If unsure, say N.
+config TMPFS_QUOTA
+ bool "Tmpfs quota support"
+ depends on TMPFS
+ select QUOTA
+ help
+ Quota support allows to set per user and group limits for tmpfs
+ usage. Say Y to enable quota support. Once enabled you can control
+ user and group quota enforcement with quota, usrquota and grpquota
+ mount options.
+
+ If unsure, say N.
+
config ARCH_SUPPORTS_HUGETLBFS
def_bool n
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index c3ac613d0975..20963002578a 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -270,7 +270,7 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
inode->i_mode = adfs_atts2mode(sb, inode);
adfs_adfs2unix_time(&inode->i_mtime, inode);
inode->i_atime = inode->i_mtime;
- inode->i_ctime = inode->i_mtime;
+ inode_set_ctime_to_ts(inode, inode->i_mtime);
if (S_ISDIR(inode->i_mode)) {
inode->i_op = &adfs_dir_inode_operations;
@@ -331,7 +331,7 @@ adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
if (ia_valid & ATTR_ATIME)
inode->i_atime = attr->ia_atime;
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
ADFS_I(inode)->attr = adfs_mode2atts(sb, inode, attr->ia_mode);
inode->i_mode = adfs_atts2mode(sb, inode);
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 29f11e10a7c7..7ba93efc1143 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -60,7 +60,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh)
mark_buffer_dirty_inode(dir_bh, dir);
affs_brelse(dir_bh);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
inode_inc_iversion(dir);
mark_inode_dirty(dir);
@@ -114,7 +114,7 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
affs_brelse(bh);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
inode_inc_iversion(dir);
mark_inode_dirty(dir);
@@ -315,7 +315,7 @@ affs_remove_header(struct dentry *dentry)
else
clear_nlink(inode);
affs_unlock_link(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
done:
diff --git a/fs/affs/file.c b/fs/affs/file.c
index e43f2f007ac1..472e2bdd5349 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -15,6 +15,7 @@
#include <linux/uio.h>
#include <linux/blkdev.h>
+#include <linux/mpage.h>
#include "affs.h"
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
@@ -370,9 +371,10 @@ err_alloc:
return -ENOSPC;
}
-static int affs_writepage(struct page *page, struct writeback_control *wbc)
+static int affs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
{
- return block_write_full_page(page, affs_get_block, wbc);
+ return mpage_writepages(mapping, wbc, affs_get_block);
}
static int affs_read_folio(struct file *file, struct folio *folio)
@@ -456,10 +458,11 @@ const struct address_space_operations affs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = affs_read_folio,
- .writepage = affs_writepage,
+ .writepages = affs_writepages,
.write_begin = affs_write_begin,
.write_end = affs_write_end,
.direct_IO = affs_direct_IO,
+ .migrate_folio = buffer_migrate_folio,
.bmap = _affs_bmap
};
@@ -835,9 +838,10 @@ const struct address_space_operations affs_aops_ofs = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = affs_read_folio_ofs,
- //.writepage = affs_writepage_ofs,
+ //.writepages = affs_writepages_ofs,
.write_begin = affs_write_begin_ofs,
- .write_end = affs_write_end_ofs
+ .write_end = affs_write_end_ofs,
+ .migrate_folio = filemap_migrate_folio,
};
/* Free any preallocated blocks. */
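
The affs hunk above converts a per-page ->writepage into a ->writepages implementation built on mpage_writepages(), and wires up folio migration. A minimal sketch of the same pattern for a hypothetical get_block-based filesystem (the foofs_* names are illustrative only):

#include <linux/mpage.h>
#include <linux/buffer_head.h>

static int foofs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        /* mpage_writepages() batches contiguous blocks into larger bios. */
        return mpage_writepages(mapping, wbc, foofs_get_block);
}

static const struct address_space_operations foofs_aops = {
        .writepages     = foofs_writepages,
        .migrate_folio  = buffer_migrate_folio,
        /* read_folio, write_begin, write_end, ... as before */
};
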
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 27f77a52c5c8..060746c63151 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -149,13 +149,13 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
break;
}
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec
- = (be32_to_cpu(tail->change.days) * 86400LL +
- be32_to_cpu(tail->change.mins) * 60 +
- be32_to_cpu(tail->change.ticks) / 50 +
- AFFS_EPOCH_DELTA) +
- sys_tz.tz_minuteswest * 60;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
+ inode->i_mtime.tv_sec = inode->i_atime.tv_sec =
+ inode_set_ctime(inode,
+ (be32_to_cpu(tail->change.days) * 86400LL +
+ be32_to_cpu(tail->change.mins) * 60 +
+ be32_to_cpu(tail->change.ticks) / 50 + AFFS_EPOCH_DELTA)
+ + sys_tz.tz_minuteswest * 60, 0).tv_sec;
+ inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0;
affs_brelse(bh);
unlock_new_inode(inode);
return inode;
@@ -314,7 +314,7 @@ affs_new_inode(struct inode *dir)
inode->i_gid = current_fsgid();
inode->i_ino = block;
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
atomic_set(&AFFS_I(inode)->i_opencnt, 0);
AFFS_I(inode)->i_blkcnt = 0;
AFFS_I(inode)->i_lc = NULL;
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index d12ccfd2a83d..2fe4a5832fcf 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -43,7 +43,7 @@ affs_get_toupper(struct super_block *sb)
* Note: the dentry argument is the parent dentry.
*/
static inline int
-__affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t toupper, bool notruncate)
+__affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t fn, bool notruncate)
{
const u8 *name = qstr->name;
unsigned long hash;
@@ -57,7 +57,7 @@ __affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t tou
hash = init_name_hash(dentry);
len = min(qstr->len, AFFSNAMEMAX);
for (; len > 0; name++, len--)
- hash = partial_name_hash(toupper(*name), hash);
+ hash = partial_name_hash(fn(*name), hash);
qstr->hash = end_name_hash(hash);
return 0;
@@ -80,7 +80,7 @@ affs_intl_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
}
static inline int __affs_compare_dentry(unsigned int len,
- const char *str, const struct qstr *name, toupper_t toupper,
+ const char *str, const struct qstr *name, toupper_t fn,
bool notruncate)
{
const u8 *aname = str;
@@ -106,7 +106,7 @@ static inline int __affs_compare_dentry(unsigned int len,
return 1;
for (; len > 0; len--)
- if (toupper(*aname++) != toupper(*bname++))
+ if (fn(*aname++) != fn(*bname++))
return 1;
return 0;
@@ -135,7 +135,7 @@ affs_intl_compare_dentry(const struct dentry *dentry,
*/
static inline int
-affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
+affs_match(struct dentry *dentry, const u8 *name2, toupper_t fn)
{
const u8 *name = dentry->d_name.name;
int len = dentry->d_name.len;
@@ -148,7 +148,7 @@ affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
return 0;
for (name2++; len > 0; len--)
- if (toupper(*name++) != toupper(*name2++))
+ if (fn(*name++) != fn(*name2++))
return 0;
return 1;
}
@@ -156,12 +156,12 @@ affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
int
affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len)
{
- toupper_t toupper = affs_get_toupper(sb);
+ toupper_t fn = affs_get_toupper(sb);
u32 hash;
hash = len = min(len, AFFSNAMEMAX);
for (; len > 0; len--)
- hash = (hash * 13 + toupper(*name++)) & 0x7ff;
+ hash = (hash * 13 + fn(*name++)) & 0x7ff;
return hash % AFFS_SB(sb)->s_hashsize;
}
@@ -171,7 +171,7 @@ affs_find_entry(struct inode *dir, struct dentry *dentry)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
- toupper_t toupper = affs_get_toupper(sb);
+ toupper_t fn = affs_get_toupper(sb);
u32 key;
pr_debug("%s(\"%pd\")\n", __func__, dentry);
@@ -189,7 +189,7 @@ affs_find_entry(struct inode *dir, struct dentry *dentry)
bh = affs_bread(sb, key);
if (!bh)
return ERR_PTR(-EIO);
- if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper))
+ if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, fn))
return bh;
key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
}
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index d7d9402ff718..95bcbd7654d1 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -88,7 +88,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
set_nlink(inode, 2);
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_blocks = 0;
inode->i_generation = 0;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 866bab860a88..1c794a1896aa 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -90,7 +90,7 @@ static int afs_inode_init_from_status(struct afs_operation *op,
vnode->status = *status;
t = status->mtime_client;
- inode->i_ctime = t;
+ inode_set_ctime_to_ts(inode, t);
inode->i_mtime = t;
inode->i_atime = t;
inode->i_flags |= S_NOATIME;
@@ -206,7 +206,7 @@ static void afs_apply_status(struct afs_operation *op,
t = status->mtime_client;
inode->i_mtime = t;
if (vp->update_ctime)
- inode->i_ctime = op->ctime;
+ inode_set_ctime_to_ts(inode, op->ctime);
if (vnode->status.data_version != status->data_version)
data_changed = true;
@@ -252,7 +252,7 @@ static void afs_apply_status(struct afs_operation *op,
vnode->netfs.remote_i_size = status->size;
if (change_size) {
afs_set_i_size(vnode, status->size);
- inode->i_ctime = t;
+ inode_set_ctime_to_ts(inode, t);
inode->i_atime = t;
}
}
@@ -773,7 +773,7 @@ int afs_getattr(struct mnt_idmap *idmap, const struct path *path,
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
stat->nlink > 0)
stat->nlink -= 1;
diff --git a/fs/aio.c b/fs/aio.c
index 77e33619de40..b3174da80ff6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1447,13 +1447,8 @@ static void aio_complete_rw(struct kiocb *kiocb, long res)
if (kiocb->ki_flags & IOCB_WRITE) {
struct inode *inode = file_inode(kiocb->ki_filp);
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
if (S_ISREG(inode->i_mode))
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- file_end_write(kiocb->ki_filp);
+ kiocb_end_write(kiocb);
}
iocb->ki_res.res = res;
@@ -1581,17 +1576,8 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb,
return ret;
ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
if (!ret) {
- /*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * aio_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
- */
- if (S_ISREG(file_inode(file)->i_mode)) {
- sb_start_write(file_inode(file)->i_sb);
- __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
- }
+ if (S_ISREG(file_inode(file)->i_mode))
+ kiocb_start_write(req);
req->ki_flags |= IOCB_WRITE;
aio_rw_done(req, call_write_iter(file, req, &iter));
}
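
The aio hunks above replace the open-coded freeze-protection and lockdep handoff with the kiocb_start_write()/kiocb_end_write() pair. A hedged sketch of the pairing for a hypothetical async write path (the example_* names are illustrative):

static void example_submit_write(struct kiocb *req, struct file *file)
{
        /* Take sb freeze protection at submission time for regular files;
         * the helper also tells lockdep the lock is released elsewhere. */
        if (S_ISREG(file_inode(file)->i_mode))
                kiocb_start_write(req);
        req->ki_flags |= IOCB_WRITE;
}

static void example_complete_write(struct kiocb *req)
{
        /* Completion may run on a different thread; drop the protection here. */
        if (S_ISREG(file_inode(req->ki_filp)->i_mode))
                kiocb_end_write(req);
}
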
diff --git a/fs/attr.c b/fs/attr.c
index d60dc1edb526..a8ae5f6d9b16 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -312,7 +312,7 @@ void setattr_copy(struct mnt_idmap *idmap, struct inode *inode,
if (ia_valid & ATTR_MTIME)
inode->i_mtime = attr->ia_mtime;
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
if (!in_group_or_capable(idmap, inode,
@@ -394,9 +394,25 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
return error;
if ((ia_valid & ATTR_MODE)) {
- umode_t amode = attr->ia_mode;
+ /*
+ * Don't allow changing the mode of symlinks:
+ *
+ * (1) The vfs doesn't take the mode of symlinks into account
+ * during permission checking.
+ * (2) This has never worked correctly. Most major filesystems
+ * did return EOPNOTSUPP due to interactions with POSIX ACLs
+ * but still updated the mode of the symlink.
+ * This inconsistency led system call wrapper providers such
+ * as libc to block changing the mode of symlinks with
+ * EOPNOTSUPP already.
+ * (3) To even do this in the first place one would have to use
+ * specific file descriptors and quite some effort.
+ */
+ if (S_ISLNK(inode->i_mode))
+ return -EOPNOTSUPP;
+
/* Flag setting protected by i_mutex */
- if (is_sxid(amode))
+ if (is_sxid(attr->ia_mode))
inode->i_flags &= ~S_NOSEC;
}
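
The notify_change() comment above explains why chmod on a symlink is now rejected with EOPNOTSUPP at the VFS level. A hedged userspace illustration of the visible behaviour (the path name is hypothetical):

#include <fcntl.h>
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
        /* Changing a symlink's mode is refused; per the comment above, libc
         * wrappers already report EOPNOTSUPP for this case. */
        if (fchmodat(AT_FDCWD, "some-symlink", 0644, AT_SYMLINK_NOFOLLOW) < 0)
                perror("fchmodat");
        return 0;
}
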
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index affa70360b1f..2b49662ed237 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -370,7 +370,7 @@ struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
inode->i_uid = d_inode(sb->s_root)->i_uid;
inode->i_gid = d_inode(sb->s_root)->i_gid;
}
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_ino = get_next_ino();
if (S_ISDIR(mode)) {
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 93046c9dc461..512b9a26c63d 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -600,7 +600,7 @@ static int autofs_dir_symlink(struct mnt_idmap *idmap,
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
return 0;
}
@@ -633,7 +633,7 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
spin_lock(&sbi->lookup_lock);
__autofs_add_expiring(dentry);
@@ -749,7 +749,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
inc_nlink(dir);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
return 0;
}
diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c
index 54c1f8b8b075..33dd4660d82f 100644
--- a/fs/autofs/waitq.c
+++ b/fs/autofs/waitq.c
@@ -32,8 +32,9 @@ void autofs_catatonic_mode(struct autofs_sb_info *sbi)
wq->status = -ENOENT; /* Magic is gone - report failure */
kfree(wq->name.name - wq->offset);
wq->name.name = NULL;
- wq->wait_ctr--;
- wake_up_interruptible(&wq->queue);
+ wake_up(&wq->queue);
+ if (!--wq->wait_ctr)
+ kfree(wq);
wq = nwq;
}
fput(sbi->pipe); /* Close the pipe */
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index db649487d58c..83f9566c973b 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -133,8 +133,7 @@ static int bad_inode_fiemap(struct inode *inode,
return -EIO;
}
-static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
- int flags)
+static int bad_inode_update_time(struct inode *inode, int flags)
{
return -EIO;
}
@@ -209,8 +208,7 @@ void make_bad_inode(struct inode *inode)
remove_inode_hash(inode);
inode->i_mode = S_IFREG;
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_op = &bad_inode_ops;
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &bad_file_ops;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index eee9237386e2..9a16a51fbb88 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -363,7 +363,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
inode->i_mtime.tv_sec =
fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
- inode->i_ctime = inode->i_mtime;
+ inode_set_ctime_to_ts(inode, inode->i_mtime);
inode->i_atime = inode->i_mtime;
befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 040d5140e426..12b8af04dcb3 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -97,7 +97,7 @@ static int bfs_create(struct mnt_idmap *idmap, struct inode *dir,
set_bit(ino, info->si_imap);
info->si_freei--;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
@@ -158,7 +158,7 @@ static int bfs_link(struct dentry *old, struct inode *dir,
return err;
}
inc_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
ihold(inode);
d_instantiate(new, inode);
@@ -187,9 +187,9 @@ static int bfs_unlink(struct inode *dir, struct dentry *dentry)
}
de->ino = 0;
mark_buffer_dirty_inode(bh, dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
error = 0;
@@ -240,10 +240,10 @@ static int bfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto end_rename;
}
old_de->ino = 0;
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ old_dir->i_mtime = inode_set_ctime_current(old_dir);
mark_inode_dirty(old_dir);
if (new_inode) {
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
inode_dec_link_count(new_inode);
}
mark_buffer_dirty_inode(old_bh, old_dir);
@@ -292,9 +292,9 @@ static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino)
pos = (block - sblock) * BFS_BSIZE + off;
if (pos >= dir->i_size) {
dir->i_size += BFS_DIRENT_SIZE;
- dir->i_ctime = current_time(dir);
+ inode_set_ctime_current(dir);
}
- dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
de->ino = cpu_to_le16((u16)ino);
for (i = 0; i < BFS_NAMELEN; i++)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 1926bec2c850..e6a76ae9eb44 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -82,10 +82,9 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
inode->i_blocks = BFS_FILEBLOCKS(di);
inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
- inode->i_ctime.tv_sec = le32_to_cpu(di->i_ctime);
+ inode_set_ctime(inode, le32_to_cpu(di->i_ctime), 0);
inode->i_atime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
brelse(bh);
unlock_new_inode(inode);
@@ -143,7 +142,7 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
di->i_nlink = cpu_to_le32(inode->i_nlink);
di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
- di->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+ di->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec);
i_sblock = BFS_I(inode)->i_sblock;
di->i_sblock = cpu_to_le32(i_sblock);
di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index bb202ad369d5..e0108d17b085 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -547,8 +547,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
return inode;
}
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 66fa9ab2c046..3282adc84d52 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -49,9 +49,11 @@ config BTRFS_FS_POSIX_ACL
If you don't know what Access Control Lists are, say N
config BTRFS_FS_CHECK_INTEGRITY
- bool "Btrfs with integrity check tool compiled in (DANGEROUS)"
+ bool "Btrfs with integrity check tool compiled in (DEPRECATED)"
depends on BTRFS_FS
help
+ This feature has been deprecated and will be removed in 6.7.
+
Adds code that examines all block write requests (including
writes of the super block). The goal is to verify that the
state of the filesystem on disk is always consistent, i.e.,
diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
index ceadfc5d6c66..8cfc8214109c 100644
--- a/fs/btrfs/accessors.h
+++ b/fs/btrfs/accessors.h
@@ -3,6 +3,8 @@
#ifndef BTRFS_ACCESSORS_H
#define BTRFS_ACCESSORS_H
+#include <linux/stddef.h>
+
struct btrfs_map_token {
struct extent_buffer *eb;
char *kaddr;
@@ -34,13 +36,13 @@ static inline void put_unaligned_le8(u8 val, void *p)
read_extent_buffer(eb, (char *)(result), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
- sizeof(((type *)0)->member)))
+ sizeof_field(type, member)))
#define write_eb_member(eb, ptr, type, member, result) (\
write_extent_buffer(eb, (char *)(result), \
((unsigned long)(ptr)) + \
offsetof(type, member), \
- sizeof(((type *)0)->member)))
+ sizeof_field(type, member)))
#define DECLARE_BTRFS_SETGET_BITS(bits) \
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
@@ -62,25 +64,25 @@ DECLARE_BTRFS_SETGET_BITS(64)
static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
const type *s) \
{ \
- static_assert(sizeof(u##bits) == sizeof(((type *)0))->member); \
+ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
return btrfs_get_##bits(eb, s, offsetof(type, member)); \
} \
static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \
u##bits val) \
{ \
- static_assert(sizeof(u##bits) == sizeof(((type *)0))->member); \
+ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
btrfs_set_##bits(eb, s, offsetof(type, member), val); \
} \
static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \
const type *s) \
{ \
- static_assert(sizeof(u##bits) == sizeof(((type *)0))->member); \
+ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
return btrfs_get_token_##bits(token, s, offsetof(type, member));\
} \
static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
type *s, u##bits val) \
{ \
- static_assert(sizeof(u##bits) == sizeof(((type *)0))->member); \
+ static_assert(sizeof(u##bits) == sizeof_field(type, member)); \
btrfs_set_token_##bits(token, s, offsetof(type, member), val); \
}
@@ -111,17 +113,14 @@ static inline void btrfs_set_##name(type *s, u##bits val) \
static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb,
struct btrfs_dev_item *s)
{
- static_assert(sizeof(u64) ==
- sizeof(((struct btrfs_dev_item *)0))->total_bytes);
- return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
- total_bytes));
+ static_assert(sizeof(u64) == sizeof_field(struct btrfs_dev_item, total_bytes));
+ return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes));
}
static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb,
struct btrfs_dev_item *s,
u64 val)
{
- static_assert(sizeof(u64) ==
- sizeof(((struct btrfs_dev_item *)0))->total_bytes);
+ static_assert(sizeof(u64) == sizeof_field(struct btrfs_dev_item, total_bytes));
WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
}
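
The accessors.h hunks above swap the open-coded member-size expression for sizeof_field() from <linux/stddef.h> (hence the new include). For reference, that macro expands to roughly the same expression the old code spelled out by hand:

#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

so the static_assert() checks keep their meaning; only the notation becomes more readable.
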
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 79336fa853db..b7d54efb4728 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -3373,7 +3373,6 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
struct btrfs_key *node_key,
struct btrfs_backref_node *cur)
{
- struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_backref_edge *edge;
struct btrfs_backref_node *exist;
int ret;
@@ -3462,25 +3461,21 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
ret = handle_direct_tree_backref(cache, &key, cur);
if (ret < 0)
goto out;
- continue;
- } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
- ret = -EINVAL;
- btrfs_print_v0_err(fs_info);
- btrfs_handle_fs_error(fs_info, ret, NULL);
- goto out;
- } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
- continue;
+ } else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
+ /*
+ * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref
+ * offset means the root objectid. We need to search
+ * the tree to get its parent bytenr.
+ */
+ ret = handle_indirect_tree_backref(cache, path, &key, node_key,
+ cur);
+ if (ret < 0)
+ goto out;
}
-
/*
- * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
- * means the root objectid. We need to search the tree to get
- * its parent bytenr.
+ * Unrecognized tree backref items (if they pass the tree-checker)
+ * are ignored.
*/
- ret = handle_indirect_tree_backref(cache, path, &key, node_key,
- cur);
- if (ret < 0)
- goto out;
}
ret = 0;
cur->checked = 1;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 82324c327a50..0cb1dee965a0 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -504,13 +504,20 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
#endif
/*
- * This is only called by btrfs_cache_block_group, since we could have freed
- * extents we need to check the pinned_extents for any extents that can't be
- * used yet since their free space will be released as soon as the transaction
- * commits.
+ * Add a free space range to the in memory free space cache of a block group.
+ * This checks if the range contains super block locations and any such
+ * locations are not added to the free space cache.
+ *
+ * @block_group: The target block group.
+ * @start: Start offset of the range.
+ * @end: End offset of the range (exclusive).
+ * @total_added_ret: Optional pointer to return the total amount of space
+ * added to the block group's free space cache.
+ *
+ * Returns 0 on success or < 0 on error.
*/
-int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
- u64 *total_added_ret)
+int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
+ u64 end, u64 *total_added_ret)
{
struct btrfs_fs_info *info = block_group->fs_info;
u64 extent_start, extent_end, size;
@@ -520,11 +527,10 @@ int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
*total_added_ret = 0;
while (start < end) {
- ret = find_first_extent_bit(&info->excluded_extents, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY | EXTENT_UPTODATE,
- NULL);
- if (ret)
+ if (!find_first_extent_bit(&info->excluded_extents, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY | EXTENT_UPTODATE,
+ NULL))
break;
if (extent_start <= start) {
@@ -799,8 +805,8 @@ next:
key.type == BTRFS_METADATA_ITEM_KEY) {
u64 space_added;
- ret = add_new_free_space(block_group, last, key.objectid,
- &space_added);
+ ret = btrfs_add_new_free_space(block_group, last,
+ key.objectid, &space_added);
if (ret)
goto out;
total_found += space_added;
@@ -821,14 +827,20 @@ next:
path->slots[0]++;
}
- ret = add_new_free_space(block_group, last,
- block_group->start + block_group->length,
- NULL);
+ ret = btrfs_add_new_free_space(block_group, last,
+ block_group->start + block_group->length,
+ NULL);
out:
btrfs_free_path(path);
return ret;
}
+static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
+{
+ clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
+ bg->start + bg->length - 1, EXTENT_UPTODATE);
+}
+
static noinline void caching_thread(struct btrfs_work *work)
{
struct btrfs_block_group *block_group;
@@ -2098,8 +2110,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
cache->bytes_super += stripe_len;
- ret = btrfs_add_excluded_extent(fs_info, cache->start,
- stripe_len);
+ ret = set_extent_bit(&fs_info->excluded_extents, cache->start,
+ cache->start + stripe_len - 1,
+ EXTENT_UPTODATE, NULL);
if (ret)
return ret;
}
@@ -2125,8 +2138,9 @@ static int exclude_super_stripes(struct btrfs_block_group *cache)
cache->start + cache->length - logical[nr]);
cache->bytes_super += len;
- ret = btrfs_add_excluded_extent(fs_info, logical[nr],
- len);
+ ret = set_extent_bit(&fs_info->excluded_extents, logical[nr],
+ logical[nr] + len - 1,
+ EXTENT_UPTODATE, NULL);
if (ret) {
kfree(logical);
return ret;
@@ -2319,8 +2333,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
btrfs_free_excluded_extents(cache);
} else if (cache->used == 0) {
cache->cached = BTRFS_CACHE_FINISHED;
- ret = add_new_free_space(cache, cache->start,
- cache->start + cache->length, NULL);
+ ret = btrfs_add_new_free_space(cache, cache->start,
+ cache->start + cache->length, NULL);
btrfs_free_excluded_extents(cache);
if (ret)
goto error;
@@ -2767,7 +2781,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
return ERR_PTR(ret);
}
- ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
+ ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
btrfs_free_excluded_extents(cache);
if (ret) {
btrfs_put_block_group(cache);
@@ -4075,7 +4089,7 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
if (IS_ERR(ret_bg)) {
ret = PTR_ERR(ret_bg);
- } else if (from_extent_allocation) {
+ } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) {
/*
* New block group is likely to be used soon. Try to activate
* it now. Failure is OK for now.
@@ -4273,6 +4287,17 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
struct btrfs_caching_control *caching_ctl;
struct rb_node *n;
+ if (btrfs_is_zoned(info)) {
+ if (info->active_meta_bg) {
+ btrfs_put_block_group(info->active_meta_bg);
+ info->active_meta_bg = NULL;
+ }
+ if (info->active_system_bg) {
+ btrfs_put_block_group(info->active_system_bg);
+ info->active_system_bg = NULL;
+ }
+ }
+
write_lock(&info->block_group_cache_lock);
while (!list_empty(&info->caching_block_groups)) {
caching_ctl = list_entry(info->caching_block_groups.next,
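
btrfs_add_new_free_space(), renamed and documented above, takes an exclusive end offset and an optional out-parameter for the amount actually added. A hypothetical caller sketch following that contract:

static int example_add_range(struct btrfs_block_group *bg, u64 start, u64 end)
{
        u64 added = 0;
        int ret;

        /* Adds [start, end) to the free space cache, skipping super block
         * locations, so 'added' may be smaller than end - start. */
        ret = btrfs_add_new_free_space(bg, start, end, &added);
        if (ret < 0)
                return ret;
        return 0;
}
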
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 74b61e663028..2bdbcb834f95 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -291,8 +291,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
struct btrfs_block_group *cache);
-int add_new_free_space(struct btrfs_block_group *block_group,
- u64 start, u64 end, u64 *total_added_ret);
+int btrfs_add_new_free_space(struct btrfs_block_group *block_group,
+ u64 start, u64 end, u64 *total_added_ret);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index d47a927b3504..bda1fdbba666 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -498,12 +498,8 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, struct writeback_control *wbc);
+ u64 start, u64 end, struct writeback_control *wbc);
int btrfs_writepage_cow_fixup(struct page *page);
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
- struct page *page, u64 start,
- u64 end, bool uptodate);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 6d51db066503..53c1211dd60b 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1736,9 +1736,6 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
int over = 0;
unsigned char d_type;
- if (list_empty(ins_list))
- return 0;
-
/*
* Changing the data of the delayed item is impossible. So
* we needn't lock them. And we have held i_mutex of the
@@ -1809,9 +1806,9 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
inode->i_mtime.tv_nsec);
btrfs_set_stack_timespec_sec(&inode_item->ctime,
- inode->i_ctime.tv_sec);
+ inode_get_ctime(inode).tv_sec);
btrfs_set_stack_timespec_nsec(&inode_item->ctime,
- inode->i_ctime.tv_nsec);
+ inode_get_ctime(inode).tv_nsec);
btrfs_set_stack_timespec_sec(&inode_item->otime,
BTRFS_I(inode)->i_otime.tv_sec);
@@ -1862,8 +1859,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
- inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
- inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
+ inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
+ btrfs_stack_timespec_nsec(&inode_item->ctime));
BTRFS_I(inode)->i_otime.tv_sec =
btrfs_stack_timespec_sec(&inode_item->otime);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 5f10965fd72b..fff22ed55c42 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -792,9 +792,9 @@ static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
lockdep_assert_held(&srcdev->fs_info->chunk_mutex);
- while (!find_first_extent_bit(&srcdev->alloc_state, start,
- &found_start, &found_end,
- CHUNK_ALLOCATED, &cached_state)) {
+ while (find_first_extent_bit(&srcdev->alloc_state, start,
+ &found_start, &found_end,
+ CHUNK_ALLOCATED, &cached_state)) {
ret = set_extent_bit(&tgtdev->alloc_state, found_start,
found_end, CHUNK_ALLOCATED, NULL);
if (ret)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9a2c5446c18..0a96ea8c1d3a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -313,21 +313,16 @@ static bool check_tree_block_fsid(struct extent_buffer *eb)
struct btrfs_fs_info *fs_info = eb->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
u8 fsid[BTRFS_FSID_SIZE];
- u8 *metadata_uuid;
read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
BTRFS_FSID_SIZE);
+
/*
- * Checking the incompat flag is only valid for the current fs. For
- * seed devices it's forbidden to have their uuid changed so reading
- * ->fsid in this case is fine
+ * alloc_fs_devices() copies the fsid into metadata_uuid if the
+ * metadata_uuid is unset in the superblock, including for a seed device.
+ * So, we can use fs_devices->metadata_uuid.
*/
- if (btrfs_fs_incompat(fs_info, METADATA_UUID))
- metadata_uuid = fs_devices->metadata_uuid;
- else
- metadata_uuid = fs_devices->fsid;
-
- if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE))
+ if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
return false;
list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
@@ -2384,21 +2379,18 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
- if (memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
- BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
- fs_info->super_copy->fsid, fs_info->fs_devices->fsid);
+ sb->fsid, fs_info->fs_devices->fsid);
ret = -EINVAL;
}
- if (btrfs_fs_incompat(fs_info, METADATA_UUID) &&
- memcmp(fs_info->fs_devices->metadata_uuid,
- fs_info->super_copy->metadata_uuid, BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb),
+ BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
- fs_info->super_copy->metadata_uuid,
- fs_info->fs_devices->metadata_uuid);
+ btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
ret = -EINVAL;
}
@@ -2869,6 +2861,56 @@ static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
return 0;
}
+static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
+{
+ u64 root_objectid = 0;
+ struct btrfs_root *gang[8];
+ int i = 0;
+ int err = 0;
+ unsigned int ret = 0;
+
+ while (1) {
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ (void **)gang, root_objectid,
+ ARRAY_SIZE(gang));
+ if (!ret) {
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ break;
+ }
+ root_objectid = gang[ret - 1]->root_key.objectid + 1;
+
+ for (i = 0; i < ret; i++) {
+ /* Avoid grabbing roots in dead_roots. */
+ if (btrfs_root_refs(&gang[i]->root_item) == 0) {
+ gang[i] = NULL;
+ continue;
+ }
+ /* Grab all the search results for later use. */
+ gang[i] = btrfs_grab_root(gang[i]);
+ }
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
+ for (i = 0; i < ret; i++) {
+ if (!gang[i])
+ continue;
+ root_objectid = gang[i]->root_key.objectid;
+ err = btrfs_orphan_cleanup(gang[i]);
+ if (err)
+ goto out;
+ btrfs_put_root(gang[i]);
+ }
+ root_objectid++;
+ }
+out:
+ /* Release the uncleaned roots due to error. */
+ for (; i < ret; i++) {
+ if (gang[i])
+ btrfs_put_root(gang[i]);
+ }
+ return err;
+}
+
/*
* Some options only have meaning at mount time and shouldn't persist across
* remounts, or be displayed. Clear these at the end of mount and remount
@@ -3222,7 +3264,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
/* check FS state, whether FS is broken. */
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
- set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
+ WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
/*
* In the long term, we'll store the compression type in the super
@@ -3417,6 +3459,8 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
btrfs_free_zone_cache(fs_info);
+ btrfs_check_active_zone_reservation(fs_info);
+
if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
!btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
@@ -4136,56 +4180,6 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
btrfs_put_root(root);
}
-int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
-{
- u64 root_objectid = 0;
- struct btrfs_root *gang[8];
- int i = 0;
- int err = 0;
- unsigned int ret = 0;
-
- while (1) {
- spin_lock(&fs_info->fs_roots_radix_lock);
- ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
- (void **)gang, root_objectid,
- ARRAY_SIZE(gang));
- if (!ret) {
- spin_unlock(&fs_info->fs_roots_radix_lock);
- break;
- }
- root_objectid = gang[ret - 1]->root_key.objectid + 1;
-
- for (i = 0; i < ret; i++) {
- /* Avoid to grab roots in dead_roots */
- if (btrfs_root_refs(&gang[i]->root_item) == 0) {
- gang[i] = NULL;
- continue;
- }
- /* grab all the search result for later use */
- gang[i] = btrfs_grab_root(gang[i]);
- }
- spin_unlock(&fs_info->fs_roots_radix_lock);
-
- for (i = 0; i < ret; i++) {
- if (!gang[i])
- continue;
- root_objectid = gang[i]->root_key.objectid;
- err = btrfs_orphan_cleanup(gang[i]);
- if (err)
- goto out;
- btrfs_put_root(gang[i]);
- }
- root_objectid++;
- }
-out:
- /* release the uncleaned roots due to error */
- for (; i < ret; i++) {
- if (gang[i])
- btrfs_put_root(gang[i]);
- }
- return err;
-}
-
int btrfs_commit_super(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root = fs_info->tree_root;
@@ -4228,7 +4222,7 @@ static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
u64 found_end;
found = true;
- while (!find_first_extent_bit(&trans->dirty_pages, cur,
+ while (find_first_extent_bit(&trans->dirty_pages, cur,
&found_start, &found_end, EXTENT_DIRTY, &cached)) {
dirty_bytes += found_end + 1 - found_start;
cur = found_end + 1;
@@ -4552,9 +4546,7 @@ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
+ LIST_HEAD(splice);
spin_lock(&fs_info->ordered_root_lock);
list_splice_init(&fs_info->ordered_roots, &splice);
@@ -4660,9 +4652,7 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
+ LIST_HEAD(splice);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
@@ -4695,9 +4685,7 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *root;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
+ LIST_HEAD(splice);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
@@ -4716,21 +4704,16 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->delalloc_root_lock);
}
-static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *dirty_pages,
- int mark)
+static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *dirty_pages,
+ int mark)
{
- int ret;
struct extent_buffer *eb;
u64 start = 0;
u64 end;
- while (1) {
- ret = find_first_extent_bit(dirty_pages, start, &start, &end,
- mark, NULL);
- if (ret)
- break;
-
+ while (find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark, NULL)) {
clear_extent_bits(dirty_pages, start, end, mark);
while (start <= end) {
eb = find_extent_buffer(fs_info, start);
@@ -4746,16 +4729,13 @@ static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
free_extent_buffer_stale(eb);
}
}
-
- return ret;
}
-static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
- struct extent_io_tree *unpin)
+static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
+ struct extent_io_tree *unpin)
{
u64 start;
u64 end;
- int ret;
while (1) {
struct extent_state *cached_state = NULL;
@@ -4767,9 +4747,8 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
* the same extent range.
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
- ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, &cached_state);
- if (ret) {
+ if (!find_first_extent_bit(unpin, 0, &start, &end,
+ EXTENT_DIRTY, &cached_state)) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
@@ -4780,8 +4759,6 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
-
- return 0;
}
static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
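
Several hunks above collapse an on-stack list declaration plus INIT_LIST_HEAD() into a single LIST_HEAD() definition. The two forms are effectively equivalent; a sketch:

/* before */
struct list_head splice;
INIT_LIST_HEAD(&splice);

/* after: declared and initialized in one statement */
LIST_HEAD(splice);
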
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index b03767f4d7ed..02b645744a82 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -77,7 +77,6 @@ struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr);
struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info);
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info);
-int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index a2315a4b8b75..ff8e117a1ace 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -831,15 +831,15 @@ static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *t
*
* Note: If there are multiple bits set in @bits, any of them will match.
*
- * Return 0 if we find something, and update @start_ret and @end_ret.
- * Return 1 if we found nothing.
+ * Return true if we find something, and update @start_ret and @end_ret.
+ * Return false if we found nothing.
*/
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits,
- struct extent_state **cached_state)
+bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits,
+ struct extent_state **cached_state)
{
struct extent_state *state;
- int ret = 1;
+ bool ret = false;
spin_lock(&tree->lock);
if (cached_state && *cached_state) {
@@ -863,7 +863,7 @@ got_it:
cache_state_if_flags(state, cached_state, 0);
*start_ret = state->start;
*end_ret = state->end;
- ret = 0;
+ ret = true;
}
out:
spin_unlock(&tree->lock);
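
With find_first_extent_bit() now returning bool, callers simply loop while it keeps finding ranges, as in the conversions above. A hypothetical helper mirroring that pattern:

static u64 example_count_dirty_bytes(struct extent_io_tree *tree)
{
        struct extent_state *cached = NULL;
        u64 start = 0, found_start, found_end, bytes = 0;

        while (find_first_extent_bit(tree, start, &found_start, &found_end,
                                     EXTENT_DIRTY, &cached)) {
                bytes += found_end + 1 - found_start;
                start = found_end + 1;
        }
        return bytes;
}
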
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index fbd3b275ab1c..28c23a23d121 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -182,9 +182,9 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, u32 clear_bits,
struct extent_state **cached_state);
-int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
- u64 *start_ret, u64 *end_ret, u32 bits,
- struct extent_state **cached_state);
+bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
+ u64 *start_ret, u64 *end_ret, u32 bits,
+ struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, u32 bits);
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f396a9afa403..f356f08b55cb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -69,27 +69,6 @@ static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
return (cache->flags & bits) == bits;
}
-int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 num_bytes)
-{
- u64 end = start + num_bytes - 1;
- set_extent_bit(&fs_info->excluded_extents, start, end,
- EXTENT_UPTODATE, NULL);
- return 0;
-}
-
-void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
-{
- struct btrfs_fs_info *fs_info = cache->fs_info;
- u64 start, end;
-
- start = cache->start;
- end = start + cache->length - 1;
-
- clear_extent_bits(&fs_info->excluded_extents, start, end,
- EXTENT_UPTODATE);
-}
-
/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
@@ -187,8 +166,10 @@ search_again:
num_refs = btrfs_extent_refs(leaf, ei);
extent_flags = btrfs_extent_flags(leaf, ei);
} else {
- ret = -EINVAL;
- btrfs_print_v0_err(fs_info);
+ ret = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
if (trans)
btrfs_abort_transaction(trans, ret);
else
@@ -402,11 +383,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
}
}
+ WARN_ON(1);
btrfs_print_leaf(eb);
btrfs_err(eb->fs_info,
"eb %llu iref 0x%lx invalid extent inline ref type %d",
eb->start, (unsigned long)iref, type);
- WARN_ON(1);
return BTRFS_REF_TYPE_INVALID;
}
@@ -624,12 +605,12 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
ref2 = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
num_refs = btrfs_shared_data_ref_count(leaf, ref2);
- } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
- btrfs_print_v0_err(trans->fs_info);
- btrfs_abort_transaction(trans, -EINVAL);
- return -EINVAL;
} else {
- BUG();
+ btrfs_err(trans->fs_info,
+ "unrecognized backref key (%llu %u %llu)",
+ key.objectid, key.type, key.offset);
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ return -EUCLEAN;
}
BUG_ON(num_refs < refs_to_drop);
@@ -660,7 +641,6 @@ static noinline u32 extent_data_ref_count(struct btrfs_path *path,
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
if (iref) {
/*
* If type is invalid, we should have bailed out earlier than
@@ -869,6 +849,11 @@ again:
err = -ENOENT;
goto out;
} else if (WARN_ON(ret)) {
+ btrfs_print_leaf(path->nodes[0]);
+ btrfs_err(fs_info,
+"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
+ bytenr, num_bytes, parent, root_objectid, owner,
+ offset);
err = -EIO;
goto out;
}
@@ -876,8 +861,10 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size(leaf, path->slots[0]);
if (unlikely(item_size < sizeof(*ei))) {
- err = -EINVAL;
- btrfs_print_v0_err(fs_info);
+ err = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected extent item size, has %llu expect >= %zu",
+ item_size, sizeof(*ei));
btrfs_abort_transaction(trans, err);
goto out;
}
@@ -1079,13 +1066,13 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
/*
* helper to update/remove inline back ref
*/
-static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_path *path,
+static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
struct btrfs_delayed_extent_op *extent_op)
{
struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_fs_info *fs_info = leaf->fs_info;
struct btrfs_extent_item *ei;
struct btrfs_extent_data_ref *dref = NULL;
struct btrfs_shared_data_ref *sref = NULL;
@@ -1098,18 +1085,33 @@ void update_inline_extent_backref(struct btrfs_path *path,
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
- WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
+ if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
+ struct btrfs_key key;
+ u32 extent_size;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ extent_size = fs_info->nodesize;
+ else
+ extent_size = key.offset;
+ btrfs_print_leaf(leaf);
+ btrfs_err(fs_info,
+ "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
+ key.objectid, extent_size, refs_to_mod, refs);
+ return -EUCLEAN;
+ }
refs += refs_to_mod;
btrfs_set_extent_refs(leaf, ei, refs);
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, ei);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
/*
- * If type is invalid, we should have bailed out after
- * lookup_inline_extent_backref().
+ * Function btrfs_get_extent_inline_ref_type() has already printed
+ * error messages.
*/
- type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
- ASSERT(type != BTRFS_REF_TYPE_INVALID);
+ if (unlikely(type == BTRFS_REF_TYPE_INVALID))
+ return -EUCLEAN;
if (type == BTRFS_EXTENT_DATA_REF_KEY) {
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -1119,10 +1121,43 @@ void update_inline_extent_backref(struct btrfs_path *path,
refs = btrfs_shared_data_ref_count(leaf, sref);
} else {
refs = 1;
- BUG_ON(refs_to_mod != -1);
+ /*
+ * For tree blocks we can only drop one ref for it, and tree
+ * blocks should not have refs > 1.
+ *
+ * Furthermore if we're inserting a new inline backref, we
+ * won't reach this path either. That would be
+ * setup_inline_extent_backref().
+ */
+ if (unlikely(refs_to_mod != -1)) {
+ struct btrfs_key key;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+
+ btrfs_print_leaf(leaf);
+ btrfs_err(fs_info,
+ "invalid refs_to_mod for tree block %llu, has %d expect -1",
+ key.objectid, refs_to_mod);
+ return -EUCLEAN;
+ }
}
- BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
+ if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
+ struct btrfs_key key;
+ u32 extent_size;
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type == BTRFS_METADATA_ITEM_KEY)
+ extent_size = fs_info->nodesize;
+ else
+ extent_size = key.offset;
+ btrfs_print_leaf(leaf);
+ btrfs_err(fs_info,
+"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
+ (unsigned long)iref, key.objectid, extent_size,
+ refs_to_mod, refs);
+ return -EUCLEAN;
+ }
refs += refs_to_mod;
if (refs > 0) {
@@ -1142,6 +1177,7 @@ void update_inline_extent_backref(struct btrfs_path *path,
btrfs_truncate_item(path, item_size, 1);
}
btrfs_mark_buffer_dirty(leaf);
+ return 0;
}
static noinline_for_stack
@@ -1170,7 +1206,7 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
bytenr, num_bytes, root_objectid, path->slots[0]);
return -EUCLEAN;
}
- update_inline_extent_backref(path, iref, refs_to_add, extent_op);
+ ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
} else if (ret == -ENOENT) {
setup_inline_extent_backref(trans->fs_info, path, iref, parent,
root_objectid, owner, offset,
@@ -1190,7 +1226,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
BUG_ON(!is_data && refs_to_drop != 1);
if (iref)
- update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
+ ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
else if (is_data)
ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
else
@@ -1629,8 +1665,10 @@ again:
item_size = btrfs_item_size(leaf, path->slots[0]);
if (unlikely(item_size < sizeof(*ei))) {
- err = -EINVAL;
- btrfs_print_v0_err(fs_info);
+ err = -EUCLEAN;
+ btrfs_err(fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
btrfs_abort_transaction(trans, err);
goto out;
}
@@ -2751,9 +2789,8 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
struct extent_state *cached_state = NULL;
mutex_lock(&fs_info->unused_bg_unpin_mutex);
- ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, &cached_state);
- if (ret) {
+ if (!find_first_extent_bit(unpin, 0, &start, &end,
+ EXTENT_DIRTY, &cached_state)) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
@@ -3059,8 +3096,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
item_size = btrfs_item_size(leaf, extent_slot);
if (unlikely(item_size < sizeof(*ei))) {
- ret = -EINVAL;
- btrfs_print_v0_err(info);
+ ret = -EUCLEAN;
+ btrfs_err(trans->fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -3351,11 +3390,38 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
}
enum btrfs_loop_type {
+ /*
+ * Start caching block groups but do not wait for progress or for them
+ * to be done.
+ */
LOOP_CACHING_NOWAIT,
+
+ /*
+ * If the block group isn't cached, wait until its free_space is at
+ * least the amount of space we're waiting for.
+ */
LOOP_CACHING_WAIT,
+
+ /*
+ * Allow allocations to happen from block groups that do not yet have a
+ * size classification.
+ */
LOOP_UNSET_SIZE_CLASS,
+
+ /*
+ * Allocate a chunk and then retry the allocation.
+ */
LOOP_ALLOC_CHUNK,
+
+ /*
+ * Ignore the size class restrictions for this allocation.
+ */
LOOP_WRONG_SIZE_CLASS,
+
+ /*
+ * Ignore the empty size, only try to allocate the number of bytes
+ * needed for this allocation.
+ */
LOOP_NO_EMPTY_SIZE,
};
@@ -3427,7 +3493,6 @@ btrfs_release_block_group(struct btrfs_block_group *cache,
* Helper function for find_free_extent().
*
* Return -ENOENT to inform caller that we need fallback to unclustered mode.
- * Return -EAGAIN to inform caller that we need to re-search this block group
* Return >0 to inform caller that we find nothing
* Return 0 means we have found a location and set ffe_ctl->found_offset.
*/
@@ -3508,14 +3573,6 @@ refill_cluster:
trace_btrfs_reserve_extent_cluster(bg, ffe_ctl);
return 0;
}
- } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
- !ffe_ctl->retry_clustered) {
- spin_unlock(&last_ptr->refill_lock);
-
- ffe_ctl->retry_clustered = true;
- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
- ffe_ctl->empty_cluster + ffe_ctl->empty_size);
- return -EAGAIN;
}
/*
* At this point we either didn't find a cluster or we weren't able to
@@ -3530,7 +3587,6 @@ refill_cluster:
/*
* Return >0 to inform caller that we find nothing
* Return 0 when we found an free extent and set ffe_ctrl->found_offset
- * Return -EAGAIN to inform caller that we need to re-search this block group
*/
static int find_free_extent_unclustered(struct btrfs_block_group *bg,
struct find_free_extent_ctl *ffe_ctl)
@@ -3568,25 +3624,8 @@ static int find_free_extent_unclustered(struct btrfs_block_group *bg,
offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
ffe_ctl->num_bytes, ffe_ctl->empty_size,
&ffe_ctl->max_extent_size);
-
- /*
- * If we didn't find a chunk, and we haven't failed on this block group
- * before, and this block group is in the middle of caching and we are
- * ok with waiting, then go ahead and wait for progress to be made, and
- * set @retry_unclustered to true.
- *
- * If @retry_unclustered is true then we've already waited on this
- * block group once and should move on to the next block group.
- */
- if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
- ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
- btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
- ffe_ctl->empty_size);
- ffe_ctl->retry_unclustered = true;
- return -EAGAIN;
- } else if (!offset) {
+ if (!offset)
return 1;
- }
ffe_ctl->found_offset = offset;
return 0;
}
@@ -3600,7 +3639,7 @@ static int do_allocation_clustered(struct btrfs_block_group *block_group,
/* We want to try and use the cluster allocator, so lets look there */
if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
- if (ret >= 0 || ret == -EAGAIN)
+ if (ret >= 0)
return ret;
/* ret == -ENOENT case falls through */
}
@@ -3685,7 +3724,9 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
}
spin_unlock(&block_group->lock);
- if (!ret && !btrfs_zone_activate(block_group)) {
+ /* Metadata block group is activated at write time. */
+ if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
+ !btrfs_zone_activate(block_group)) {
ret = 1;
/*
* May need to clear fs_info->{treelog,data_reloc}_bg.
@@ -3709,7 +3750,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
fs_info->data_reloc_bg == 0);
if (block_group->ro ||
- test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ (!ffe_ctl->for_data_reloc &&
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) {
ret = 1;
goto out;
}
@@ -3752,8 +3794,26 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
if (ffe_ctl->for_treelog && !fs_info->treelog_bg)
fs_info->treelog_bg = block_group->start;
- if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg)
- fs_info->data_reloc_bg = block_group->start;
+ if (ffe_ctl->for_data_reloc) {
+ if (!fs_info->data_reloc_bg)
+ fs_info->data_reloc_bg = block_group->start;
+ /*
+ * Do not allow allocations from this block group, unless it is
+ * for data relocation. Compared to increasing the ->ro, setting
+ * the ->zoned_data_reloc_ongoing flag still allows nocow
+ * writers to come in. See btrfs_inc_nocow_writers().
+ *
+ * We need to disable allocations here to avoid allocating a
+ * regular (non-relocation data) extent. With a mix of relocation
+ * extents and regular extents, we can dispatch WRITE commands
+ * (for relocation extents) and ZONE APPEND commands (for
+ * regular extents) at the same time to the same zone, which
+ * easily break the write pointer.
+ *
+ * Also, this flag avoids this block group to be zone finished.
+ */
+ set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
+ }
ffe_ctl->found_offset = start + block_group->alloc_offset;
block_group->alloc_offset += num_bytes;
@@ -3771,24 +3831,8 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
out:
if (ret && ffe_ctl->for_treelog)
fs_info->treelog_bg = 0;
- if (ret && ffe_ctl->for_data_reloc &&
- fs_info->data_reloc_bg == block_group->start) {
- /*
- * Do not allow further allocations from this block group.
- * Compared to increasing the ->ro, setting the
- * ->zoned_data_reloc_ongoing flag still allows nocow
- * writers to come in. See btrfs_inc_nocow_writers().
- *
- * We need to disable an allocation to avoid an allocation of
- * regular (non-relocation data) extent. With mix of relocation
- * extents and regular extents, we can dispatch WRITE commands
- * (for relocation extents) and ZONE APPEND commands (for
- * regular extents) at the same time to the same zone, which
- * easily break the write pointer.
- */
- set_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags);
+ if (ret && ffe_ctl->for_data_reloc)
fs_info->data_reloc_bg = 0;
- }
spin_unlock(&fs_info->relocation_bg_lock);
spin_unlock(&fs_info->treelog_bg_lock);
spin_unlock(&block_group->lock);
@@ -3816,8 +3860,7 @@ static void release_block_group(struct btrfs_block_group *block_group,
{
switch (ffe_ctl->policy) {
case BTRFS_EXTENT_ALLOC_CLUSTERED:
- ffe_ctl->retry_clustered = false;
- ffe_ctl->retry_unclustered = false;
+ ffe_ctl->retry_uncached = false;
break;
case BTRFS_EXTENT_ALLOC_ZONED:
/* Nothing to do */
@@ -3861,6 +3904,10 @@ static void found_extent(struct find_free_extent_ctl *ffe_ctl,
static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
struct find_free_extent_ctl *ffe_ctl)
{
+ /* Being active is not a requirement for METADATA block groups. */
+ if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA))
+ return 0;
+
/* If we can activate new zone, just allocate a chunk and use it */
if (btrfs_can_activate_zone(fs_info->fs_devices, ffe_ctl->flags))
return 0;
@@ -3949,15 +3996,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
if (ffe_ctl->index < BTRFS_NR_RAID_TYPES)
return 1;
- /*
- * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
- * caching kthreads as we move along
- * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
- * LOOP_UNSET_SIZE_CLASS, allow unset size class
- * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
- * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
- * again
- */
+ /* See the comments for btrfs_loop_type for an explanation of the phases. */
if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
ffe_ctl->index = 0;
/*
@@ -4168,9 +4207,7 @@ static noinline int find_free_extent(struct btrfs_root *root,
ffe_ctl->orig_have_caching_bg = false;
ffe_ctl->index = btrfs_bg_flags_to_raid_index(ffe_ctl->flags);
ffe_ctl->loop = 0;
- /* For clustered allocation */
- ffe_ctl->retry_clustered = false;
- ffe_ctl->retry_unclustered = false;
+ ffe_ctl->retry_uncached = false;
ffe_ctl->cached = 0;
ffe_ctl->max_extent_size = 0;
ffe_ctl->total_free_space = 0;
@@ -4321,16 +4358,12 @@ have_block_group:
bg_ret = NULL;
ret = do_allocation(block_group, ffe_ctl, &bg_ret);
- if (ret == 0) {
- if (bg_ret && bg_ret != block_group) {
- btrfs_release_block_group(block_group,
- ffe_ctl->delalloc);
- block_group = bg_ret;
- }
- } else if (ret == -EAGAIN) {
- goto have_block_group;
- } else if (ret > 0) {
+ if (ret > 0)
goto loop;
+
+ if (bg_ret && bg_ret != block_group) {
+ btrfs_release_block_group(block_group, ffe_ctl->delalloc);
+ block_group = bg_ret;
}
/* Checks */
@@ -4371,6 +4404,15 @@ have_block_group:
btrfs_release_block_group(block_group, ffe_ctl->delalloc);
break;
loop:
+ if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
+ !ffe_ctl->retry_uncached) {
+ ffe_ctl->retry_uncached = true;
+ btrfs_wait_block_group_cache_progress(block_group,
+ ffe_ctl->num_bytes +
+ ffe_ctl->empty_cluster +
+ ffe_ctl->empty_size);
+ goto have_block_group;
+ }
release_block_group(block_group, ffe_ctl, ffe_ctl->delalloc);
cond_resched();
}
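
The extent-tree.c hunks above fold the old retry_clustered/retry_unclustered flags into a single retry_uncached flag handled once at the loop: label: if the block group is not fully cached and has not been retried yet, wait for caching progress and jump back to have_block_group. A minimal userspace sketch of that retry-once-per-block-group control flow follows; the types and helpers are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct block_group {
	const char *name;
	bool cached;          /* free-space caching finished? */
	bool has_free_extent; /* would a search succeed? */
};

/* Pretend to wait for the caching thread to make progress. */
static void wait_cache_progress(struct block_group *bg)
{
	printf("waiting for caching progress on %s\n", bg->name);
	bg->cached = true; /* in the sketch, waiting completes the cache */
}

static int find_free_extent(struct block_group *bgs, int nr_bgs)
{
	for (int i = 0; i < nr_bgs; i++) {
		struct block_group *bg = &bgs[i];
		bool retry_uncached = false;

have_block_group:
		if (bg->has_free_extent) {
			printf("allocated from %s\n", bg->name);
			return 0;
		}
		/* Retry an uncached block group exactly once. */
		if (!bg->cached && !retry_uncached) {
			retry_uncached = true;
			wait_cache_progress(bg);
			goto have_block_group;
		}
		/* Otherwise move on to the next block group. */
	}
	return -1; /* ENOSPC-like failure */
}

int main(void)
{
	struct block_group bgs[] = {
		{ "bg-uncached-empty", false, false },
		{ "bg-with-space", true, true },
	};

	return find_free_extent(bgs, 2) ? 1 : 0;
}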
diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
index 429d5c570061..88c249c37516 100644
--- a/fs/btrfs/extent-tree.h
+++ b/fs/btrfs/extent-tree.h
@@ -48,16 +48,11 @@ struct find_free_extent_ctl {
int loop;
/*
- * Whether we're refilling a cluster, if true we need to re-search
- * current block group but don't try to refill the cluster again.
+ * Set to true if we're retrying the allocation on this block group
+ * after waiting for caching progress, so that we retry only once
+ * before moving on to another block group.
*/
- bool retry_clustered;
-
- /*
- * Whether we're updating free space cache, if true we need to re-search
- * current block group but don't try updating free space cache again.
- */
- bool retry_unclustered;
+ bool retry_uncached;
/* If current block group is cached */
int cached;
@@ -96,9 +91,6 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
-int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
- u64 start, u64 num_bytes);
-void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 90ad3006ef3a..ac3fca5a5e41 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -181,34 +181,9 @@ void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
}
}
-void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
-{
- struct address_space *mapping = inode->i_mapping;
- unsigned long index = start >> PAGE_SHIFT;
- unsigned long end_index = end >> PAGE_SHIFT;
- struct folio *folio;
-
- while (index <= end_index) {
- folio = filemap_get_folio(mapping, index);
- filemap_dirty_folio(mapping, folio);
- folio_account_redirty(folio);
- index += folio_nr_pages(folio);
- folio_put(folio);
- }
-}
-
-/*
- * Process one page for __process_pages_contig().
- *
- * Return >0 if we hit @page == @locked_page.
- * Return 0 if we updated the page status.
- * Return -EGAIN if the we need to try again.
- * (For PAGE_LOCK case but got dirty page or page not belong to mapping)
- */
-static int process_one_page(struct btrfs_fs_info *fs_info,
- struct address_space *mapping,
- struct page *page, struct page *locked_page,
- unsigned long page_ops, u64 start, u64 end)
+static void process_one_page(struct btrfs_fs_info *fs_info,
+ struct page *page, struct page *locked_page,
+ unsigned long page_ops, u64 start, u64 end)
{
u32 len;
@@ -224,94 +199,36 @@ static int process_one_page(struct btrfs_fs_info *fs_info,
if (page_ops & PAGE_END_WRITEBACK)
btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
- if (page == locked_page)
- return 1;
-
- if (page_ops & PAGE_LOCK) {
- int ret;
-
- ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
- if (ret)
- return ret;
- if (!PageDirty(page) || page->mapping != mapping) {
- btrfs_page_end_writer_lock(fs_info, page, start, len);
- return -EAGAIN;
- }
- }
- if (page_ops & PAGE_UNLOCK)
+ if (page != locked_page && (page_ops & PAGE_UNLOCK))
btrfs_page_end_writer_lock(fs_info, page, start, len);
- return 0;
}
-static int __process_pages_contig(struct address_space *mapping,
- struct page *locked_page,
- u64 start, u64 end, unsigned long page_ops,
- u64 *processed_end)
+static void __process_pages_contig(struct address_space *mapping,
+ struct page *locked_page, u64 start, u64 end,
+ unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
pgoff_t start_index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
pgoff_t index = start_index;
- unsigned long pages_processed = 0;
struct folio_batch fbatch;
- int err = 0;
int i;
- if (page_ops & PAGE_LOCK) {
- ASSERT(page_ops == PAGE_LOCK);
- ASSERT(processed_end && *processed_end == start);
- }
-
folio_batch_init(&fbatch);
while (index <= end_index) {
int found_folios;
found_folios = filemap_get_folios_contig(mapping, &index,
end_index, &fbatch);
-
- if (found_folios == 0) {
- /*
- * Only if we're going to lock these pages, we can find
- * nothing at @index.
- */
- ASSERT(page_ops & PAGE_LOCK);
- err = -EAGAIN;
- goto out;
- }
-
for (i = 0; i < found_folios; i++) {
- int process_ret;
struct folio *folio = fbatch.folios[i];
- process_ret = process_one_page(fs_info, mapping,
- &folio->page, locked_page, page_ops,
- start, end);
- if (process_ret < 0) {
- err = -EAGAIN;
- folio_batch_release(&fbatch);
- goto out;
- }
- pages_processed += folio_nr_pages(folio);
+
+ process_one_page(fs_info, &folio->page, locked_page,
+ page_ops, start, end);
}
folio_batch_release(&fbatch);
cond_resched();
}
-out:
- if (err && processed_end) {
- /*
- * Update @processed_end. I know this is awful since it has
- * two different return value patterns (inclusive vs exclusive).
- *
- * But the exclusive pattern is necessary if @start is 0, or we
- * underflow and check against processed_end won't work as
- * expected.
- */
- if (pages_processed)
- *processed_end = min(end,
- ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
- else
- *processed_end = start;
- }
- return err;
}
static noinline void __unlock_for_delalloc(struct inode *inode,
@@ -326,29 +243,63 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
return;
__process_pages_contig(inode->i_mapping, locked_page, start, end,
- PAGE_UNLOCK, NULL);
+ PAGE_UNLOCK);
}
static noinline int lock_delalloc_pages(struct inode *inode,
struct page *locked_page,
- u64 delalloc_start,
- u64 delalloc_end)
+ u64 start,
+ u64 end)
{
- unsigned long index = delalloc_start >> PAGE_SHIFT;
- unsigned long end_index = delalloc_end >> PAGE_SHIFT;
- u64 processed_end = delalloc_start;
- int ret;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
+ pgoff_t index = start_index;
+ u64 processed_end = start;
+ struct folio_batch fbatch;
- ASSERT(locked_page);
if (index == locked_page->index && index == end_index)
return 0;
- ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
- delalloc_end, PAGE_LOCK, &processed_end);
- if (ret == -EAGAIN && processed_end > delalloc_start)
- __unlock_for_delalloc(inode, locked_page, delalloc_start,
- processed_end);
- return ret;
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ unsigned int found_folios, i;
+
+ found_folios = filemap_get_folios_contig(mapping, &index,
+ end_index, &fbatch);
+ if (found_folios == 0)
+ goto out;
+
+ for (i = 0; i < found_folios; i++) {
+ struct page *page = &fbatch.folios[i]->page;
+ u32 len = end + 1 - start;
+
+ if (page == locked_page)
+ continue;
+
+ if (btrfs_page_start_writer_lock(fs_info, page, start,
+ len))
+ goto out;
+
+ if (!PageDirty(page) || page->mapping != mapping) {
+ btrfs_page_end_writer_lock(fs_info, page, start,
+ len);
+ goto out;
+ }
+
+ processed_end = page_offset(page) + PAGE_SIZE - 1;
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ return 0;
+out:
+ folio_batch_release(&fbatch);
+ if (processed_end > start)
+ __unlock_for_delalloc(inode, locked_page, start, processed_end);
+ return -EAGAIN;
}
/*
@@ -467,7 +418,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
- start, end, page_ops, NULL);
+ start, end, page_ops);
}
static bool btrfs_verify_page(struct page *page, u64 start)
@@ -497,31 +448,6 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
btrfs_subpage_end_reader(fs_info, page, start, len);
}
-/* lots and lots of room for performance fixes in the end_bio funcs */
-
-void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
-{
- struct btrfs_inode *inode;
- const bool uptodate = (err == 0);
- int ret = 0;
-
- ASSERT(page && page->mapping);
- inode = BTRFS_I(page->mapping->host);
- btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
-
- if (!uptodate) {
- const struct btrfs_fs_info *fs_info = inode->root->fs_info;
- u32 len;
-
- ASSERT(end + 1 - start <= U32_MAX);
- len = end + 1 - start;
-
- btrfs_page_clear_uptodate(fs_info, page, start, len);
- ret = err < 0 ? err : -EIO;
- mapping_set_error(page->mapping, ret);
- }
-}
-
/*
* after a writepage IO is done, we need to:
* clear the uptodate bits on error
@@ -1243,38 +1169,45 @@ static inline void contiguous_readpages(struct page *pages[], int nr_pages,
static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
struct page *page, struct writeback_control *wbc)
{
- const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
- u64 delalloc_start = page_offset(page);
+ const u64 page_start = page_offset(page);
+ const u64 page_end = page_start + PAGE_SIZE - 1;
+ u64 delalloc_start = page_start;
+ u64 delalloc_end = page_end;
u64 delalloc_to_write = 0;
- /* How many pages are started by btrfs_run_delalloc_range() */
- unsigned long nr_written = 0;
- int ret;
- int page_started = 0;
+ int ret = 0;
while (delalloc_start < page_end) {
- u64 delalloc_end = page_end;
- bool found;
-
- found = find_lock_delalloc_range(&inode->vfs_inode, page,
- &delalloc_start,
- &delalloc_end);
- if (!found) {
+ delalloc_end = page_end;
+ if (!find_lock_delalloc_range(&inode->vfs_inode, page,
+ &delalloc_start, &delalloc_end)) {
delalloc_start = delalloc_end + 1;
continue;
}
+
ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
- delalloc_end, &page_started, &nr_written, wbc);
- if (ret)
+ delalloc_end, wbc);
+ if (ret < 0)
return ret;
- /*
- * delalloc_end is already one less than the total length, so
- * we don't subtract one from PAGE_SIZE
- */
- delalloc_to_write += (delalloc_end - delalloc_start +
- PAGE_SIZE) >> PAGE_SHIFT;
delalloc_start = delalloc_end + 1;
}
+
+ /*
+ * delalloc_end is already one less than the total length, so
+ * we don't subtract one from PAGE_SIZE
+ */
+ delalloc_to_write +=
+ DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
+
+ /*
+ * If btrfs_run_delalloc_range() already started I/O and unlocked
+ * the pages, we just need to account for them here.
+ */
+ if (ret == 1) {
+ wbc->nr_to_write -= delalloc_to_write;
+ return 1;
+ }
+
if (wbc->nr_to_write < delalloc_to_write) {
int thresh = 8192;
@@ -1284,16 +1217,6 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
thresh);
}
- /* Did btrfs_run_dealloc_range() already unlock and start the IO? */
- if (page_started) {
- /*
- * We've unlocked the page, so we can't update the mapping's
- * writeback index, just update nr_to_write.
- */
- wbc->nr_to_write -= nr_written;
- return 1;
- }
-
return 0;
}
@@ -1382,6 +1305,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
bio_ctrl->end_io_func = end_bio_extent_writepage;
while (cur <= end) {
+ u32 len = end - cur + 1;
u64 disk_bytenr;
u64 em_end;
u64 dirty_range_start = cur;
@@ -1389,8 +1313,8 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
u32 iosize;
if (cur >= i_size) {
- btrfs_writepage_endio_finish_ordered(inode, page, cur,
- end, true);
+ btrfs_mark_ordered_io_finished(inode, page, cur, len,
+ true);
/*
* This range is beyond i_size, thus we don't need to
* bother writing back.
@@ -1399,7 +1323,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
* writeback the sectors with subpage dirty bits,
* causing writeback without ordered extent.
*/
- btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
+ btrfs_page_clear_dirty(fs_info, page, cur, len);
break;
}
@@ -1410,7 +1334,7 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
continue;
}
- em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
+ em = btrfs_get_extent(inode, NULL, 0, cur, len);
if (IS_ERR(em)) {
ret = PTR_ERR_OR_ZERO(em);
goto out_error;
@@ -1486,7 +1410,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
const u64 page_start = page_offset(page);
- const u64 page_end = page_start + PAGE_SIZE - 1;
int ret;
int nr = 0;
size_t pg_offset;
@@ -1530,8 +1453,13 @@ done:
set_page_writeback(page);
end_page_writeback(page);
}
- if (ret)
- end_extent_writepage(page, ret, page_start, page_end);
+ if (ret) {
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
+ PAGE_SIZE, !ret);
+ btrfs_page_clear_uptodate(btrfs_sb(inode->i_sb), page,
+ page_start, PAGE_SIZE);
+ mapping_set_error(page->mapping, ret);
+ }
unlock_page(page);
ASSERT(ret <= 0);
return ret;
@@ -1877,11 +1805,10 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
* previous call.
* Return <0 for fatal error.
*/
-static int submit_eb_page(struct page *page, struct writeback_control *wbc,
- struct extent_buffer **eb_context)
+static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
{
+ struct writeback_control *wbc = ctx->wbc;
struct address_space *mapping = page->mapping;
- struct btrfs_block_group *cache = NULL;
struct extent_buffer *eb;
int ret;
@@ -1908,7 +1835,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
return 0;
}
- if (eb == *eb_context) {
+ if (eb == ctx->eb) {
spin_unlock(&mapping->private_lock);
return 0;
}
@@ -1917,34 +1844,25 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
if (!ret)
return 0;
- if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
- /*
- * If for_sync, this hole will be filled with
- * trasnsaction commit.
- */
- if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
- ret = -EAGAIN;
- else
+ ctx->eb = eb;
+
+ ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
+ if (ret) {
+ if (ret == -EBUSY)
ret = 0;
free_extent_buffer(eb);
return ret;
}
- *eb_context = eb;
-
if (!lock_extent_buffer_for_io(eb, wbc)) {
- btrfs_revert_meta_write_pointer(cache, eb);
- if (cache)
- btrfs_put_block_group(cache);
free_extent_buffer(eb);
return 0;
}
- if (cache) {
- /*
- * Implies write in zoned mode. Mark the last eb in a block group.
- */
- btrfs_schedule_zone_finish_bg(cache, eb);
- btrfs_put_block_group(cache);
+ /* Implies write in zoned mode. */
+ if (ctx->zoned_bg) {
+ /* Mark the last eb in the block group. */
+ btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
+ ctx->zoned_bg->meta_write_pointer += eb->len;
}
write_one_eb(eb, wbc);
free_extent_buffer(eb);
@@ -1954,7 +1872,7 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
- struct extent_buffer *eb_context = NULL;
+ struct btrfs_eb_write_context ctx = { .wbc = wbc };
struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
int ret = 0;
int done = 0;
@@ -1996,7 +1914,7 @@ retry:
for (i = 0; i < nr_folios; i++) {
struct folio *folio = fbatch.folios[i];
- ret = submit_eb_page(&folio->page, wbc, &eb_context);
+ ret = submit_eb_page(&folio->page, &ctx);
if (ret == 0)
continue;
if (ret < 0) {
@@ -2057,6 +1975,9 @@ retry:
ret = 0;
if (!ret && BTRFS_FS_ERROR(fs_info))
ret = -EROFS;
+
+ if (ctx.zoned_bg)
+ btrfs_put_block_group(ctx.zoned_bg);
btrfs_zoned_meta_io_unlock(fs_info);
return ret;
}
@@ -2150,7 +2071,7 @@ retry:
for (i = 0; i < nr_folios; i++) {
struct folio *folio = fbatch.folios[i];
- done_index = folio->index + folio_nr_pages(folio);
+ done_index = folio_next_index(folio);
/*
* At this point we hold neither the i_pages lock nor
* the page lock: the page may be truncated or
@@ -2233,11 +2154,11 @@ retry:
* already been ran (aka, ordered extent inserted) and all pages are still
* locked.
*/
-int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
- struct writeback_control *wbc)
+void extent_write_locked_range(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, struct writeback_control *wbc,
+ bool pages_dirty)
{
bool found_error = false;
- int first_error = 0;
int ret = 0;
struct address_space *mapping = inode->i_mapping;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2256,18 +2177,16 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
while (cur <= end) {
u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
+ u32 cur_len = cur_end + 1 - cur;
struct page *page;
int nr = 0;
page = find_get_page(mapping, cur >> PAGE_SHIFT);
- /*
- * All pages in the range are locked since
- * btrfs_run_delalloc_range(), thus there is no way to clear
- * the page dirty flag.
- */
ASSERT(PageLocked(page));
- ASSERT(PageDirty(page));
- clear_page_dirty_for_io(page);
+ if (pages_dirty && page != locked_page) {
+ ASSERT(PageDirty(page));
+ clear_page_dirty_for_io(page);
+ }
ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
i_size, &nr);
@@ -2279,23 +2198,21 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
set_page_writeback(page);
end_page_writeback(page);
}
- if (ret)
- end_extent_writepage(page, ret, cur, cur_end);
- btrfs_page_unlock_writer(fs_info, page, cur, cur_end + 1 - cur);
- if (ret < 0) {
- found_error = true;
- first_error = ret;
+ if (ret) {
+ btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
+ cur, cur_len, !ret);
+ btrfs_page_clear_uptodate(fs_info, page, cur, cur_len);
+ mapping_set_error(page->mapping, ret);
}
+ btrfs_page_unlock_writer(fs_info, page, cur, cur_len);
+ if (ret < 0)
+ found_error = true;
next_page:
put_page(page);
cur = cur_end + 1;
}
submit_write_bio(&bio_ctrl, found_error ? ret : 0);
-
- if (found_error)
- return first_error;
- return ret;
}
int extent_writepages(struct address_space *mapping,
@@ -3315,8 +3232,8 @@ struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
return NULL;
}
WARN_ON(PageDirty(p));
- copy_page(page_address(p), page_address(src->pages[i]));
}
+ copy_extent_buffer_full(new, src);
set_extent_buffer_uptodate(new);
return new;
@@ -3559,6 +3476,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
struct extent_buffer *exists = NULL;
struct page *p;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
+ struct btrfs_subpage *prealloc = NULL;
u64 lockdep_owner = owner_root;
int uptodate = 1;
int ret;
@@ -3595,36 +3513,30 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
num_pages = num_extent_pages(eb);
- for (i = 0; i < num_pages; i++, index++) {
- struct btrfs_subpage *prealloc = NULL;
+ /*
+ * Preallocate page->private for the subpage case, so that we won't
+ * allocate memory with the private_lock or the page lock held.
+ *
+ * The memory will be freed by attach_extent_buffer_page() or freed
+ * manually if we exit earlier.
+ */
+ if (fs_info->nodesize < PAGE_SIZE) {
+ prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
+ if (IS_ERR(prealloc)) {
+ exists = ERR_CAST(prealloc);
+ goto free_eb;
+ }
+ }
+
+ for (i = 0; i < num_pages; i++, index++) {
p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
if (!p) {
exists = ERR_PTR(-ENOMEM);
+ btrfs_free_subpage(prealloc);
goto free_eb;
}
- /*
- * Preallocate page->private for subpage case, so that we won't
- * allocate memory with private_lock hold. The memory will be
- * freed by attach_extent_buffer_page() or freed manually if
- * we exit earlier.
- *
- * Although we have ensured one subpage eb can only have one
- * page, but it may change in the future for 16K page size
- * support, so we still preallocate the memory in the loop.
- */
- if (fs_info->nodesize < PAGE_SIZE) {
- prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
- if (IS_ERR(prealloc)) {
- ret = PTR_ERR(prealloc);
- unlock_page(p);
- put_page(p);
- exists = ERR_PTR(ret);
- goto free_eb;
- }
- }
-
spin_lock(&mapping->private_lock);
exists = grab_extent_buffer(fs_info, p);
if (exists) {
@@ -4210,30 +4122,9 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb,
}
}
-void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
- const void *srcv)
-{
- char *kaddr;
-
- assert_eb_page_uptodate(eb, eb->pages[0]);
- kaddr = page_address(eb->pages[0]) +
- get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
- chunk_tree_uuid));
- memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
-}
-
-void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
-{
- char *kaddr;
-
- assert_eb_page_uptodate(eb, eb->pages[0]);
- kaddr = page_address(eb->pages[0]) +
- get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
- memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
-}
-
-void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
- unsigned long start, unsigned long len)
+static void __write_extent_buffer(const struct extent_buffer *eb,
+ const void *srcv, unsigned long start,
+ unsigned long len, bool use_memmove)
{
size_t cur;
size_t offset;
@@ -4241,6 +4132,8 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
char *kaddr;
char *src = (char *)srcv;
unsigned long i = get_eb_page_index(start);
+ /* For unmapped (dummy) ebs, no need to check their uptodate status. */
+ const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
@@ -4251,11 +4144,15 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
while (len > 0) {
page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
+ if (check_uptodate)
+ assert_eb_page_uptodate(eb, page);
cur = min(len, PAGE_SIZE - offset);
kaddr = page_address(page);
- memcpy(kaddr + offset, src, cur);
+ if (use_memmove)
+ memmove(kaddr + offset, src, cur);
+ else
+ memcpy(kaddr + offset, src, cur);
src += cur;
len -= cur;
@@ -4264,55 +4161,54 @@ void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
}
}
-void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
- unsigned long len)
+void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
+ unsigned long start, unsigned long len)
{
- size_t cur;
- size_t offset;
- struct page *page;
- char *kaddr;
- unsigned long i = get_eb_page_index(start);
+ return __write_extent_buffer(eb, srcv, start, len, false);
+}
- if (check_eb_range(eb, start, len))
- return;
+static void memset_extent_buffer(const struct extent_buffer *eb, int c,
+ unsigned long start, unsigned long len)
+{
+ unsigned long cur = start;
- offset = get_eb_offset_in_page(eb, start);
+ while (cur < start + len) {
+ unsigned long index = get_eb_page_index(cur);
+ unsigned int offset = get_eb_offset_in_page(eb, cur);
+ unsigned int cur_len = min(start + len - cur, PAGE_SIZE - offset);
+ struct page *page = eb->pages[index];
- while (len > 0) {
- page = eb->pages[i];
assert_eb_page_uptodate(eb, page);
+ memset(page_address(page) + offset, c, cur_len);
- cur = min(len, PAGE_SIZE - offset);
- kaddr = page_address(page);
- memset(kaddr + offset, 0, cur);
-
- len -= cur;
- offset = 0;
- i++;
+ cur += cur_len;
}
}
+void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
+ unsigned long len)
+{
+ if (check_eb_range(eb, start, len))
+ return;
+ return memset_extent_buffer(eb, 0, start, len);
+}
+
void copy_extent_buffer_full(const struct extent_buffer *dst,
const struct extent_buffer *src)
{
- int i;
- int num_pages;
+ unsigned long cur = 0;
ASSERT(dst->len == src->len);
- if (dst->fs_info->nodesize >= PAGE_SIZE) {
- num_pages = num_extent_pages(dst);
- for (i = 0; i < num_pages; i++)
- copy_page(page_address(dst->pages[i]),
- page_address(src->pages[i]));
- } else {
- size_t src_offset = get_eb_offset_in_page(src, 0);
- size_t dst_offset = get_eb_offset_in_page(dst, 0);
+ while (cur < src->len) {
+ unsigned long index = get_eb_page_index(cur);
+ unsigned long offset = get_eb_offset_in_page(src, cur);
+ unsigned long cur_len = min(src->len, PAGE_SIZE - offset);
+ void *addr = page_address(src->pages[index]) + offset;
+
+ write_extent_buffer(dst, addr, cur, cur_len);
- ASSERT(src->fs_info->nodesize < PAGE_SIZE);
- memcpy(page_address(dst->pages[0]) + dst_offset,
- page_address(src->pages[0]) + src_offset,
- src->len);
+ cur += cur_len;
}
}
@@ -4406,6 +4302,15 @@ int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}
+static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
+{
+ unsigned long index = get_eb_page_index(bytenr);
+
+ if (check_eb_range(eb, bytenr, 1))
+ return NULL;
+ return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr);
+}
+
/*
* Set an area of a bitmap to 1.
*
@@ -4417,35 +4322,28 @@ int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len)
{
+ unsigned int first_byte = start + BIT_BYTE(pos);
+ unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
+ const bool same_byte = (first_byte == last_byte);
+ u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
u8 *kaddr;
- struct page *page;
- unsigned long i;
- size_t offset;
- const unsigned int size = pos + len;
- int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
- u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
- eb_bitmap_offset(eb, start, pos, &i, &offset);
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
+ if (same_byte)
+ mask &= BITMAP_LAST_BYTE_MASK(pos + len);
- while (len >= bits_to_set) {
- kaddr[offset] |= mask_to_set;
- len -= bits_to_set;
- bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~0;
- if (++offset >= PAGE_SIZE && len > 0) {
- offset = 0;
- page = eb->pages[++i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
- }
- }
- if (len) {
- mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
- kaddr[offset] |= mask_to_set;
- }
+ /* Handle the first byte. */
+ kaddr = extent_buffer_get_byte(eb, first_byte);
+ *kaddr |= mask;
+ if (same_byte)
+ return;
+
+ /* Handle the byte aligned part. */
+ ASSERT(first_byte + 1 <= last_byte);
+ memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
+
+ /* Handle the last byte. */
+ kaddr = extent_buffer_get_byte(eb, last_byte);
+ *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
}
@@ -4461,35 +4359,28 @@ void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
unsigned long start, unsigned long pos,
unsigned long len)
{
+ unsigned int first_byte = start + BIT_BYTE(pos);
+ unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
+ const bool same_byte = (first_byte == last_byte);
+ u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
u8 *kaddr;
- struct page *page;
- unsigned long i;
- size_t offset;
- const unsigned int size = pos + len;
- int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
- u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
- eb_bitmap_offset(eb, start, pos, &i, &offset);
- page = eb->pages[i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
+ if (same_byte)
+ mask &= BITMAP_LAST_BYTE_MASK(pos + len);
- while (len >= bits_to_clear) {
- kaddr[offset] &= ~mask_to_clear;
- len -= bits_to_clear;
- bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~0;
- if (++offset >= PAGE_SIZE && len > 0) {
- offset = 0;
- page = eb->pages[++i];
- assert_eb_page_uptodate(eb, page);
- kaddr = page_address(page);
- }
- }
- if (len) {
- mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
- kaddr[offset] &= ~mask_to_clear;
- }
+ /* Handle the first byte. */
+ kaddr = extent_buffer_get_byte(eb, first_byte);
+ *kaddr &= ~mask;
+ if (same_byte)
+ return;
+
+ /* Handle the byte aligned part. */
+ ASSERT(first_byte + 1 <= last_byte);
+ memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
+
+ /* Handle the last byte. */
+ kaddr = extent_buffer_get_byte(eb, last_byte);
+ *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
}
static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
@@ -4498,60 +4389,29 @@ static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned
return distance < len;
}
-static void copy_pages(struct page *dst_page, struct page *src_page,
- unsigned long dst_off, unsigned long src_off,
- unsigned long len)
-{
- char *dst_kaddr = page_address(dst_page);
- char *src_kaddr;
- int must_memmove = 0;
-
- if (dst_page != src_page) {
- src_kaddr = page_address(src_page);
- } else {
- src_kaddr = dst_kaddr;
- if (areas_overlap(src_off, dst_off, len))
- must_memmove = 1;
- }
-
- if (must_memmove)
- memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
- else
- memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
-}
-
void memcpy_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
- unsigned long dst_i;
- unsigned long src_i;
+ unsigned long cur_off = 0;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
- while (len > 0) {
- dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
- src_off_in_page = get_eb_offset_in_page(dst, src_offset);
-
- dst_i = get_eb_page_index(dst_offset);
- src_i = get_eb_page_index(src_offset);
-
- cur = min(len, (unsigned long)(PAGE_SIZE -
- src_off_in_page));
- cur = min_t(unsigned long, cur,
- (unsigned long)(PAGE_SIZE - dst_off_in_page));
-
- copy_pages(dst->pages[dst_i], dst->pages[src_i],
- dst_off_in_page, src_off_in_page, cur);
-
- src_offset += cur;
- dst_offset += cur;
- len -= cur;
+ while (cur_off < len) {
+ unsigned long cur_src = cur_off + src_offset;
+ unsigned long pg_index = get_eb_page_index(cur_src);
+ unsigned long pg_off = get_eb_offset_in_page(dst, cur_src);
+ unsigned long cur_len = min(src_offset + len - cur_src,
+ PAGE_SIZE - pg_off);
+ void *src_addr = page_address(dst->pages[pg_index]) + pg_off;
+ const bool use_memmove = areas_overlap(src_offset + cur_off,
+ dst_offset + cur_off, cur_len);
+
+ __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
+ use_memmove);
+ cur_off += cur_len;
}
}
@@ -4559,23 +4419,26 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
- size_t cur;
- size_t dst_off_in_page;
- size_t src_off_in_page;
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
- unsigned long dst_i;
- unsigned long src_i;
if (check_eb_range(dst, dst_offset, len) ||
check_eb_range(dst, src_offset, len))
return;
+
if (dst_offset < src_offset) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
+
while (len > 0) {
- dst_i = get_eb_page_index(dst_end);
+ unsigned long src_i;
+ size_t cur;
+ size_t dst_off_in_page;
+ size_t src_off_in_page;
+ void *src_addr;
+ bool use_memmove;
+
src_i = get_eb_page_index(src_end);
dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
@@ -4583,9 +4446,14 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
- copy_pages(dst->pages[dst_i], dst->pages[src_i],
- dst_off_in_page - cur + 1,
- src_off_in_page - cur + 1, cur);
+
+ src_addr = page_address(dst->pages[src_i]) + src_off_in_page -
+ cur + 1;
+ use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
+ cur);
+
+ __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
+ use_memmove);
dst_end -= cur;
src_end -= cur;
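
extent_buffer_bitmap_set() and extent_buffer_bitmap_clear() above now treat a bit range as a masked first byte, a memset() over the whole bytes in the middle, and a masked last byte, instead of walking bit by bit across pages. The sketch below shows the same three-step masking on a flat buffer; FIRST_BYTE_MASK()/LAST_BYTE_MASK() are local stand-ins for the kernel's BITMAP_FIRST_BYTE_MASK()/BITMAP_LAST_BYTE_MASK().

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
/* Mask of bits from (pos % 8) up to bit 7 of a byte. */
#define FIRST_BYTE_MASK(pos) ((uint8_t)(0xff << ((pos) & 7)))
/* Mask of bits 0 .. ((nbits - 1) % 8) of a byte. */
#define LAST_BYTE_MASK(nbits) ((uint8_t)(0xff >> (-(nbits) & 7)))

/* Set @len bits starting at bit @pos, touching whole bytes in the middle. */
static void bitmap_set_range(uint8_t *map, unsigned long pos, unsigned long len)
{
	unsigned long first_byte = BIT_BYTE(pos);
	unsigned long last_byte = BIT_BYTE(pos + len - 1);
	uint8_t mask = FIRST_BYTE_MASK(pos);

	if (first_byte == last_byte) {
		map[first_byte] |= mask & LAST_BYTE_MASK(pos + len);
		return;
	}
	/* Head byte. */
	map[first_byte] |= mask;
	/* Byte-aligned middle. */
	memset(map + first_byte + 1, 0xff, last_byte - first_byte - 1);
	/* Tail byte. */
	map[last_byte] |= LAST_BYTE_MASK(pos + len);
}

int main(void)
{
	uint8_t map[4] = { 0 };

	bitmap_set_range(map, 3, 13); /* bits 3..15 */
	printf("%02x %02x %02x %02x\n", map[0], map[1], map[2], map[3]);
	/* expected: f8 ff 00 00 */
	return 0;
}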
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index c5fae3a7d911..68368ba99321 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -40,7 +40,6 @@ enum {
ENUM_BIT(PAGE_START_WRITEBACK),
ENUM_BIT(PAGE_END_WRITEBACK),
ENUM_BIT(PAGE_SET_ORDERED),
- ENUM_BIT(PAGE_LOCK),
};
/*
@@ -94,6 +93,13 @@ struct extent_buffer {
#endif
};
+struct btrfs_eb_write_context {
+ struct writeback_control *wbc;
+ struct extent_buffer *eb;
+ /* Block group @eb resides in. Only used for zoned mode. */
+ struct btrfs_block_group *zoned_bg;
+};
+
/*
* Get the correct offset inside the page of extent buffer.
*
@@ -178,8 +184,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
-int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
- struct writeback_control *wbc);
+void extent_write_locked_range(struct inode *inode, struct page *locked_page,
+ u64 start, u64 end, struct writeback_control *wbc,
+ bool pages_dirty);
int extent_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
@@ -236,11 +243,24 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dst,
int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
void __user *dst, unsigned long start,
unsigned long len);
-void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
-void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
- const void *src);
void write_extent_buffer(const struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
+
+static inline void write_extent_buffer_chunk_tree_uuid(
+ const struct extent_buffer *eb, const void *chunk_tree_uuid)
+{
+ write_extent_buffer(eb, chunk_tree_uuid,
+ offsetof(struct btrfs_header, chunk_tree_uuid),
+ BTRFS_FSID_SIZE);
+}
+
+static inline void write_extent_buffer_fsid(const struct extent_buffer *eb,
+ const void *fsid)
+{
+ write_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
+ BTRFS_FSID_SIZE);
+}
+
void copy_extent_buffer_full(const struct extent_buffer *dst,
const struct extent_buffer *src);
void copy_extent_buffer(const struct extent_buffer *dst,
@@ -266,7 +286,6 @@ void set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
-void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
u32 bits_to_clear, unsigned long page_ops);
@@ -277,8 +296,6 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array);
-void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
-
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
struct page *locked_page, u64 *start,
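
The memcpy_extent_buffer()/memmove_extent_buffer() rework in extent_io.c chooses between memcpy() and memmove() per chunk by asking whether the source and destination ranges overlap (areas_overlap()). A small sketch of that decision on a flat buffer, rather than extent buffer pages:

#include <stdio.h>
#include <string.h>

/* Ranges [src, src+len) and [dst, dst+len) overlap iff their distance < len. */
static int areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
	unsigned long distance = (dst > src) ? dst - src : src - dst;

	return distance < len;
}

/* Copy inside one buffer, using memmove() only when the ranges overlap. */
static void copy_range(char *buf, unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	if (areas_overlap(src_off, dst_off, len))
		memmove(buf + dst_off, buf + src_off, len);
	else
		memcpy(buf + dst_off, buf + src_off, len);
}

int main(void)
{
	char buf[16] = "abcdefgh";

	copy_range(buf, 2, 0, 6);   /* overlapping: memmove path */
	printf("%s\n", buf);        /* "ababcdef" */

	copy_range(buf, 9, 0, 4);   /* disjoint: memcpy path */
	buf[13] = '\0';
	printf("%s\n", buf + 9);    /* "abab" */
	return 0;
}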
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 696bf695d8eb..1ce5dd154499 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -597,29 +597,37 @@ fail:
* Each bit represents a sector. Thus caller should ensure @csum_buf passed
* in is large enough to contain all csums.
*/
-int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
- u8 *csum_buf, unsigned long *csum_bitmap,
- bool search_commit)
+int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
+ u64 start, u64 end, u8 *csum_buf,
+ unsigned long *csum_bitmap)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
- struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_csum_item *item;
const u64 orig_start = start;
+ bool free_path = false;
int ret;
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(end + 1, fs_info->sectorsize));
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ free_path = true;
+ }
- if (search_commit) {
- path->skip_locking = 1;
- path->reada = READA_FORWARD;
- path->search_commit_root = 1;
+ /* Check if we can reuse the previous path. */
+ if (path->nodes[0]) {
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+
+ if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
+ key.type == BTRFS_EXTENT_CSUM_KEY &&
+ key.offset <= start)
+ goto search_forward;
+ btrfs_release_path(path);
}
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -656,6 +664,7 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
}
}
+search_forward:
while (start <= end) {
u64 csum_end;
@@ -712,7 +721,8 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
}
ret = 0;
fail:
- btrfs_free_path(path);
+ if (free_path)
+ btrfs_free_path(path);
return ret;
}
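
btrfs_lookup_csums_bitmap() now accepts an optional caller-supplied path so that repeated lookups over adjacent ranges can reuse the previous search position; it allocates a path only when the caller passes NULL and frees only what it allocated. A sketch of that ownership pattern, with a hypothetical cursor type standing in for btrfs_path:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a search cursor such as btrfs_path. */
struct cursor {
	long last_key; /* where the previous search ended, -1 if unused */
};

static int lookup_range(struct cursor *cur, long start, long end)
{
	struct cursor *local = cur;
	int free_cursor = 0;

	if (!local) {
		local = calloc(1, sizeof(*local));
		if (!local)
			return -1;
		local->last_key = -1;
		free_cursor = 1; /* we allocated it, so we free it */
	}

	if (local->last_key >= 0 && local->last_key <= start)
		printf("reusing cursor at %ld for [%ld, %ld]\n",
		       local->last_key, start, end);
	else
		printf("fresh search for [%ld, %ld]\n", start, end);

	local->last_key = end; /* remember where we stopped */

	if (free_cursor)
		free(local);
	return 0;
}

int main(void)
{
	struct cursor cur = { .last_key = -1 };

	lookup_range(&cur, 0, 4096);    /* fresh search, cursor kept by caller */
	lookup_range(&cur, 4096, 8192); /* reuses the caller's cursor */
	lookup_range(NULL, 0, 4096);    /* allocates and frees internally */
	return 0;
}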
diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h
index 4ec669b69008..04bd2d34efb1 100644
--- a/fs/btrfs/file-item.h
+++ b/fs/btrfs/file-item.h
@@ -57,9 +57,9 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit,
bool nowait);
-int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
- u8 *csum_buf, unsigned long *csum_bitmap,
- bool search_commit);
+int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
+ u64 start, u64 end, u8 *csum_buf,
+ unsigned long *csum_bitmap);
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const struct btrfs_path *path,
struct btrfs_file_extent_item *fi,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index fd03e689a6be..ca46a529d56b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -876,9 +876,9 @@ static int prepare_uptodate_page(struct inode *inode,
return 0;
}
-static unsigned int get_prepare_fgp_flags(bool nowait)
+static fgf_t get_prepare_fgp_flags(bool nowait)
{
- unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
+ fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
if (nowait)
fgp_flags |= FGP_NOWAIT;
@@ -910,7 +910,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
int i;
unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = get_prepare_gfp_flags(inode, nowait);
- unsigned int fgp_flags = get_prepare_fgp_flags(nowait);
+ fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
int err = 0;
int faili;
@@ -1106,24 +1106,6 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
btrfs_drew_write_unlock(&inode->root->snapshot_lock);
}
-static void update_time_for_write(struct inode *inode)
-{
- struct timespec64 now;
-
- if (IS_NOCMTIME(inode))
- return;
-
- now = current_time(inode);
- if (!timespec64_equal(&inode->i_mtime, &now))
- inode->i_mtime = now;
-
- if (!timespec64_equal(&inode->i_ctime, &now))
- inode->i_ctime = now;
-
- if (IS_I_VERSION(inode))
- inode_inc_iversion(inode);
-}
-
static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
size_t count)
{
@@ -1155,7 +1137,10 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
* need to start yet another transaction to update the inode as we will
* update the inode when we finish writing whatever data we write.
*/
- update_time_for_write(inode);
+ if (!IS_NOCMTIME(inode)) {
+ inode->i_mtime = inode_set_ctime_current(inode);
+ inode_inc_iversion(inode);
+ }
start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
@@ -2459,10 +2444,8 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
*/
inode_inc_iversion(&inode->vfs_inode);
- if (!extent_info || extent_info->update_times) {
- inode->vfs_inode.i_mtime = current_time(&inode->vfs_inode);
- inode->vfs_inode.i_ctime = inode->vfs_inode.i_mtime;
- }
+ if (!extent_info || extent_info->update_times)
+ inode->vfs_inode.i_mtime = inode_set_ctime_current(&inode->vfs_inode);
ret = btrfs_update_inode(trans, root, inode);
if (ret)
@@ -2703,8 +2686,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
- inode->i_mtime = current_time(inode);
- inode->i_ctime = inode->i_mtime;
+ inode->i_mtime = inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);
@@ -2721,11 +2703,10 @@ out_only_mutex:
* for detecting, at fsync time, if the inode isn't yet in the
* log tree or it's there but not up to date.
*/
- struct timespec64 now = current_time(inode);
+ struct timespec64 now = inode_set_ctime_current(inode);
inode_inc_iversion(inode);
inode->i_mtime = now;
- inode->i_ctime = now;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -2796,7 +2777,7 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
if (IS_ERR(trans))
return PTR_ERR(trans);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
i_size_write(inode, end);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
@@ -3018,7 +2999,7 @@ static long btrfs_fallocate(struct file *file, int mode,
struct extent_changeset *data_reserved = NULL;
struct falloc_range *range;
struct falloc_range *tmp;
- struct list_head reserve_list;
+ LIST_HEAD(reserve_list);
u64 cur_offset;
u64 last_byte;
u64 alloc_start;
@@ -3110,7 +3091,6 @@ static long btrfs_fallocate(struct file *file, int mode,
btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
/* First, check if we exceed the qgroup limit */
- INIT_LIST_HEAD(&reserve_list);
while (cur_offset < alloc_end) {
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
alloc_end - cur_offset);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 880800418075..27fad70451aa 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1219,10 +1219,9 @@ static noinline_for_stack int write_pinned_extent_entries(
start = block_group->start;
while (start < block_group->start + block_group->length) {
- ret = find_first_extent_bit(unpin, start,
- &extent_start, &extent_end,
- EXTENT_DIRTY, NULL);
- if (ret)
+ if (!find_first_extent_bit(unpin, start,
+ &extent_start, &extent_end,
+ EXTENT_DIRTY, NULL))
return 0;
/* This pinned extent is out of our range */
@@ -2705,13 +2704,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
spin_lock(&ctl->tree_lock);
- /* Count initial region as zone_unusable until it gets activated. */
if (!used)
to_free = size;
- else if (initial &&
- test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
- (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
- to_free = 0;
else if (initial)
to_free = block_group->zone_capacity;
else if (offset >= block_group->alloc_offset)
@@ -2739,8 +2733,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
reclaimable_unusable = block_group->zone_unusable -
(block_group->length - block_group->zone_capacity);
/* All the region is now unusable. Mark it as unused and reclaim */
- if (block_group->zone_unusable == block_group->length &&
- block_group->alloc_offset) {
+ if (block_group->zone_unusable == block_group->length) {
btrfs_mark_bg_unused(block_group);
} else if (bg_reclaim_threshold &&
reclaimable_unusable >=
@@ -2944,7 +2937,8 @@ void btrfs_dump_free_space(struct btrfs_block_group *block_group,
btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
btrfs_info(fs_info,
- "%d blocks of free space at or bigger than bytes is", count);
+ "%d free space entries at or bigger than %llu bytes",
+ count, bytes);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index f169378e2ca6..c0e734082dcc 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1517,8 +1517,10 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
} else if (prev_bit == 1 && bit == 0) {
u64 space_added;
- ret = add_new_free_space(block_group, extent_start,
- offset, &space_added);
+ ret = btrfs_add_new_free_space(block_group,
+ extent_start,
+ offset,
+ &space_added);
if (ret)
goto out;
total_found += space_added;
@@ -1533,7 +1535,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
}
}
if (prev_bit == 1) {
- ret = add_new_free_space(block_group, extent_start, end, NULL);
+ ret = btrfs_add_new_free_space(block_group, extent_start, end, NULL);
if (ret)
goto out;
extent_count++;
@@ -1590,8 +1592,9 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
ASSERT(key.objectid < end && key.objectid + key.offset <= end);
- ret = add_new_free_space(block_group, key.objectid,
- key.objectid + key.offset, &space_added);
+ ret = btrfs_add_new_free_space(block_group, key.objectid,
+ key.objectid + key.offset,
+ &space_added);
if (ret)
goto out;
total_found += space_added;
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 203d2a267828..a523d64d5491 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -46,8 +46,6 @@ static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
* Runtime (in-memory) states of filesystem
*/
enum {
- /* Global indicator of serious filesystem errors */
- BTRFS_FS_STATE_ERROR,
/*
* Filesystem is being remounted, allow to skip some operations, like
* defrag
@@ -686,6 +684,12 @@ struct btrfs_fs_info {
bool qgroup_rescan_running;
u8 qgroup_drop_subtree_thres;
+ /*
+ * If this is not 0, it indicates that a serious filesystem error has
+ * happened and it contains that error (a negative errno value).
+ */
+ int fs_error;
+
/* Filesystem state */
unsigned long fs_state;
@@ -766,6 +770,9 @@ struct btrfs_fs_info {
u64 data_reloc_bg;
struct mutex zoned_data_reloc_io_lock;
+ struct btrfs_block_group *active_meta_bg;
+ struct btrfs_block_group *active_system_bg;
+
u64 nr_global_roots;
spinlock_t zone_active_bgs_lock;
@@ -962,8 +969,8 @@ static inline void btrfs_wake_unfinished_drop(struct btrfs_fs_info *fs_info)
clear_and_wake_up_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags);
}
-#define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
- &(fs_info)->fs_state)))
+#define BTRFS_FS_ERROR(fs_info) (READ_ONCE((fs_info)->fs_error))
+
#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
(unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
&(fs_info)->fs_state)))
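
The fs.h hunks replace the BTRFS_FS_STATE_ERROR bit with an int fs_error field, so BTRFS_FS_ERROR() can also report which error occurred; readers use READ_ONCE(), and the write side (not part of this hunk) records only the first error. A userspace analogue using C11 atomics instead of the kernel's READ_ONCE/WRITE_ONCE; the setter below is an assumed equivalent, not the kernel function:

#include <stdatomic.h>
#include <stdio.h>

/* 0 means healthy; otherwise the first negative errno ever recorded. */
static atomic_int fs_error = 0;

static void record_fs_error(int errnum)
{
	int expected = 0;

	/* Keep the first error, ignore later ones. */
	atomic_compare_exchange_strong(&fs_error, &expected, errnum);
}

static int check_fs_error(void)
{
	/* Lockless read, analogous to READ_ONCE((fs_info)->fs_error). */
	return atomic_load_explicit(&fs_error, memory_order_relaxed);
}

int main(void)
{
	printf("before: %d\n", check_fs_error()); /* 0 */
	record_fs_error(-5);   /* -EIO */
	record_fs_error(-28);  /* -ENOSPC, ignored: an error is already set */
	printf("after: %d\n", check_fs_error());  /* -5 */
	return 0;
}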
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index aa090b0b5d29..f09fbdc43f0f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -124,11 +124,11 @@ static struct kmem_cache *btrfs_inode_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);
-static noinline int cow_file_range(struct btrfs_inode *inode,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, int unlock,
- u64 *done_offset);
+
+static noinline int run_delalloc_cow(struct btrfs_inode *inode,
+ struct page *locked_page, u64 start,
+ u64 end, struct writeback_control *wbc,
+ bool pages_dirty);
static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
u64 len, u64 orig_start, u64 block_start,
u64 block_len, u64 orig_block_len,
@@ -423,11 +423,10 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
while (index <= end_index) {
/*
- * For locked page, we will call end_extent_writepage() on it
- * in run_delalloc_range() for the error handling. That
- * end_extent_writepage() function will call
- * btrfs_mark_ordered_io_finished() to clear page Ordered and
- * run the ordered extent accounting.
+ * For the locked page, btrfs_mark_ordered_io_finished() will be
+ * called on it in run_delalloc_range() for the error handling,
+ * which will clear the page Ordered bit and run the ordered
+ * extent accounting.
*
* Here we can't just clear the Ordered bit, or
* btrfs_mark_ordered_io_finished() would skip the accounting
@@ -815,24 +814,22 @@ static inline void inode_should_defrag(struct btrfs_inode *inode,
}
/*
- * we create compressed extents in two phases. The first
- * phase compresses a range of pages that have already been
- * locked (both pages and state bits are locked).
+ * Work queue callback to start compression on a file and its pages.
*
- * This is done inside an ordered work queue, and the compression
- * is spread across many cpus. The actual IO submission is step
- * two, and the ordered work queue takes care of making sure that
- * happens in the same order things were put onto the queue by
- * writepages and friends.
+ * This is done inside an ordered work queue, and the compression is spread
+ * across many cpus. The actual IO submission is step two, and the ordered work
+ * queue takes care of making sure that happens in the same order things were
+ * put onto the queue by writepages and friends.
*
- * If this code finds it can't get good compression, it puts an
- * entry onto the work queue to write the uncompressed bytes. This
- * makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that the flusher thread sent them
- * down.
+ * If this code finds it can't get good compression, it puts an entry onto the
+ * work queue to write the uncompressed bytes. This makes sure that both
+ * compressed inodes and uncompressed inodes are written in the same order that
+ * the flusher thread sent them down.
*/
-static noinline int compress_file_range(struct async_chunk *async_chunk)
+static void compress_file_range(struct btrfs_work *work)
{
+ struct async_chunk *async_chunk =
+ container_of(work, struct async_chunk, work);
struct btrfs_inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct address_space *mapping = inode->vfs_inode.i_mapping;
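
compress_file_range() now takes the generic btrfs_work callback signature and recovers its async_chunk with container_of(), instead of being passed the chunk directly. A generic sketch of that embedded work-item pattern, using a plain function pointer in place of the btrfs workqueue:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Minimal work item: just a callback taking the work pointer. */
struct work {
	void (*func)(struct work *work);
};

/* The real payload embeds the work item. */
struct async_chunk {
	unsigned long start;
	unsigned long end;
	struct work work;
};

static void compress_chunk(struct work *work)
{
	/* Recover the enclosing structure from the embedded member. */
	struct async_chunk *chunk = container_of(work, struct async_chunk, work);

	printf("compressing range [%lu, %lu]\n", chunk->start, chunk->end);
}

int main(void)
{
	struct async_chunk chunk = {
		.start = 0,
		.end = 4095,
		.work = { .func = compress_chunk },
	};

	/* A workqueue would make this call after dequeuing the item. */
	chunk.work.func(&chunk.work);
	return 0;
}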
@@ -842,19 +839,24 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
u64 actual_end;
u64 i_size;
int ret = 0;
- struct page **pages = NULL;
+ struct page **pages;
unsigned long nr_pages;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
+ unsigned int poff;
int i;
- int will_compress;
int compress_type = fs_info->compress_type;
- int compressed_extents = 0;
- int redirty = 0;
inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);
/*
+ * We need to call clear_page_dirty_for_io on each page in the range.
+ * Otherwise applications with the file mmap'd can wander in and change
+ * the page contents while we are compressing them.
+ */
+ extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
+
+ /*
* We need to save i_size before now because it could change in between
* us evaluating the size and assigning it. This is because we lock and
* unlock the page in truncate and fallocate, and then modify the i_size
@@ -868,7 +870,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk)
barrier();
actual_end = min_t(u64, i_size, end + 1);
again:
- will_compress = 0;
+ pages = NULL;
nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);
@@ -912,78 +914,57 @@ again:
ret = 0;
/*
- * we do compression for mount -o compress and when the
- * inode has not been flagged as nocompress. This flag can
- * change at any time if we discover bad compression ratios.
+ * We do compression for mount -o compress and when the inode has not
+ * been flagged as NOCOMPRESS. This flag can change at any time if we
+ * discover bad compression ratios.
*/
- if (inode_need_compress(inode, start, end)) {
- WARN_ON(pages);
- pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!pages) {
- /* just bail out to the uncompressed code */
- nr_pages = 0;
- goto cont;
- }
-
- if (inode->defrag_compress)
- compress_type = inode->defrag_compress;
- else if (inode->prop_compress)
- compress_type = inode->prop_compress;
+ if (!inode_need_compress(inode, start, end))
+ goto cleanup_and_bail_uncompressed;
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
+ if (!pages) {
/*
- * we need to call clear_page_dirty_for_io on each
- * page in the range. Otherwise applications with the file
- * mmap'd can wander in and change the page contents while
- * we are compressing them.
- *
- * If the compression fails for any reason, we set the pages
- * dirty again later on.
- *
- * Note that the remaining part is redirtied, the start pointer
- * has moved, the end is the original one.
+ * Memory allocation failure is not a fatal error, we can fall
+ * back to uncompressed code.
*/
- if (!redirty) {
- extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);
- redirty = 1;
- }
+ goto cleanup_and_bail_uncompressed;
+ }
- /* Compression level is applied here and only here */
- ret = btrfs_compress_pages(
- compress_type | (fs_info->compress_level << 4),
- mapping, start,
- pages,
- &nr_pages,
- &total_in,
- &total_compressed);
+ if (inode->defrag_compress)
+ compress_type = inode->defrag_compress;
+ else if (inode->prop_compress)
+ compress_type = inode->prop_compress;
+
+ /* Compression level is applied here. */
+ ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
+ mapping, start, pages, &nr_pages, &total_in,
+ &total_compressed);
+ if (ret)
+ goto mark_incompressible;
- if (!ret) {
- unsigned long offset = offset_in_page(total_compressed);
- struct page *page = pages[nr_pages - 1];
+ /*
+ * Zero the tail end of the last page, as we might be sending it down
+ * to disk.
+ */
+ poff = offset_in_page(total_compressed);
+ if (poff)
+ memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);
- /* zero the tail end of the last page, we might be
- * sending it down to disk
- */
- if (offset)
- memzero_page(page, offset, PAGE_SIZE - offset);
- will_compress = 1;
- }
- }
-cont:
/*
+ * Try to create an inline extent.
+ *
+ * If we didn't compress the entire range, try to create an uncompressed
+ * inline extent, else a compressed one.
+ *
* Check cow_file_range() for why we don't even try to create inline
- * extent for subpage case.
+ * extent for the subpage case.
*/
if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
- /* lets try to make an inline extent */
- if (ret || total_in < actual_end) {
- /* we didn't compress the entire range, try
- * to make an uncompressed inline extent.
- */
- ret = cow_file_range_inline(inode, actual_end,
- 0, BTRFS_COMPRESS_NONE,
- NULL, false);
+ if (total_in < actual_end) {
+ ret = cow_file_range_inline(inode, actual_end, 0,
+ BTRFS_COMPRESS_NONE, NULL,
+ false);
} else {
- /* try making a compressed inline extent */
ret = cow_file_range_inline(inode, actual_end,
total_compressed,
compress_type, pages,
@@ -1013,99 +994,52 @@ cont:
PAGE_UNLOCK |
PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
-
- /*
- * Ensure we only free the compressed pages if we have
- * them allocated, as we can still reach here with
- * inode_need_compress() == false.
- */
- if (pages) {
- for (i = 0; i < nr_pages; i++) {
- WARN_ON(pages[i]->mapping);
- put_page(pages[i]);
- }
- kfree(pages);
- }
- return 0;
+ goto free_pages;
}
}
- if (will_compress) {
- /*
- * we aren't doing an inline extent round the compressed size
- * up to a block size boundary so the allocator does sane
- * things
- */
- total_compressed = ALIGN(total_compressed, blocksize);
+ /*
+ * We aren't doing an inline extent. Round the compressed size up to a
+ * block size boundary so the allocator does sane things.
+ */
+ total_compressed = ALIGN(total_compressed, blocksize);
- /*
- * one last check to make sure the compression is really a
- * win, compare the page count read with the blocks on disk,
- * compression must free at least one sector size
- */
- total_in = round_up(total_in, fs_info->sectorsize);
- if (total_compressed + blocksize <= total_in) {
- compressed_extents++;
+ /*
+	 * One last check to make sure the compression is really a win: compare
+	 * the page count read with the blocks on disk; compression must free at
+	 * least one sector.
+ */
+ total_in = round_up(total_in, fs_info->sectorsize);
+ if (total_compressed + blocksize > total_in)
+ goto mark_incompressible;
- /*
- * The async work queues will take care of doing actual
- * allocation on disk for these compressed pages, and
- * will submit them to the elevator.
- */
- add_async_extent(async_chunk, start, total_in,
- total_compressed, pages, nr_pages,
- compress_type);
-
- if (start + total_in < end) {
- start += total_in;
- pages = NULL;
- cond_resched();
- goto again;
- }
- return compressed_extents;
- }
+ /*
+ * The async work queues will take care of doing actual allocation on
+ * disk for these compressed pages, and will submit the bios.
+ */
+ add_async_extent(async_chunk, start, total_in, total_compressed, pages,
+ nr_pages, compress_type);
+ if (start + total_in < end) {
+ start += total_in;
+ cond_resched();
+ goto again;
}
+ return;
+
+mark_incompressible:
+ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
+ inode->flags |= BTRFS_INODE_NOCOMPRESS;
+cleanup_and_bail_uncompressed:
+ add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
+ BTRFS_COMPRESS_NONE);
+free_pages:
if (pages) {
- /*
- * the compression code ran but failed to make things smaller,
- * free any pages it allocated and our page pointer array
- */
for (i = 0; i < nr_pages; i++) {
WARN_ON(pages[i]->mapping);
put_page(pages[i]);
}
kfree(pages);
- pages = NULL;
- total_compressed = 0;
- nr_pages = 0;
-
- /* flag the file so we don't compress in the future */
- if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
- !(inode->prop_compress)) {
- inode->flags |= BTRFS_INODE_NOCOMPRESS;
- }
- }
-cleanup_and_bail_uncompressed:
- /*
- * No compression, but we still need to write the pages in the file
- * we've been given so far. redirty the locked page if it corresponds
- * to our extent and set things up for the async work queue to run
- * cow_file_range to do the normal delalloc dance.
- */
- if (async_chunk->locked_page &&
- (page_offset(async_chunk->locked_page) >= start &&
- page_offset(async_chunk->locked_page)) <= end) {
- __set_page_dirty_nobuffers(async_chunk->locked_page);
- /* unlocked later on in the async handlers */
}
-
- if (redirty)
- extent_range_redirty_for_io(&inode->vfs_inode, start, end);
- add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
- BTRFS_COMPRESS_NONE);
- compressed_extents++;
-
- return compressed_extents;
}
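The rewritten compress_file_range() above keeps a compressed extent only when compression frees at least one block: total_in is rounded up to the sector size and compared against total_compressed plus one block, otherwise the range is marked incompressible. A small self-contained sketch of that arithmetic, using example sizes rather than values read from a filesystem:

#include <stdbool.h>
#include <stdio.h>

/* Round x up to the next multiple of the power-of-two a. */
static unsigned long round_up_pow2(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);
}

/* Worth keeping the compressed copy only if it frees at least one block. */
static bool compression_is_win(unsigned long total_in,
                               unsigned long total_compressed,
                               unsigned long sectorsize,
                               unsigned long blocksize)
{
        total_in = round_up_pow2(total_in, sectorsize);
        return total_compressed + blocksize <= total_in;
}

int main(void)
{
        /* 128KiB input, 100KiB compressed, 4KiB sectors/blocks: a win. */
        printf("%d\n", compression_is_win(128 << 10, 100 << 10, 4096, 4096));
        /* 4KiB input, 3.9KiB "compressed": not a win, falls back to uncompressed. */
        printf("%d\n", compression_is_win(4096, 4000, 4096, 4096));
        return 0;
}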
static void free_async_extent_pages(struct async_extent *async_extent)
@@ -1124,14 +1058,12 @@ static void free_async_extent_pages(struct async_extent *async_extent)
async_extent->pages = NULL;
}
-static int submit_uncompressed_range(struct btrfs_inode *inode,
- struct async_extent *async_extent,
- struct page *locked_page)
+static void submit_uncompressed_range(struct btrfs_inode *inode,
+ struct async_extent *async_extent,
+ struct page *locked_page)
{
u64 start = async_extent->start;
u64 end = async_extent->start + async_extent->ram_size - 1;
- unsigned long nr_written = 0;
- int page_started = 0;
int ret;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
@@ -1140,45 +1072,33 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
.no_cgroup_owner = 1,
};
- /*
- * Call cow_file_range() to run the delalloc range directly, since we
- * won't go to NOCOW or async path again.
- *
- * Also we call cow_file_range() with @unlock_page == 0, so that we
- * can directly submit them without interruption.
- */
- ret = cow_file_range(inode, locked_page, start, end, &page_started,
- &nr_written, 0, NULL);
- /* Inline extent inserted, page gets unlocked and everything is done */
- if (page_started)
- return 0;
-
+ wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
+ ret = run_delalloc_cow(inode, locked_page, start, end, &wbc, false);
+ wbc_detach_inode(&wbc);
if (ret < 0) {
btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1);
if (locked_page) {
const u64 page_start = page_offset(locked_page);
- const u64 page_end = page_start + PAGE_SIZE - 1;
set_page_writeback(locked_page);
end_page_writeback(locked_page);
- end_extent_writepage(locked_page, ret, page_start, page_end);
+ btrfs_mark_ordered_io_finished(inode, locked_page,
+ page_start, PAGE_SIZE,
+ !ret);
+ btrfs_page_clear_uptodate(inode->root->fs_info,
+ locked_page, page_start,
+ PAGE_SIZE);
+ mapping_set_error(locked_page->mapping, ret);
unlock_page(locked_page);
}
- return ret;
}
-
- /* All pages will be unlocked, including @locked_page */
- wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
- ret = extent_write_locked_range(&inode->vfs_inode, start, end, &wbc);
- wbc_detach_inode(&wbc);
- return ret;
}
-static int submit_one_async_extent(struct btrfs_inode *inode,
- struct async_chunk *async_chunk,
- struct async_extent *async_extent,
- u64 *alloc_hint)
+static void submit_one_async_extent(struct async_chunk *async_chunk,
+ struct async_extent *async_extent,
+ u64 *alloc_hint)
{
+ struct btrfs_inode *inode = async_chunk->inode;
struct extent_io_tree *io_tree = &inode->io_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1206,9 +1126,8 @@ static int submit_one_async_extent(struct btrfs_inode *inode,
}
lock_extent(io_tree, start, end, NULL);
- /* We have fall back to uncompressed write */
- if (!async_extent->pages) {
- ret = submit_uncompressed_range(inode, async_extent, locked_page);
+ if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
+ submit_uncompressed_range(inode, async_extent, locked_page);
goto done;
}
@@ -1217,7 +1136,6 @@ static int submit_one_async_extent(struct btrfs_inode *inode,
async_extent->compressed_size,
0, *alloc_hint, &ins, 1, 1);
if (ret) {
- free_async_extent_pages(async_extent);
/*
* Here we used to try again by going back to non-compressed
* path for ENOSPC. But we can't reserve space even for
@@ -1272,7 +1190,7 @@ done:
if (async_chunk->blkcg_css)
kthread_associate_blkcg(NULL);
kfree(async_extent);
- return ret;
+ return;
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
@@ -1286,39 +1204,13 @@ out_free:
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
free_async_extent_pages(async_extent);
- goto done;
-}
-
-/*
- * Phase two of compressed writeback. This is the ordered portion of the code,
- * which only gets called in the order the work was queued. We walk all the
- * async extents created by compress_file_range and send them down to the disk.
- */
-static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
-{
- struct btrfs_inode *inode = async_chunk->inode;
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct async_extent *async_extent;
- u64 alloc_hint = 0;
- int ret = 0;
-
- while (!list_empty(&async_chunk->extents)) {
- u64 extent_start;
- u64 ram_size;
-
- async_extent = list_entry(async_chunk->extents.next,
- struct async_extent, list);
- list_del(&async_extent->list);
- extent_start = async_extent->start;
- ram_size = async_extent->ram_size;
-
- ret = submit_one_async_extent(inode, async_chunk, async_extent,
- &alloc_hint);
- btrfs_debug(fs_info,
+ if (async_chunk->blkcg_css)
+ kthread_associate_blkcg(NULL);
+ btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
- inode->root->root_key.objectid,
- btrfs_ino(inode), extent_start, ram_size, ret);
- }
+ root->root_key.objectid, btrfs_ino(inode), start,
+ async_extent->ram_size, ret);
+ kfree(async_extent);
}
static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
@@ -1362,25 +1254,18 @@ static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* locked_page is the page that writepage had locked already. We use
* it to make sure we don't do extra locks or unlocks.
*
- * *page_started is set to one if we unlock locked_page and do everything
- * required to start IO on it. It may be clean and already done with
- * IO when we return.
- *
- * When unlock == 1, we unlock the pages in successfully allocated regions.
- * When unlock == 0, we leave them locked for writing them out.
+ * When this function fails, it unlocks all pages except @locked_page.
*
- * However, we unlock all the pages except @locked_page in case of failure.
+ * When this function successfully creates an inline extent, it returns 1 and
+ * unlocks all pages including locked_page and starts I/O on them.
+ * (In reality inline extents are limited to a single page, so locked_page is
+ * the only page handled anyway).
*
- * In summary, page locking state will be as follow:
+ * When this function succeeds and creates a normal extent, the page locking
+ * status depends on the passed in flags:
*
- * - page_started == 1 (return value)
- * - All the pages are unlocked. IO is started.
- * - Note that this can happen only on success
- * - unlock == 1
- * - All the pages except @locked_page are unlocked in any case
- * - unlock == 0
- * - On success, all the pages are locked for writing out them
- * - On failure, all the pages except @locked_page are unlocked
+ * - If @keep_locked is set, all pages are kept locked.
+ * - Else all pages except for @locked_page are unlocked.
*
* When a failure happens in the second or later iteration of the
* while-loop, the ordered extents created in previous iterations are kept
@@ -1389,10 +1274,9 @@ static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
* example.
*/
static noinline int cow_file_range(struct btrfs_inode *inode,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, int unlock,
- u64 *done_offset)
+ struct page *locked_page, u64 start, u64 end,
+ u64 *done_offset,
+ bool keep_locked, bool no_inline)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1431,7 +1315,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* This means we can trigger inline extent even if we didn't want to.
* So here we skip inline extent creation completely.
*/
- if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
+ if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
end + 1);
@@ -1451,9 +1335,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
- *nr_written = *nr_written +
- (end - start + PAGE_SIZE) / PAGE_SIZE;
- *page_started = 1;
/*
* locked_page is locked by the caller of
* writepage_delalloc(), not locked by
@@ -1463,11 +1344,12 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* as it doesn't have any subpage::writers recorded.
*
* Here we manually unlock the page, since the caller
- * can't use page_started to determine if it's an
- * inline extent or a compressed extent.
+ * can't determine if it's an inline extent or a
+ * compressed extent.
*/
unlock_page(locked_page);
- goto out;
+ ret = 1;
+ goto done;
} else if (ret < 0) {
goto out_unlock;
}
@@ -1498,6 +1380,31 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
min_alloc_size, 0, alloc_hint,
&ins, 1, 1);
+ if (ret == -EAGAIN) {
+ /*
+ * btrfs_reserve_extent only returns -EAGAIN for zoned
+ * file systems, which is an indication that there are
+ * no active zones to allocate from at the moment.
+ *
+ * If this is the first loop iteration, wait for at
+ * least one zone to finish before retrying the
+ * allocation. Otherwise ask the caller to write out
+ * the already allocated blocks before coming back to
+ * us, or return -ENOSPC if it can't handle retries.
+ */
+ ASSERT(btrfs_is_zoned(fs_info));
+ if (start == orig_start) {
+ wait_on_bit_io(&inode->root->fs_info->flags,
+ BTRFS_FS_NEED_ZONE_FINISH,
+ TASK_UNINTERRUPTIBLE);
+ continue;
+ }
+ if (done_offset) {
+ *done_offset = start - 1;
+ return 0;
+ }
+ ret = -ENOSPC;
+ }
if (ret < 0)
goto out_unlock;
cur_alloc_size = ins.offset;
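The new -EAGAIN branch above amounts to a three-way retry policy for zoned allocation: wait for a zone to finish if nothing has been allocated yet, report partial progress through *done_offset if the caller can handle it, and otherwise convert the error to -ENOSPC. A hedged standalone model of just that decision (the enum and helper are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum action { WAIT_FOR_ZONE, REPORT_PARTIAL, GIVE_UP_ENOSPC };

/*
 * Decide how to react to an -EAGAIN from the extent allocator on a zoned
 * filesystem, mirroring the logic in the hunk above.
 */
static enum action handle_eagain(uint64_t start, uint64_t orig_start,
                                 bool caller_has_done_offset)
{
        if (start == orig_start)
                return WAIT_FOR_ZONE;   /* nothing written yet: wait and retry */
        if (caller_has_done_offset)
                return REPORT_PARTIAL;  /* *done_offset = start - 1, ret = 0 */
        return GIVE_UP_ENOSPC;          /* caller cannot retry: -ENOSPC */
}

int main(void)
{
        printf("%d\n", handle_eagain(0, 0, true));        /* WAIT_FOR_ZONE */
        printf("%d\n", handle_eagain(1 << 20, 0, true));  /* REPORT_PARTIAL */
        printf("%d\n", handle_eagain(1 << 20, 0, false)); /* GIVE_UP_ENOSPC */
        return 0;
}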
@@ -1558,7 +1465,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
* Do set the Ordered (Private2) bit so we know this page was
* properly setup for writepage.
*/
- page_ops = unlock ? PAGE_UNLOCK : 0;
+ page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
page_ops |= PAGE_SET_ORDERED;
extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
@@ -1581,7 +1488,9 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (ret)
goto out_unlock;
}
-out:
+done:
+ if (done_offset)
+ *done_offset = end;
return ret;
out_drop_extent_cache:
@@ -1591,21 +1500,6 @@ out_reserve:
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
/*
- * If done_offset is non-NULL and ret == -EAGAIN, we expect the
- * caller to write out the successfully allocated region and retry.
- */
- if (done_offset && ret == -EAGAIN) {
- if (orig_start < start)
- *done_offset = start - 1;
- else
- *done_offset = start;
- return ret;
- } else if (ret == -EAGAIN) {
- /* Convert to -ENOSPC since the caller cannot retry. */
- ret = -ENOSPC;
- }
-
- /*
* Now, we have three regions to clean up:
*
* |-------(1)----|---(2)---|-------------(3)----------|
@@ -1627,10 +1521,10 @@ out_unlock:
* EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
* function.
*
- * However, in case of unlock == 0, we still need to unlock the pages
+ * However, in case of @keep_locked, we still need to unlock the pages
* (except @locked_page) to ensure all the pages are unlocked.
*/
- if (!unlock && orig_start < start) {
+ if (keep_locked && orig_start < start) {
if (!locked_page)
mapping_set_error(inode->vfs_inode.i_mapping, ret);
extent_clear_unlock_delalloc(inode, orig_start, start - 1,
@@ -1671,43 +1565,28 @@ out_unlock:
}
/*
- * work queue call back to started compression on a file and pages
- */
-static noinline void async_cow_start(struct btrfs_work *work)
-{
- struct async_chunk *async_chunk;
- int compressed_extents;
-
- async_chunk = container_of(work, struct async_chunk, work);
-
- compressed_extents = compress_file_range(async_chunk);
- if (compressed_extents == 0) {
- btrfs_add_delayed_iput(async_chunk->inode);
- async_chunk->inode = NULL;
- }
-}
-
-/*
- * work queue call back to submit previously compressed pages
+ * Phase two of compressed writeback. This is the ordered portion of the code,
+ * which only gets called in the order the work was queued. We walk all the
+ * async extents created by compress_file_range and send them down to the disk.
*/
-static noinline void async_cow_submit(struct btrfs_work *work)
+static noinline void submit_compressed_extents(struct btrfs_work *work)
{
struct async_chunk *async_chunk = container_of(work, struct async_chunk,
work);
struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
+ struct async_extent *async_extent;
unsigned long nr_pages;
+ u64 alloc_hint = 0;
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
PAGE_SHIFT;
- /*
- * ->inode could be NULL if async_chunk_start has failed to compress,
- * in which case we don't have anything to submit, yet we need to
- * always adjust ->async_delalloc_pages as its paired with the init
- * happening in run_delalloc_compressed
- */
- if (async_chunk->inode)
- submit_compressed_extents(async_chunk);
+ while (!list_empty(&async_chunk->extents)) {
+ async_extent = list_entry(async_chunk->extents.next,
+ struct async_extent, list);
+ list_del(&async_extent->list);
+ submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
+ }
/* atomic_sub_return implies a barrier */
if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
@@ -1721,8 +1600,7 @@ static noinline void async_cow_free(struct btrfs_work *work)
struct async_cow *async_cow;
async_chunk = container_of(work, struct async_chunk, work);
- if (async_chunk->inode)
- btrfs_add_delayed_iput(async_chunk->inode);
+ btrfs_add_delayed_iput(async_chunk->inode);
if (async_chunk->blkcg_css)
css_put(async_chunk->blkcg_css);
@@ -1732,10 +1610,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
- struct writeback_control *wbc,
- struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written)
+ struct page *locked_page, u64 start,
+ u64 end, struct writeback_control *wbc)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
@@ -1809,65 +1685,42 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
async_chunk[i].blkcg_css = NULL;
}
- btrfs_init_work(&async_chunk[i].work, async_cow_start,
- async_cow_submit, async_cow_free);
+ btrfs_init_work(&async_chunk[i].work, compress_file_range,
+ submit_compressed_extents, async_cow_free);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);
- *nr_written += nr_pages;
start = cur_end + 1;
}
- *page_started = 1;
return true;
}
-static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
- struct page *locked_page, u64 start,
- u64 end, int *page_started,
- unsigned long *nr_written,
- struct writeback_control *wbc)
+/*
+ * Run the delalloc range from start to end, and write back any dirty pages
+ * covered by the range.
+ */
+static noinline int run_delalloc_cow(struct btrfs_inode *inode,
+ struct page *locked_page, u64 start,
+ u64 end, struct writeback_control *wbc,
+ bool pages_dirty)
{
u64 done_offset = end;
int ret;
- bool locked_page_done = false;
while (start <= end) {
- ret = cow_file_range(inode, locked_page, start, end, page_started,
- nr_written, 0, &done_offset);
- if (ret && ret != -EAGAIN)
+ ret = cow_file_range(inode, locked_page, start, end, &done_offset,
+ true, false);
+ if (ret)
return ret;
-
- if (*page_started) {
- ASSERT(ret == 0);
- return 0;
- }
-
- if (ret == 0)
- done_offset = end;
-
- if (done_offset == start) {
- wait_on_bit_io(&inode->root->fs_info->flags,
- BTRFS_FS_NEED_ZONE_FINISH,
- TASK_UNINTERRUPTIBLE);
- continue;
- }
-
- if (!locked_page_done) {
- __set_page_dirty_nobuffers(locked_page);
- account_page_redirty(locked_page);
- }
- locked_page_done = true;
- extent_write_locked_range(&inode->vfs_inode, start, done_offset,
- wbc);
+ extent_write_locked_range(&inode->vfs_inode, locked_page, start,
+ done_offset, wbc, pages_dirty);
start = done_offset + 1;
}
- *page_started = 1;
-
- return 0;
+ return 1;
}
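run_delalloc_cow() above replaces the zoned-only loop with a generic partial-progress loop: allocate as much as cow_file_range() reports via done_offset, write that range back, then resume just past it. A simplified self-contained sketch of that loop shape, with placeholder callbacks standing in for the btrfs functions:

#include <stdint.h>
#include <stdio.h>

/*
 * Placeholder for cow_file_range(): cover as much of [start, end] as it can
 * and report how far it got via *done_offset. Here it pretends it can only
 * handle 1MiB per call.
 */
static int allocate_range(uint64_t start, uint64_t end, uint64_t *done_offset)
{
        uint64_t limit = start + (1 << 20) - 1;

        *done_offset = limit < end ? limit : end;
        return 0;
}

/* Placeholder for extent_write_locked_range(): just report the range. */
static void write_range(uint64_t start, uint64_t end)
{
        printf("writeback [%llu, %llu]\n",
               (unsigned long long)start, (unsigned long long)end);
}

static int run_range(uint64_t start, uint64_t end)
{
        while (start <= end) {
                uint64_t done_offset = end;
                int ret = allocate_range(start, end, &done_offset);

                if (ret)
                        return ret;
                write_range(start, done_offset);
                start = done_offset + 1;  /* resume after what was written */
        }
        return 1;  /* run_delalloc_cow() reports success as 1 */
}

int main(void)
{
        return run_range(0, (3 << 20) - 1) == 1 ? 0 : 1;
}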
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
@@ -1894,8 +1747,7 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
}
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
- const u64 start, const u64 end,
- int *page_started, unsigned long *nr_written)
+ const u64 start, const u64 end)
{
const bool is_space_ino = btrfs_is_free_space_inode(inode);
const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
@@ -1903,6 +1755,7 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
struct extent_io_tree *io_tree = &inode->io_tree;
u64 range_start = start;
u64 count;
+ int ret;
/*
* If EXTENT_NORESERVE is set it means that when the buffered write was
@@ -1955,8 +1808,14 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
NULL);
}
- return cow_file_range(inode, locked_page, start, end, page_started,
- nr_written, 1, NULL);
+ /*
+	 * Don't try to create inline extents, as mixing an inline extent that
+	 * is written out and unlocked directly with a normal NOCOW extent
+	 * doesn't work.
+ */
+ ret = cow_file_range(inode, locked_page, start, end, NULL, false, true);
+ ASSERT(ret != 1);
+ return ret;
}
struct can_nocow_file_extent_args {
@@ -2105,9 +1964,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
*/
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
struct page *locked_page,
- const u64 start, const u64 end,
- int *page_started,
- unsigned long *nr_written)
+ const u64 start, const u64 end)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_root *root = inode->root;
@@ -2117,25 +1974,26 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
int ret;
bool check_prev = true;
u64 ino = btrfs_ino(inode);
- struct btrfs_block_group *bg;
- bool nocow = false;
struct can_nocow_file_extent_args nocow_args = { 0 };
+ /*
+	 * Normally on a zoned device we're only doing COW writes, but
+	 * relocation on a zoned filesystem serializes I/O so that we're only
+	 * writing sequentially and can end up here as well.
+ */
+ ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));
+
path = btrfs_alloc_path();
if (!path) {
- extent_clear_unlock_delalloc(inode, start, end, locked_page,
- EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, PAGE_UNLOCK |
- PAGE_START_WRITEBACK |
- PAGE_END_WRITEBACK);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
nocow_args.end = end;
nocow_args.writeback_path = true;
while (1) {
+ struct btrfs_block_group *nocow_bg = NULL;
struct btrfs_ordered_extent *ordered;
struct btrfs_key found_key;
struct btrfs_file_extent_item *fi;
@@ -2146,8 +2004,6 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
int extent_type;
bool is_prealloc;
- nocow = false;
-
ret = btrfs_lookup_file_extent(NULL, root, path, ino,
cur_offset, 0);
if (ret < 0)
@@ -2172,11 +2028,8 @@ next_slot:
leaf = path->nodes[0];
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0) {
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
+ if (ret < 0)
goto error;
- }
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -2209,7 +2062,7 @@ next_slot:
if (found_key.offset > cur_offset) {
extent_end = found_key.offset;
extent_type = 0;
- goto out_check;
+ goto must_cow;
}
/*
@@ -2239,24 +2092,22 @@ next_slot:
nocow_args.start = cur_offset;
ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
- if (ret < 0) {
- if (cow_start != (u64)-1)
- cur_offset = cow_start;
+ if (ret < 0)
goto error;
- } else if (ret == 0) {
- goto out_check;
- }
+ if (ret == 0)
+ goto must_cow;
ret = 0;
- bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
- if (bg)
- nocow = true;
-out_check:
- /*
- * If nocow is false then record the beginning of the range
- * that needs to be COWed
- */
- if (!nocow) {
+ nocow_bg = btrfs_inc_nocow_writers(fs_info, nocow_args.disk_bytenr);
+ if (!nocow_bg) {
+must_cow:
+ /*
+ * If we can't perform NOCOW writeback for the range,
+ * then record the beginning of the range that needs to
+ * be COWed. It will be written out before the next
+ * NOCOW range if we find one, or when exiting this
+ * loop.
+ */
if (cow_start == (u64)-1)
cow_start = cur_offset;
cur_offset = extent_end;
@@ -2275,11 +2126,12 @@ out_check:
*/
if (cow_start != (u64)-1) {
ret = fallback_to_cow(inode, locked_page,
- cow_start, found_key.offset - 1,
- page_started, nr_written);
- if (ret)
- goto error;
+ cow_start, found_key.offset - 1);
cow_start = (u64)-1;
+ if (ret) {
+ btrfs_dec_nocow_writers(nocow_bg);
+ goto error;
+ }
}
nocow_end = cur_offset + nocow_args.num_bytes - 1;
@@ -2296,6 +2148,7 @@ out_check:
ram_bytes, BTRFS_COMPRESS_NONE,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
+ btrfs_dec_nocow_writers(nocow_bg);
ret = PTR_ERR(em);
goto error;
}
@@ -2309,6 +2162,7 @@ out_check:
? (1 << BTRFS_ORDERED_PREALLOC)
: (1 << BTRFS_ORDERED_NOCOW),
BTRFS_COMPRESS_NONE);
+ btrfs_dec_nocow_writers(nocow_bg);
if (IS_ERR(ordered)) {
if (is_prealloc) {
btrfs_drop_extent_map_range(inode, cur_offset,
@@ -2318,11 +2172,6 @@ out_check:
goto error;
}
- if (nocow) {
- btrfs_dec_nocow_writers(bg);
- nocow = false;
- }
-
if (btrfs_is_data_reloc_root(root))
/*
* Error handled later, as we must prevent
@@ -2357,17 +2206,24 @@ out_check:
if (cow_start != (u64)-1) {
cur_offset = end;
- ret = fallback_to_cow(inode, locked_page, cow_start, end,
- page_started, nr_written);
+ ret = fallback_to_cow(inode, locked_page, cow_start, end);
+ cow_start = (u64)-1;
if (ret)
goto error;
}
-error:
- if (nocow)
- btrfs_dec_nocow_writers(bg);
+ btrfs_free_path(path);
+ return 0;
- if (ret && cur_offset < end)
+error:
+ /*
+ * If an error happened while a COW region is outstanding, cur_offset
+ * needs to be reset to cow_start to ensure the COW region is unlocked
+ * as well.
+ */
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
+ if (cur_offset < end)
extent_clear_unlock_delalloc(inode, cur_offset, end,
locked_page, EXTENT_LOCKED |
EXTENT_DELALLOC | EXTENT_DEFRAG |
@@ -2395,49 +2251,37 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
* being touched for the first time.
*/
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
- u64 start, u64 end, int *page_started, unsigned long *nr_written,
- struct writeback_control *wbc)
+ u64 start, u64 end, struct writeback_control *wbc)
{
- int ret = 0;
const bool zoned = btrfs_is_zoned(inode->root->fs_info);
+ int ret;
/*
- * The range must cover part of the @locked_page, or the returned
- * @page_started can confuse the caller.
+ * The range must cover part of the @locked_page, or a return of 1
+ * can confuse the caller.
*/
ASSERT(!(end <= page_offset(locked_page) ||
start >= page_offset(locked_page) + PAGE_SIZE));
if (should_nocow(inode, start, end)) {
- /*
- * Normally on a zoned device we're only doing COW writes, but
- * in case of relocation on a zoned filesystem we have taken
- * precaution, that we're only writing sequentially. It's safe
- * to use run_delalloc_nocow() here, like for regular
- * preallocated inodes.
- */
- ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
- ret = run_delalloc_nocow(inode, locked_page, start, end,
- page_started, nr_written);
+ ret = run_delalloc_nocow(inode, locked_page, start, end);
goto out;
}
if (btrfs_inode_can_compress(inode) &&
inode_need_compress(inode, start, end) &&
- run_delalloc_compressed(inode, wbc, locked_page, start,
- end, page_started, nr_written))
- goto out;
+ run_delalloc_compressed(inode, locked_page, start, end, wbc))
+ return 1;
if (zoned)
- ret = run_delalloc_zoned(inode, locked_page, start, end,
- page_started, nr_written, wbc);
+ ret = run_delalloc_cow(inode, locked_page, start, end, wbc,
+ true);
else
- ret = cow_file_range(inode, locked_page, start, end,
- page_started, nr_written, 1, NULL);
+ ret = cow_file_range(inode, locked_page, start, end, NULL,
+ false, false);
out:
- ASSERT(ret <= 0);
- if (ret)
+ if (ret < 0)
btrfs_cleanup_ordered_extents(inode, locked_page, start,
end - start + 1);
return ret;
@@ -2840,23 +2684,19 @@ struct btrfs_writepage_fixup {
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
- struct btrfs_writepage_fixup *fixup;
+ struct btrfs_writepage_fixup *fixup =
+ container_of(work, struct btrfs_writepage_fixup, work);
struct btrfs_ordered_extent *ordered;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL;
- struct page *page;
- struct btrfs_inode *inode;
- u64 page_start;
- u64 page_end;
+ struct page *page = fixup->page;
+ struct btrfs_inode *inode = fixup->inode;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ u64 page_start = page_offset(page);
+ u64 page_end = page_offset(page) + PAGE_SIZE - 1;
int ret = 0;
bool free_delalloc_space = true;
- fixup = container_of(work, struct btrfs_writepage_fixup, work);
- page = fixup->page;
- inode = fixup->inode;
- page_start = page_offset(page);
- page_end = page_offset(page) + PAGE_SIZE - 1;
-
/*
* This is similar to page_mkwrite, we need to reserve the space before
* we take the page lock.
@@ -2949,10 +2789,12 @@ out_page:
* to reflect the errors and clean the page.
*/
mapping_set_error(page->mapping, ret);
- end_extent_writepage(page, ret, page_start, page_end);
+ btrfs_mark_ordered_io_finished(inode, page, page_start,
+ PAGE_SIZE, !ret);
+ btrfs_page_clear_uptodate(fs_info, page, page_start, PAGE_SIZE);
clear_page_dirty_for_io(page);
}
- btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE);
+ btrfs_page_clear_checked(fs_info, page, page_start, PAGE_SIZE);
unlock_page(page);
put_page(page);
kfree(fixup);
@@ -3359,6 +3201,13 @@ out:
btrfs_free_reserved_extent(fs_info,
ordered_extent->disk_bytenr,
ordered_extent->disk_num_bytes, 1);
+ /*
+ * Actually free the qgroup rsv which was released when
+ * the ordered extent was created.
+ */
+ btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
+ ordered_extent->qgroup_rsv,
+ BTRFS_QGROUP_RSV_DATA);
}
}
@@ -3384,15 +3233,6 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
return btrfs_finish_one_ordered(ordered);
}
-void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
- struct page *page, u64 start,
- u64 end, bool uptodate)
-{
- trace_btrfs_writepage_end_io_hook(inode, start, end, uptodate);
-
- btrfs_mark_ordered_io_finished(inode, page, start, end + 1 - start, uptodate);
-}
-
/*
* Verify the checksum for a single sector without any extra action that depend
* on the type of I/O.
@@ -3662,9 +3502,16 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
*/
if (found_key.offset == last_objectid) {
+ /*
+ * We found the same inode as before. This means we were
+ * not able to remove its items via eviction triggered
+ * by an iput(). A transaction abort may have happened,
+ * due to -ENOSPC for example, so try to grab the error
+ * that led to a transaction abort, if any.
+ */
btrfs_err(fs_info,
"Error removing orphan entry, stopping orphan cleanup");
- ret = -EINVAL;
+ ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
goto out;
}
@@ -3917,8 +3764,8 @@ static int btrfs_read_locked_inode(struct inode *inode,
inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
- inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
- inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
+ inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
+ btrfs_timespec_nsec(leaf, &inode_item->ctime));
BTRFS_I(inode)->i_otime.tv_sec =
btrfs_timespec_sec(leaf, &inode_item->otime);
@@ -4089,9 +3936,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
inode->i_mtime.tv_nsec);
btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode->i_ctime.tv_sec);
+ inode_get_ctime(inode).tv_sec);
btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode->i_ctime.tv_nsec);
+ inode_get_ctime(inode).tv_nsec);
btrfs_set_token_timespec_sec(&token, &item->otime,
BTRFS_I(inode)->i_otime.tv_sec);
@@ -4289,9 +4136,8 @@ err:
btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
- inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
- dir->vfs_inode.i_mtime = inode->vfs_inode.i_ctime;
- dir->vfs_inode.i_ctime = inode->vfs_inode.i_ctime;
+ inode_set_ctime_current(&inode->vfs_inode);
+ dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
ret = btrfs_update_inode(trans, root, dir);
out:
return ret;
@@ -4464,8 +4310,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
inode_inc_iversion(&dir->vfs_inode);
- dir->vfs_inode.i_mtime = current_time(&dir->vfs_inode);
- dir->vfs_inode.i_ctime = dir->vfs_inode.i_mtime;
+ dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
ret = btrfs_update_inode_fallback(trans, root, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -5115,8 +4960,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
if (newsize != oldsize) {
inode_inc_iversion(inode);
if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
- inode->i_mtime = current_time(inode);
- inode->i_ctime = inode->i_mtime;
+ inode->i_mtime = inode_set_ctime_current(inode);
}
}
@@ -5738,11 +5582,11 @@ struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root
return btrfs_iget_path(s, ino, root, NULL);
}
-static struct inode *new_simple_dir(struct super_block *s,
+static struct inode *new_simple_dir(struct inode *dir,
struct btrfs_key *key,
struct btrfs_root *root)
{
- struct inode *inode = new_inode(s);
+ struct inode *inode = new_inode(dir->i_sb);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -5760,10 +5604,11 @@ static struct inode *new_simple_dir(struct super_block *s,
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
- inode->i_mtime = current_time(inode);
- inode->i_atime = inode->i_mtime;
- inode->i_ctime = inode->i_mtime;
+ inode->i_mtime = inode_set_ctime_current(inode);
+ inode->i_atime = dir->i_atime;
BTRFS_I(inode)->i_otime = inode->i_mtime;
+ inode->i_uid = dir->i_uid;
+ inode->i_gid = dir->i_gid;
return inode;
}
@@ -5822,7 +5667,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (ret != -ENOENT)
inode = ERR_PTR(ret);
else
- inode = new_simple_dir(dir->i_sb, &location, root);
+ inode = new_simple_dir(dir, &location, root);
} else {
inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
btrfs_put_root(sub_root);
@@ -6007,8 +5852,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
struct btrfs_key found_key;
struct btrfs_path *path;
void *addr;
- struct list_head ins_list;
- struct list_head del_list;
+ LIST_HEAD(ins_list);
+ LIST_HEAD(del_list);
int ret;
char *name_ptr;
int name_len;
@@ -6027,8 +5872,6 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
addr = private->filldir_buf;
path->reada = READA_FORWARD;
- INIT_LIST_HEAD(&ins_list);
- INIT_LIST_HEAD(&del_list);
put = btrfs_readdir_get_delayed_items(inode, private->last_index,
&ins_list, &del_list);
@@ -6165,8 +6008,7 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
* This is a copy of file_update_time. We need this so we can return error on
* ENOSPC for updating the inode in the case of file write and mmap writes.
*/
-static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
- int flags)
+static int btrfs_update_time(struct inode *inode, int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
bool dirty = flags & ~S_VERSION;
@@ -6174,14 +6016,7 @@ static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
if (btrfs_root_readonly(root))
return -EROFS;
- if (flags & S_VERSION)
- dirty |= inode_maybe_inc_iversion(inode, dirty);
- if (flags & S_CTIME)
- inode->i_ctime = *now;
- if (flags & S_MTIME)
- inode->i_mtime = *now;
- if (flags & S_ATIME)
- inode->i_atime = *now;
+ dirty = inode_update_timestamps(inode, flags);
return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}
@@ -6429,9 +6264,8 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
goto discard;
}
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
inode->i_atime = inode->i_mtime;
- inode->i_ctime = inode->i_mtime;
BTRFS_I(inode)->i_otime = inode->i_mtime;
/*
@@ -6596,12 +6430,10 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
* log replay procedure is responsible for setting them to their correct
* values (the ones it had when the fsync was done).
*/
- if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
- struct timespec64 now = current_time(&parent_inode->vfs_inode);
+ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
+ parent_inode->vfs_inode.i_mtime =
+ inode_set_ctime_current(&parent_inode->vfs_inode);
- parent_inode->vfs_inode.i_mtime = now;
- parent_inode->vfs_inode.i_ctime = now;
- }
ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -6741,7 +6573,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
BTRFS_I(inode)->dir_index = 0ULL;
inc_nlink(inode);
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ihold(inode);
set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
@@ -8807,7 +8639,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
spin_lock(&BTRFS_I(inode)->lock);
@@ -8831,7 +8663,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
struct btrfs_root *dest = BTRFS_I(new_dir)->root;
struct inode *new_inode = new_dentry->d_inode;
struct inode *old_inode = old_dentry->d_inode;
- struct timespec64 ctime = current_time(old_inode);
struct btrfs_rename_ctx old_rename_ctx;
struct btrfs_rename_ctx new_rename_ctx;
u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
@@ -8962,12 +8793,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
inode_inc_iversion(new_inode);
- old_dir->i_mtime = ctime;
- old_dir->i_ctime = ctime;
- new_dir->i_mtime = ctime;
- new_dir->i_ctime = ctime;
- old_inode->i_ctime = ctime;
- new_inode->i_ctime = ctime;
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
if (old_dentry->d_parent != new_dentry->d_parent) {
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
@@ -9231,11 +9057,7 @@ static int btrfs_rename(struct mnt_idmap *idmap,
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
inode_inc_iversion(old_inode);
- old_dir->i_mtime = current_time(old_dir);
- old_dir->i_ctime = old_dir->i_mtime;
- new_dir->i_mtime = old_dir->i_mtime;
- new_dir->i_ctime = old_dir->i_mtime;
- old_inode->i_ctime = old_dir->i_mtime;
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
if (old_dentry->d_parent != new_dentry->d_parent)
btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
@@ -9257,7 +9079,6 @@ static int btrfs_rename(struct mnt_idmap *idmap,
if (new_inode) {
inode_inc_iversion(new_inode);
- new_inode->i_ctime = current_time(new_inode);
if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
@@ -9390,14 +9211,11 @@ static int start_delalloc_inodes(struct btrfs_root *root,
struct btrfs_inode *binode;
struct inode *inode;
struct btrfs_delalloc_work *work, *next;
- struct list_head works;
- struct list_head splice;
+ LIST_HEAD(works);
+ LIST_HEAD(splice);
int ret = 0;
bool full_flush = wbc->nr_to_write == LONG_MAX;
- INIT_LIST_HEAD(&works);
- INIT_LIST_HEAD(&splice);
-
mutex_lock(&root->delalloc_mutex);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
@@ -9485,14 +9303,12 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
.range_end = LLONG_MAX,
};
struct btrfs_root *root;
- struct list_head splice;
+ LIST_HEAD(splice);
int ret;
if (BTRFS_FS_ERROR(fs_info))
return -EROFS;
- INIT_LIST_HEAD(&splice);
-
mutex_lock(&fs_info->delalloc_root_mutex);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
@@ -9797,7 +9613,7 @@ next:
*alloc_hint = ins.objectid + ins.offset;
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(actual_len > inode->i_size) &&
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a895d105464b..a18ee7b5a166 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -384,7 +384,7 @@ update_flags:
binode->flags = binode_flags;
btrfs_sync_inode_flags_to_i_flags(inode);
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out_end_trans:
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
index 23fc11af498a..7695decc7243 100644
--- a/fs/btrfs/messages.c
+++ b/fs/btrfs/messages.c
@@ -10,14 +10,13 @@
#ifdef CONFIG_PRINTK
#define STATE_STRING_PREFACE ": state "
-#define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT)
+#define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1)
/*
* Characters to print to indicate error conditions or uncommon filesystem state.
* RO is not an error.
*/
static const char fs_state_chars[] = {
- [BTRFS_FS_STATE_ERROR] = 'E',
[BTRFS_FS_STATE_REMOUNTING] = 'M',
[BTRFS_FS_STATE_RO] = 0,
[BTRFS_FS_STATE_TRANS_ABORTED] = 'A',
@@ -37,6 +36,11 @@ static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
memcpy(curr, STATE_STRING_PREFACE, sizeof(STATE_STRING_PREFACE));
curr += sizeof(STATE_STRING_PREFACE) - 1;
+ if (BTRFS_FS_ERROR(info)) {
+ *curr++ = 'E';
+ states_printed = true;
+ }
+
for_each_set_bit(bit, &fs_state, sizeof(fs_state)) {
WARN_ON_ONCE(bit >= BTRFS_FS_STATE_COUNT);
if ((bit < BTRFS_FS_STATE_COUNT) && fs_state_chars[bit]) {
@@ -155,7 +159,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
* Today we only save the error info to memory. Long term we'll also
* send it down to the disk.
*/
- set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
+ WRITE_ONCE(fs_info->fs_error, errno);
/* Don't go through full error handling during mount. */
if (!(sb->s_flags & SB_BORN))
@@ -252,12 +256,6 @@ void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt,
}
#endif
-void __cold btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
-{
- btrfs_err(fs_info,
-"Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel");
-}
-
#if BITS_PER_LONG == 32
void __cold btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info)
{
diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
index deedc1a168e2..1ae6f8e23e07 100644
--- a/fs/btrfs/messages.h
+++ b/fs/btrfs/messages.h
@@ -181,8 +181,6 @@ do { \
#define ASSERT(expr) (void)(expr)
#endif
-void __cold btrfs_print_v0_err(struct btrfs_fs_info *fs_info);
-
__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index a629532283bc..b46ab348e8e5 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -410,6 +410,10 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
unsigned long flags;
u64 cur = file_offset;
+ trace_btrfs_writepage_end_io_hook(inode, file_offset,
+ file_offset + num_bytes - 1,
+ uptodate);
+
spin_lock_irqsave(&tree->lock, flags);
while (cur < file_offset + num_bytes) {
u64 entry_end;
@@ -736,11 +740,9 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
const u64 range_start, const u64 range_len)
{
struct btrfs_root *root;
- struct list_head splice;
+ LIST_HEAD(splice);
u64 done;
- INIT_LIST_HEAD(&splice);
-
mutex_lock(&fs_info->ordered_operations_mutex);
spin_lock(&fs_info->ordered_root_lock);
list_splice_init(&fs_info->ordered_roots, &splice);
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index aa06d9ca911d..0c93439e929f 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -95,8 +95,10 @@ static void print_extent_item(const struct extent_buffer *eb, int slot, int type
int ref_index = 0;
if (unlikely(item_size < sizeof(*ei))) {
- btrfs_print_v0_err(eb->fs_info);
- btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
+ btrfs_err(eb->fs_info,
+ "unexpected extent item size, has %u expect >= %zu",
+ item_size, sizeof(*ei));
+ btrfs_handle_fs_error(eb->fs_info, -EUCLEAN, NULL);
}
ei = btrfs_item_ptr(eb, slot, struct btrfs_extent_item);
@@ -291,10 +293,6 @@ void btrfs_print_leaf(const struct extent_buffer *l)
btrfs_file_extent_num_bytes(l, fi),
btrfs_file_extent_ram_bytes(l, fi));
break;
- case BTRFS_EXTENT_REF_V0_KEY:
- btrfs_print_v0_err(fs_info);
- btrfs_handle_fs_error(fs_info, -EINVAL, NULL);
- break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 2637d6b157ff..b99230db3c82 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3590,15 +3590,16 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
* going to clear all tracking information for a clean start.
*/
- trans = btrfs_join_transaction(fs_info->fs_root);
- if (IS_ERR(trans)) {
+ trans = btrfs_attach_transaction_barrier(fs_info->fs_root);
+ if (IS_ERR(trans) && trans != ERR_PTR(-ENOENT)) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return PTR_ERR(trans);
- }
- ret = btrfs_commit_transaction(trans);
- if (ret) {
- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
- return ret;
+ } else if (trans != ERR_PTR(-ENOENT)) {
+ ret = btrfs_commit_transaction(trans);
+ if (ret) {
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+ return ret;
+ }
}
qgroup_rescan_zero_tracking(fs_info);
@@ -3757,9 +3758,11 @@ static int try_flush_qgroup(struct btrfs_root *root)
goto out;
btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
- trans = btrfs_join_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ if (ret == -ENOENT)
+ ret = 0;
goto out;
}
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0249ea52bb80..3e014b9370a3 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -584,8 +584,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
- if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
- last->operation == BTRFS_RBIO_READ_REBUILD)
+ if (last->operation == BTRFS_RBIO_READ_REBUILD)
return 0;
return 1;
@@ -784,10 +783,7 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
spin_unlock(&rbio->bio_list_lock);
spin_unlock(&h->lock);
- if (next->operation == BTRFS_RBIO_READ_REBUILD)
- start_async_work(next, recover_rbio_work_locked);
- else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
- steal_rbio(rbio, next);
+ if (next->operation == BTRFS_RBIO_READ_REBUILD) {
start_async_work(next, recover_rbio_work_locked);
} else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
@@ -1517,11 +1513,11 @@ static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
while ((bio = bio_list_pop(bio_list))) {
bio->bi_end_io = raid_wait_read_end_io;
- if (trace_raid56_scrub_read_recover_enabled()) {
+ if (trace_raid56_read_enabled()) {
struct raid56_bio_trace_info trace_info = { 0 };
bio_get_trace_info(rbio, bio, &trace_info);
- trace_raid56_scrub_read_recover(rbio, bio, &trace_info);
+ trace_raid56_read(rbio, bio, &trace_info);
}
submit_bio(bio);
}
@@ -1698,8 +1694,7 @@ static int verify_one_sector(struct btrfs_raid_bio *rbio,
* If we're rebuilding a read, we have to use pages from the
* bio list if possible.
*/
- if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
} else {
sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
@@ -1763,8 +1758,7 @@ static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
* If we're rebuilding a read, we have to use pages from the
* bio list if possible.
*/
- if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING)) {
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
} else {
sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
@@ -1897,8 +1891,7 @@ static int recover_sectors(struct btrfs_raid_bio *rbio)
goto out;
}
- if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
- rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
+ if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
spin_lock(&rbio->bio_list_lock);
set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
spin_unlock(&rbio->bio_list_lock);
@@ -2112,8 +2105,8 @@ static void fill_data_csums(struct btrfs_raid_bio *rbio)
goto error;
}
- ret = btrfs_lookup_csums_bitmap(csum_root, start, start + len - 1,
- rbio->csum_buf, rbio->csum_bitmap, false);
+ ret = btrfs_lookup_csums_bitmap(csum_root, NULL, start, start + len - 1,
+ rbio->csum_buf, rbio->csum_bitmap);
if (ret < 0)
goto error;
if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
@@ -2198,11 +2191,11 @@ static void submit_write_bios(struct btrfs_raid_bio *rbio,
while ((bio = bio_list_pop(bio_list))) {
bio->bi_end_io = raid_wait_write_end_io;
- if (trace_raid56_write_stripe_enabled()) {
+ if (trace_raid56_write_enabled()) {
struct raid56_bio_trace_info trace_info = { 0 };
bio_get_trace_info(rbio, bio, &trace_info);
- trace_raid56_write_stripe(rbio, bio, &trace_info);
+ trace_raid56_write(rbio, bio, &trace_info);
}
submit_bio(bio);
}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index 0e84c9c9293f..45e6ff78316f 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -14,7 +14,6 @@ enum btrfs_rbio_ops {
BTRFS_RBIO_WRITE,
BTRFS_RBIO_READ_REBUILD,
BTRFS_RBIO_PARITY_SCRUB,
- BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 0474bbe39da7..65d2bd6910f2 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -30,8 +30,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
inode_inc_iversion(inode);
if (!no_time_update) {
- inode->i_mtime = current_time(inode);
- inode->i_ctime = inode->i_mtime;
+ inode->i_mtime = inode_set_ctime_current(inode);
}
/*
* We round up to the block size at eof when determining which
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 46c3c1d57266..9951a0caf5bb 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3006,9 +3006,6 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
if (!page)
return -ENOMEM;
}
- ret = set_page_extent_mapped(page);
- if (ret < 0)
- goto release_page;
if (PageReadahead(page))
page_cache_async_readahead(inode->i_mapping, ra, NULL,
@@ -3024,6 +3021,15 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
}
}
+ /*
+	 * We could have lost page private when we dropped the lock to read the
+	 * page above, so call set_page_extent_mapped() here to make sure any
+	 * subpage blocksize state we need is in place.
+ */
+ ret = set_page_extent_mapped(page);
+ if (ret < 0)
+ goto release_page;
+
page_start = page_offset(page);
page_end = page_start + PAGE_SIZE - 1;
@@ -3250,12 +3256,13 @@ static int add_tree_block(struct reloc_control *rc,
if (type == BTRFS_TREE_BLOCK_REF_KEY)
owner = btrfs_extent_inline_ref_offset(eb, iref);
}
- } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
- btrfs_print_v0_err(eb->fs_info);
- btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
- return -EINVAL;
} else {
- BUG();
+ btrfs_print_leaf(eb);
+ btrfs_err(rc->block_group->fs_info,
+ "unrecognized tree backref at tree block %llu slot %u",
+ eb->start, path->slots[0]);
+ btrfs_release_path(path);
+ return -EUCLEAN;
}
btrfs_release_path(path);
@@ -3498,6 +3505,8 @@ int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
last = rc->block_group->start + rc->block_group->length;
while (1) {
+ bool block_found;
+
cond_resched();
if (rc->search_start >= last) {
ret = 1;
@@ -3548,11 +3557,11 @@ next:
goto next;
}
- ret = find_first_extent_bit(&rc->processed_blocks,
- key.objectid, &start, &end,
- EXTENT_DIRTY, NULL);
+ block_found = find_first_extent_bit(&rc->processed_blocks,
+ key.objectid, &start, &end,
+ EXTENT_DIRTY, NULL);
- if (ret == 0 && start <= key.objectid) {
+ if (block_found && start <= key.objectid) {
btrfs_release_path(path);
rc->search_start = end + 1;
} else {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 7289f5bff397..b877203f1dc5 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -43,9 +43,20 @@ struct scrub_ctx;
/*
* The following value only influences the performance.
*
- * This determines the batch size for stripe submitted in one go.
+ * This detemines how many stripes would be submitted in one go,
+ * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
*/
-#define SCRUB_STRIPES_PER_SCTX 8 /* That would be 8 64K stripe per-device. */
+#define SCRUB_STRIPES_PER_GROUP 8
+
+/*
+ * How many groups we have for each sctx.
+ *
+ * This would be 8M per device, the same value as the old scrub in-flight bios
+ * size limit.
+ */
+#define SCRUB_GROUPS_PER_SCTX 16
+
+#define SCRUB_TOTAL_STRIPES (SCRUB_GROUPS_PER_SCTX * SCRUB_STRIPES_PER_GROUP)
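Assuming BTRFS_STRIPE_LEN is 64KiB, as the comments above imply, these constants work out to 512KiB per group, 128 stripes per scrub context, and 8MiB in flight per device. A quick standalone check of that arithmetic:

#include <stdio.h>

#define STRIPE_LEN        (64 * 1024)   /* assumed BTRFS_STRIPE_LEN */
#define STRIPES_PER_GROUP 8
#define GROUPS_PER_SCTX   16
#define TOTAL_STRIPES     (GROUPS_PER_SCTX * STRIPES_PER_GROUP)

int main(void)
{
        printf("group size: %d KiB\n", STRIPE_LEN * STRIPES_PER_GROUP / 1024);
        printf("stripes:    %d\n", TOTAL_STRIPES);
        printf("per device: %d MiB\n", STRIPE_LEN * TOTAL_STRIPES / (1024 * 1024));
        return 0;
}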
/*
* The following value times PAGE_SIZE needs to be large enough to match the
@@ -172,9 +183,11 @@ struct scrub_stripe {
};
struct scrub_ctx {
- struct scrub_stripe stripes[SCRUB_STRIPES_PER_SCTX];
+ struct scrub_stripe stripes[SCRUB_TOTAL_STRIPES];
struct scrub_stripe *raid56_data_stripes;
struct btrfs_fs_info *fs_info;
+ struct btrfs_path extent_path;
+ struct btrfs_path csum_path;
int first_free;
int cur_stripe;
atomic_t cancel_req;
@@ -315,10 +328,10 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
if (!sctx)
return;
- for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
+ for (i = 0; i < SCRUB_TOTAL_STRIPES; i++)
release_scrub_stripe(&sctx->stripes[i]);
- kfree(sctx);
+ kvfree(sctx);
}
static void scrub_put_ctx(struct scrub_ctx *sctx)
@@ -333,13 +346,20 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
struct scrub_ctx *sctx;
int i;
- sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
+ /* Since sctx has inline 128 stripes, it can go beyond 64K easily. Use
+ * kvzalloc().
+ */
+ sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
if (!sctx)
goto nomem;
refcount_set(&sctx->refs, 1);
sctx->is_dev_replace = is_dev_replace;
sctx->fs_info = fs_info;
- for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
+ sctx->extent_path.search_commit_root = 1;
+ sctx->extent_path.skip_locking = 1;
+ sctx->csum_path.search_commit_root = 1;
+ sctx->csum_path.skip_locking = 1;
+ for (i = 0; i < SCRUB_TOTAL_STRIPES; i++) {
int ret;
ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
@@ -970,6 +990,9 @@ skip:
spin_unlock(&sctx->stat_lock);
}
+static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
+ unsigned long write_bitmap, bool dev_replace);
+
/*
* The main entrance for all read related scrub work, including:
*
@@ -978,13 +1001,16 @@ skip:
* - Go through the remaining mirrors and try to read as large blocksize as
* possible
* - Go through all mirrors (including the failed mirror) sector-by-sector
+ * - Submit writeback for repaired sectors
*
- * Writeback does not happen here, it needs extra synchronization.
+ * Writeback for dev-replace does not happen here, it needs extra
+ * synchronization for zoned devices.
*/
static void scrub_stripe_read_repair_worker(struct work_struct *work)
{
struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
- struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct scrub_ctx *sctx = stripe->sctx;
+ struct btrfs_fs_info *fs_info = sctx->fs_info;
int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);
int mirror;
@@ -1049,7 +1075,23 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
goto out;
}
out:
- scrub_stripe_report_errors(stripe->sctx, stripe);
+ /*
+ * Submit the repaired sectors. For the zoned case, we cannot do repair
+ * in-place, but queue the bg to be relocated.
+ */
+ if (btrfs_is_zoned(fs_info)) {
+ if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
+ btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
+ } else if (!sctx->readonly) {
+ unsigned long repaired;
+
+ bitmap_andnot(&repaired, &stripe->init_error_bitmap,
+ &stripe->error_bitmap, stripe->nr_sectors);
+ scrub_write_sectors(sctx, stripe, repaired, false);
+ wait_scrub_stripe_io(stripe);
+ }
+
+ scrub_stripe_report_errors(sctx, stripe);
set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
wake_up(&stripe->repair_wait);
}
@@ -1262,7 +1304,6 @@ static int get_raid56_logic_offset(u64 physical, int num,
/* Work out the disk rotation on this stripe-set */
rot = stripe_nr % map->num_stripes;
- stripe_nr /= map->num_stripes;
/* calculate which stripe this data locates */
rot += i;
stripe_index = rot % map->num_stripes;
@@ -1468,6 +1509,8 @@ static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
* Return <0 for error.
*/
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
+ struct btrfs_path *extent_path,
+ struct btrfs_path *csum_path,
struct btrfs_device *dev, u64 physical,
int mirror_num, u64 logical_start,
u32 logical_len,
@@ -1477,7 +1520,6 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
const u64 logical_end = logical_start + logical_len;
- struct btrfs_path path = { 0 };
u64 cur_logical = logical_start;
u64 stripe_end;
u64 extent_start;
@@ -1493,14 +1535,13 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
/* The range must be inside the bg. */
ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
- path.search_commit_root = 1;
- path.skip_locking = 1;
-
- ret = find_first_extent_item(extent_root, &path, logical_start, logical_len);
+ ret = find_first_extent_item(extent_root, extent_path, logical_start,
+ logical_len);
/* Either error or not found. */
if (ret)
goto out;
- get_extent_info(&path, &extent_start, &extent_len, &extent_flags, &extent_gen);
+ get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
+ &extent_gen);
if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
stripe->nr_meta_extents++;
if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
@@ -1528,7 +1569,7 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
/* Fill the extent info for the remaining sectors. */
while (cur_logical <= stripe_end) {
- ret = find_first_extent_item(extent_root, &path, cur_logical,
+ ret = find_first_extent_item(extent_root, extent_path, cur_logical,
stripe_end - cur_logical + 1);
if (ret < 0)
goto out;
@@ -1536,7 +1577,7 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
ret = 0;
break;
}
- get_extent_info(&path, &extent_start, &extent_len,
+ get_extent_info(extent_path, &extent_start, &extent_len,
&extent_flags, &extent_gen);
if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
stripe->nr_meta_extents++;
@@ -1561,9 +1602,9 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
*/
ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
- ret = btrfs_lookup_csums_bitmap(csum_root, stripe->logical,
- stripe_end, stripe->csums,
- &csum_bitmap, true);
+ ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
+ stripe->logical, stripe_end,
+ stripe->csums, &csum_bitmap);
if (ret < 0)
goto out;
if (ret > 0)
@@ -1576,7 +1617,6 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
}
set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
out:
- btrfs_release_path(&path);
return ret;
}
@@ -1654,6 +1694,28 @@ static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
return false;
}
+static void submit_initial_group_read(struct scrub_ctx *sctx,
+ unsigned int first_slot,
+ unsigned int nr_stripes)
+{
+ struct blk_plug plug;
+
+ ASSERT(first_slot < SCRUB_TOTAL_STRIPES);
+ ASSERT(first_slot + nr_stripes <= SCRUB_TOTAL_STRIPES);
+
+ scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
+ btrfs_stripe_nr_to_offset(nr_stripes));
+ blk_start_plug(&plug);
+ for (int i = 0; i < nr_stripes; i++) {
+ struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
+
+ /* Those stripes should be initialized. */
+ ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
+ scrub_submit_initial_read(sctx, stripe);
+ }
+ blk_finish_plug(&plug);
+}
+
static int flush_scrub_stripes(struct scrub_ctx *sctx)
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -1666,11 +1728,11 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
- scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
- btrfs_stripe_nr_to_offset(nr_stripes));
- for (int i = 0; i < nr_stripes; i++) {
- stripe = &sctx->stripes[i];
- scrub_submit_initial_read(sctx, stripe);
+ /* Submit the stripes which are populated but not submitted. */
+ if (nr_stripes % SCRUB_STRIPES_PER_GROUP) {
+ const int first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
+
+ submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
}
for (int i = 0; i < nr_stripes; i++) {
@@ -1680,32 +1742,6 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
}
- /*
- * Submit the repaired sectors. For zoned case, we cannot do repair
- * in-place, but queue the bg to be relocated.
- */
- if (btrfs_is_zoned(fs_info)) {
- for (int i = 0; i < nr_stripes; i++) {
- stripe = &sctx->stripes[i];
-
- if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) {
- btrfs_repair_one_zone(fs_info,
- sctx->stripes[0].bg->start);
- break;
- }
- }
- } else if (!sctx->readonly) {
- for (int i = 0; i < nr_stripes; i++) {
- unsigned long repaired;
-
- stripe = &sctx->stripes[i];
-
- bitmap_andnot(&repaired, &stripe->init_error_bitmap,
- &stripe->error_bitmap, stripe->nr_sectors);
- scrub_write_sectors(sctx, stripe, repaired, false);
- }
- }
-
/* Submit for dev-replace. */
if (sctx->is_dev_replace) {
/*
@@ -1750,28 +1786,40 @@ static void raid56_scrub_wait_endio(struct bio *bio)
static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
struct btrfs_device *dev, int mirror_num,
- u64 logical, u32 length, u64 physical)
+ u64 logical, u32 length, u64 physical,
+ u64 *found_logical_ret)
{
struct scrub_stripe *stripe;
int ret;
- /* No available slot, submit all stripes and wait for them. */
- if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
- ret = flush_scrub_stripes(sctx);
- if (ret < 0)
- return ret;
- }
+ /*
+ * There should always be one slot left, as the caller filling the last
+ * slot should flush them all.
+ */
+ ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
stripe = &sctx->stripes[sctx->cur_stripe];
-
- /* We can queue one stripe using the remaining slot. */
scrub_reset_stripe(stripe);
- ret = scrub_find_fill_first_stripe(bg, dev, physical, mirror_num,
- logical, length, stripe);
+ ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
+ &sctx->csum_path, dev, physical,
+ mirror_num, logical, length, stripe);
/* Either >0 as no more extents or <0 for error. */
if (ret)
return ret;
+ if (found_logical_ret)
+ *found_logical_ret = stripe->logical;
sctx->cur_stripe++;
+
+ /* We filled one group, submit it. */
+ if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
+ const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
+
+ submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
+ }
+
+ /* Last slot used, flush them all. */
+ if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
+ return flush_scrub_stripes(sctx);
return 0;
}
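The hunk above changes queue_scrub_stripe() from "flush when full" to "submit a group every 8 stripes, flush everything at 128 stripes". A toy standalone model of that cadence, with the kernel structures stripped away (the reset to zero after the flush mirrors what flush_scrub_stripes() is assumed to do with cur_stripe; that part is not shown in this hunk):

#include <stdio.h>

#define STRIPES_PER_GROUP 8
#define TOTAL_STRIPES     (16 * STRIPES_PER_GROUP)

int main(void)
{
	int cur_stripe = 0;

	for (int queued = 0; queued < 200; queued++) {
		cur_stripe++;

		/* A full group was just filled: submit its reads in one go. */
		if (cur_stripe % STRIPES_PER_GROUP == 0)
			printf("submit group starting at slot %d\n",
			       cur_stripe - STRIPES_PER_GROUP);

		/* Last slot used: flush all stripes and start over. */
		if (cur_stripe == TOTAL_STRIPES) {
			printf("flush all %d stripes\n", cur_stripe);
			cur_stripe = 0;
		}
	}
	return 0;
}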
@@ -1785,6 +1833,8 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_raid_bio *rbio;
struct btrfs_io_context *bioc = NULL;
+ struct btrfs_path extent_path = { 0 };
+ struct btrfs_path csum_path = { 0 };
struct bio *bio;
struct scrub_stripe *stripe;
bool all_empty = true;
@@ -1795,6 +1845,16 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
ASSERT(sctx->raid56_data_stripes);
+ /*
+ * For data stripe search, we cannot re-use the same extent/csum paths,
+ * as the data stripe bytenr may be smaller than the previous extent's. Thus
+ * we have to use our own extent/csum paths.
+ */
+ extent_path.search_commit_root = 1;
+ extent_path.skip_locking = 1;
+ csum_path.search_commit_root = 1;
+ csum_path.skip_locking = 1;
+
for (int i = 0; i < data_stripes; i++) {
int stripe_index;
int rot;
@@ -1809,7 +1869,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
scrub_reset_stripe(stripe);
set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
- ret = scrub_find_fill_first_stripe(bg,
+ ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
map->stripes[stripe_index].dev, physical, 1,
full_stripe_start + btrfs_stripe_nr_to_offset(i),
BTRFS_STRIPE_LEN, stripe);
@@ -1854,24 +1914,6 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
/* For now, no zoned support for RAID56. */
ASSERT(!btrfs_is_zoned(sctx->fs_info));
- /* Writeback for the repaired sectors. */
- for (int i = 0; i < data_stripes; i++) {
- unsigned long repaired;
-
- stripe = &sctx->raid56_data_stripes[i];
-
- bitmap_andnot(&repaired, &stripe->init_error_bitmap,
- &stripe->error_bitmap, stripe->nr_sectors);
- scrub_write_sectors(sctx, stripe, repaired, false);
- }
-
- /* Wait for the above writebacks to finish. */
- for (int i = 0; i < data_stripes; i++) {
- stripe = &sctx->raid56_data_stripes[i];
-
- wait_scrub_stripe_io(stripe);
- }
-
/*
* Now all data stripes are properly verified. Check if we have any
* unrepaired, if so abort immediately or we could further corrupt the
@@ -1937,6 +1979,8 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
bio_put(bio);
btrfs_bio_counter_dec(fs_info);
+ btrfs_release_path(&extent_path);
+ btrfs_release_path(&csum_path);
out:
return ret;
}
@@ -1958,18 +2002,15 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
const u64 logical_end = logical_start + logical_length;
- /* An artificial limit, inherit from old scrub behavior */
- struct btrfs_path path = { 0 };
u64 cur_logical = logical_start;
int ret;
/* The range must be inside the bg */
ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
- path.search_commit_root = 1;
- path.skip_locking = 1;
/* Go through each extent items inside the logical range */
while (cur_logical < logical_end) {
+ u64 found_logical;
u64 cur_physical = physical + cur_logical - logical_start;
/* Canceled? */
@@ -1994,7 +2035,7 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
cur_logical, logical_end - cur_logical,
- cur_physical);
+ cur_physical, &found_logical);
if (ret > 0) {
/* No more extent, just update the accounting */
sctx->stat.last_physical = physical + logical_length;
@@ -2004,14 +2045,11 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
if (ret < 0)
break;
- ASSERT(sctx->cur_stripe > 0);
- cur_logical = sctx->stripes[sctx->cur_stripe - 1].logical
- + BTRFS_STRIPE_LEN;
+ cur_logical = found_logical + BTRFS_STRIPE_LEN;
/* Don't hold CPU for too long time */
cond_resched();
}
- btrfs_release_path(&path);
return ret;
}
@@ -2109,6 +2147,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
u64 stripe_logical;
int stop_loop = 0;
+ /* Extent_path should be released by now. */
+ ASSERT(sctx->extent_path.nodes[0] == NULL);
+
scrub_blocked_if_needed(fs_info);
if (sctx->is_dev_replace &&
@@ -2227,6 +2268,9 @@ out:
ret2 = flush_scrub_stripes(sctx);
if (!ret)
ret = ret2;
+ btrfs_release_path(&sctx->extent_path);
+ btrfs_release_path(&sctx->csum_path);
+
if (sctx->raid56_data_stripes) {
for (int i = 0; i < nr_data_stripes(map); i++)
release_scrub_stripe(&sctx->raid56_data_stripes[i]);
@@ -2711,8 +2755,7 @@ static void scrub_workers_put(struct btrfs_fs_info *fs_info)
/*
* get a reference count on fs_info->scrub_workers. start worker if necessary
*/
-static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
- int is_dev_replace)
+static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
{
struct workqueue_struct *scrub_workers = NULL;
unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
@@ -2722,10 +2765,7 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
return 0;
- if (is_dev_replace)
- scrub_workers = alloc_ordered_workqueue("btrfs-scrub", flags);
- else
- scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
+ scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
if (!scrub_workers)
return -ENOMEM;
@@ -2777,7 +2817,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (IS_ERR(sctx))
return PTR_ERR(sctx);
- ret = scrub_workers_get(fs_info, is_dev_replace);
+ ret = scrub_workers_get(fs_info);
if (ret)
goto out_free_ctx;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 8bfd44750efe..3a566150c531 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3685,7 +3685,7 @@ static void tail_append_pending_moves(struct send_ctx *sctx,
static int apply_children_dir_moves(struct send_ctx *sctx)
{
struct pending_dir_move *pm;
- struct list_head stack;
+ LIST_HEAD(stack);
u64 parent_ino = sctx->cur_ino;
int ret = 0;
@@ -3693,7 +3693,6 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
if (!pm)
return 0;
- INIT_LIST_HEAD(&stack);
tail_append_pending_moves(sctx, pm, &stack);
while (!list_empty(&stack)) {
@@ -4165,7 +4164,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
int ret = 0;
struct recorded_ref *cur;
struct recorded_ref *cur2;
- struct list_head check_dirs;
+ LIST_HEAD(check_dirs);
struct fs_path *valid_path = NULL;
u64 ow_inode = 0;
u64 ow_gen;
@@ -4184,7 +4183,6 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
* which is always '..'
*/
BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
- INIT_LIST_HEAD(&check_dirs);
valid_path = fs_path_alloc();
if (!valid_path) {
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 75e7fa337e66..d7e8cd4f140c 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -389,11 +389,7 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
return 0;
used = btrfs_space_info_used(space_info, true);
- if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
- (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
- avail = 0;
- else
- avail = calc_available_free_space(fs_info, space_info, flush);
+ avail = calc_available_free_space(fs_info, space_info, flush);
if (used + bytes < space_info->total_bytes + avail)
return 1;
@@ -510,6 +506,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
int dump_block_groups)
{
struct btrfs_block_group *cache;
+ u64 total_avail = 0;
int index = 0;
spin_lock(&info->lock);
@@ -523,18 +520,27 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
down_read(&info->groups_sem);
again:
list_for_each_entry(cache, &info->block_groups[index], list) {
+ u64 avail;
+
spin_lock(&cache->lock);
+ avail = cache->length - cache->used - cache->pinned -
+ cache->reserved - cache->delalloc_bytes -
+ cache->bytes_super - cache->zone_unusable;
btrfs_info(fs_info,
- "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
- cache->start, cache->length, cache->used, cache->pinned,
- cache->reserved, cache->zone_unusable,
- cache->ro ? "[readonly]" : "");
+"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
+ cache->start, cache->length, cache->used, cache->pinned,
+ cache->reserved, cache->delalloc_bytes,
+ cache->bytes_super, cache->zone_unusable,
+ avail, cache->ro ? "[readonly]" : "");
spin_unlock(&cache->lock);
btrfs_dump_free_space(cache, bytes);
+ total_avail += avail;
}
if (++index < BTRFS_NR_RAID_TYPES)
goto again;
up_read(&info->groups_sem);
+
+ btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
}
static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
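The "available" number added to the dump above is plain subtraction over the block group counters. A standalone sketch with hypothetical values, where only the formula comes from the hunk above:

#include <stdio.h>
#include <stdint.h>

struct bg {
	uint64_t length, used, pinned, reserved;
	uint64_t delalloc_bytes, bytes_super, zone_unusable;
};

static uint64_t bg_available(const struct bg *c)
{
	/* Everything already accounted against the group is subtracted. */
	return c->length - c->used - c->pinned - c->reserved -
	       c->delalloc_bytes - c->bytes_super - c->zone_unusable;
}

int main(void)
{
	/* Made-up numbers for a 1GiB block group. */
	struct bg cache = {
		.length = 1024ULL * 1024 * 1024,
		.used = 700ULL * 1024 * 1024,
		.pinned = 16ULL * 1024 * 1024,
		.reserved = 8ULL * 1024 * 1024,
		.delalloc_bytes = 4ULL * 1024 * 1024,
		.bytes_super = 2ULL * 1024 * 1024,
		.zone_unusable = 0,
	};

	printf("%llu bytes available\n",
	       (unsigned long long)bg_available(&cache));
	return 0;
}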
@@ -715,9 +721,11 @@ static void flush_space(struct btrfs_fs_info *fs_info,
else
nr = -1;
- trans = btrfs_join_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ if (ret == -ENOENT)
+ ret = 0;
break;
}
ret = btrfs_run_delayed_items_nr(trans, nr);
@@ -733,9 +741,11 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
case FLUSH_DELAYED_REFS_NR:
case FLUSH_DELAYED_REFS:
- trans = btrfs_join_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ if (ret == -ENOENT)
+ ret = 0;
break;
}
if (state == FLUSH_DELAYED_REFS_NR)
@@ -747,18 +757,6 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
case ALLOC_CHUNK:
case ALLOC_CHUNK_FORCE:
- /*
- * For metadata space on zoned filesystem, reaching here means we
- * don't have enough space left in active_total_bytes. Try to
- * activate a block group first, because we may have inactive
- * block group already allocated.
- */
- ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
- if (ret < 0)
- break;
- else if (ret == 1)
- break;
-
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -770,22 +768,6 @@ static void flush_space(struct btrfs_fs_info *fs_info,
CHUNK_ALLOC_FORCE);
btrfs_end_transaction(trans);
- /*
- * For metadata space on zoned filesystem, allocating a new chunk
- * is not enough. We still need to activate the block * group.
- * Active the newly allocated block group by (maybe) finishing
- * a block group.
- */
- if (ret == 1) {
- ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
- /*
- * Revert to the original ret regardless we could finish
- * one block group or not.
- */
- if (ret >= 0)
- ret = 1;
- }
-
if (ret > 0 || ret == -ENOSPC)
ret = 0;
break;
@@ -800,9 +782,18 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
case COMMIT_TRANS:
ASSERT(current->journal_info == NULL);
- trans = btrfs_join_transaction(root);
+ /*
+ * We don't want to start a new transaction, just attach to the
+ * current one or wait it fully commits in case its commit is
+ * happening at the moment. Note: we don't use a nostart join
+ * because that does not wait for a transaction to fully commit
+ * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
+ */
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ if (ret == -ENOENT)
+ ret = 0;
break;
}
ret = btrfs_commit_transaction(trans);
@@ -1408,8 +1399,18 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
}
}
- /* Attempt to steal from the global rsv if we can. */
- if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
+ /*
+ * Attempt to steal from the global rsv if we can, except if the fs was
+ * turned into error mode due to a transaction abort when flushing space
+ * above. In that case fail with the abort error instead of returning
+ * success to the caller even if we could steal from the global rsv -
+ * this is just to have the caller fail immediately instead of later when
+ * trying to modify the fs, making it easier to debug -ENOSPC problems.
+ */
+ if (BTRFS_FS_ERROR(fs_info)) {
+ ticket->error = BTRFS_FS_ERROR(fs_info);
+ remove_ticket(space_info, ticket);
+ } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
ticket->error = -ENOSPC;
remove_ticket(space_info, ticket);
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f1dd172d8d5b..09bfe68d2ea3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -709,12 +709,16 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
break;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
case Opt_check_integrity_including_extent_data:
+ btrfs_warn(info,
+ "integrity checker is deprecated and will be removed in 6.7");
btrfs_info(info,
"enabling check integrity including extent data");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity:
+ btrfs_warn(info,
+ "integrity checker is deprecated and will be removed in 6.7");
btrfs_info(info, "enabling check integrity");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
@@ -727,6 +731,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
goto out;
}
info->check_integrity_print_mask = intarg;
+ btrfs_warn(info,
+ "integrity checker is deprecated and will be removed in 6.7");
btrfs_info(info, "check_integrity_print_mask 0x%x",
info->check_integrity_print_mask);
break;
@@ -2144,7 +2150,7 @@ static struct file_system_type btrfs_fs_type = {
.name = "btrfs",
.mount = btrfs_mount,
.kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_MGTIME,
};
static struct file_system_type btrfs_root_fs_type = {
@@ -2152,7 +2158,8 @@ static struct file_system_type btrfs_root_fs_type = {
.name = "btrfs",
.mount = btrfs_mount_root,
.kill_sb = btrfs_kill_super,
- .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA |
+ FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("btrfs");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 25294e624851..b1d1ac25237b 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -414,6 +414,12 @@ static ssize_t supported_sectorsizes_show(struct kobject *kobj,
BTRFS_ATTR(static_feature, supported_sectorsizes,
supported_sectorsizes_show);
+static ssize_t acl_show(struct kobject *kobj, struct kobj_attribute *a, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!IS_ENABLED(CONFIG_BTRFS_FS_POSIX_ACL));
+}
+BTRFS_ATTR(static_feature, acl, acl_show);
+
/*
* Features which only depend on kernel version.
*
@@ -421,6 +427,7 @@ BTRFS_ATTR(static_feature, supported_sectorsizes,
* btrfs_supported_feature_attrs.
*/
static struct attribute *btrfs_supported_static_feature_attrs[] = {
+ BTRFS_ATTR_PTR(static_feature, acl),
BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
BTRFS_ATTR_PTR(static_feature, supported_checksums),
BTRFS_ATTR_PTR(static_feature, send_stream_version),
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index f6bc6d738555..1cc86af97dc6 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -319,86 +319,139 @@ out:
return ret;
}
-static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
- unsigned long len)
+static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb)
{
unsigned long i;
- for (i = 0; i < len * BITS_PER_BYTE; i++) {
+ for (i = 0; i < eb->len * BITS_PER_BYTE; i++) {
int bit, bit1;
bit = !!test_bit(i, bitmap);
bit1 = !!extent_buffer_test_bit(eb, 0, i);
if (bit1 != bit) {
- test_err("bits do not match");
+ u8 has;
+ u8 expect;
+
+ read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
+ expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));
+
+ test_err(
+ "bits do not match, start byte 0 bit %lu, byte %lu has 0x%02x expect 0x%02x",
+ i, i / BITS_PER_BYTE, has, expect);
return -EINVAL;
}
bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
i % BITS_PER_BYTE);
if (bit1 != bit) {
- test_err("offset bits do not match");
+ u8 has;
+ u8 expect;
+
+ read_extent_buffer(eb, &has, i / BITS_PER_BYTE, 1);
+ expect = bitmap_get_value8(bitmap, ALIGN(i, BITS_PER_BYTE));
+
+ test_err(
+ "bits do not match, start byte %lu bit %lu, byte %lu has 0x%02x expect 0x%02x",
+ i / BITS_PER_BYTE, i % BITS_PER_BYTE,
+ i / BITS_PER_BYTE, has, expect);
return -EINVAL;
}
}
return 0;
}
-static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
- unsigned long len)
+static int test_bitmap_set(const char *name, unsigned long *bitmap,
+ struct extent_buffer *eb,
+ unsigned long byte_start, unsigned long bit_start,
+ unsigned long bit_len)
+{
+ int ret;
+
+ bitmap_set(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
+ extent_buffer_bitmap_set(eb, byte_start, bit_start, bit_len);
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret < 0)
+ test_err("%s test failed", name);
+ return ret;
+}
+
+static int test_bitmap_clear(const char *name, unsigned long *bitmap,
+ struct extent_buffer *eb,
+ unsigned long byte_start, unsigned long bit_start,
+ unsigned long bit_len)
+{
+ int ret;
+
+ bitmap_clear(bitmap, byte_start * BITS_PER_BYTE + bit_start, bit_len);
+ extent_buffer_bitmap_clear(eb, byte_start, bit_start, bit_len);
+ ret = check_eb_bitmap(bitmap, eb);
+ if (ret < 0)
+ test_err("%s test failed", name);
+ return ret;
+}
+static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb)
{
unsigned long i, j;
+ unsigned long byte_len = eb->len;
u32 x;
int ret;
- memset(bitmap, 0, len);
- memzero_extent_buffer(eb, 0, len);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
- test_err("bitmap was not zeroed");
- return -EINVAL;
- }
+ ret = test_bitmap_clear("clear all run 1", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
- bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("setting all bits failed");
+ ret = test_bitmap_set("set all", bitmap, eb, 0, 0, byte_len * BITS_PER_BYTE);
+ if (ret < 0)
return ret;
- }
- bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("clearing all bits failed");
+ ret = test_bitmap_clear("clear all run 2", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("same byte set", bitmap, eb, 0, 2, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("same byte partial clear", bitmap, eb, 0, 4, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross byte set", bitmap, eb, 2, 4, 8);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross multi byte set", bitmap, eb, 4, 4, 24);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross byte clear", bitmap, eb, 2, 6, 4);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_clear("cross multi byte clear", bitmap, eb, 4, 6, 20);
+ if (ret < 0)
return ret;
- }
/* Straddling pages test */
- if (len > PAGE_SIZE) {
- bitmap_set(bitmap,
- (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
- sizeof(long) * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("setting straddling pages failed");
+ if (byte_len > PAGE_SIZE) {
+ ret = test_bitmap_set("cross page set", bitmap, eb,
+ PAGE_SIZE - sizeof(long) / 2, 0,
+ sizeof(long) * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ ret = test_bitmap_set("cross page set all", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
return ret;
- }
- bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
- bitmap_clear(bitmap,
- (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
- sizeof(long) * BITS_PER_BYTE);
- extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+ ret = test_bitmap_clear("cross page clear", bitmap, eb,
+ PAGE_SIZE - sizeof(long) / 2, 0,
sizeof(long) * BITS_PER_BYTE);
- ret = check_eb_bitmap(bitmap, eb, len);
- if (ret) {
- test_err("clearing straddling pages failed");
+ if (ret < 0)
return ret;
- }
}
/*
@@ -406,9 +459,12 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
* something repetitive that could miss some hypothetical off-by-n bug.
*/
x = 0;
- bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
- extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
- for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
+ ret = test_bitmap_clear("clear all run 3", bitmap, eb, 0, 0,
+ byte_len * BITS_PER_BYTE);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < byte_len * BITS_PER_BYTE / 32; i++) {
x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
for (j = 0; j < 32; j++) {
if (x & (1U << j)) {
@@ -418,7 +474,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
}
}
- ret = check_eb_bitmap(bitmap, eb, len);
+ ret = check_eb_bitmap(bitmap, eb);
if (ret) {
test_err("random bit pattern failed");
return ret;
@@ -456,7 +512,7 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
goto out;
}
- ret = __test_eb_bitmaps(bitmap, eb, nodesize);
+ ret = __test_eb_bitmaps(bitmap, eb);
if (ret)
goto out;
@@ -473,7 +529,7 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
goto out;
}
- ret = __test_eb_bitmaps(bitmap, eb, nodesize);
+ ret = __test_eb_bitmaps(bitmap, eb);
out:
free_extent_buffer(eb);
kfree(bitmap);
@@ -592,6 +648,146 @@ out:
return ret;
}
+static void dump_eb_and_memory_contents(struct extent_buffer *eb, void *memory,
+ const char *test_name)
+{
+ for (int i = 0; i < eb->len; i++) {
+ struct page *page = eb->pages[i >> PAGE_SHIFT];
+ void *addr = page_address(page) + offset_in_page(i);
+
+ if (memcmp(addr, memory + i, 1) != 0) {
+ test_err("%s failed", test_name);
+ test_err("eb and memory diffs at byte %u, eb has 0x%02x memory has 0x%02x",
+ i, *(u8 *)addr, *(u8 *)(memory + i));
+ return;
+ }
+ }
+}
+
+static int verify_eb_and_memory(struct extent_buffer *eb, void *memory,
+ const char *test_name)
+{
+ for (int i = 0; i < (eb->len >> PAGE_SHIFT); i++) {
+ void *eb_addr = page_address(eb->pages[i]);
+
+ if (memcmp(memory + (i << PAGE_SHIFT), eb_addr, PAGE_SIZE) != 0) {
+ dump_eb_and_memory_contents(eb, memory, test_name);
+ return -EUCLEAN;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Init both memory and extent buffer contents to the same randomly generated
+ * contents.
+ */
+static void init_eb_and_memory(struct extent_buffer *eb, void *memory)
+{
+ get_random_bytes(memory, eb->len);
+ write_extent_buffer(eb, memory, 0, eb->len);
+}
+
+static int test_eb_mem_ops(u32 sectorsize, u32 nodesize)
+{
+ struct btrfs_fs_info *fs_info;
+ struct extent_buffer *eb = NULL;
+ void *memory = NULL;
+ int ret;
+
+ test_msg("running extent buffer memory operation tests");
+
+ fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
+ if (!fs_info) {
+ test_std_err(TEST_ALLOC_FS_INFO);
+ return -ENOMEM;
+ }
+
+ memory = kvzalloc(nodesize, GFP_KERNEL);
+ if (!memory) {
+ test_err("failed to allocate memory");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = __alloc_dummy_extent_buffer(fs_info, SZ_1M, nodesize);
+ if (!eb) {
+ test_std_err(TEST_ALLOC_EXTENT_BUFFER);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ init_eb_and_memory(eb, memory);
+ ret = verify_eb_and_memory(eb, memory, "full eb write");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory, memory + 16, 16);
+ memcpy_extent_buffer(eb, 0, 16, 16);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory, memory + 2048, 16);
+ memcpy_extent_buffer(eb, 0, 2048, 16);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ memcpy(memory, memory + 2048, 2048);
+ memcpy_extent_buffer(eb, 0, 2048, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page non-overlapping memcpy 3");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 512, memory + 256, 512);
+ memmove_extent_buffer(eb, 512, 256, 512);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 2048, memory + 512, 2048);
+ memmove_extent_buffer(eb, 2048, 512, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ memmove(memory + 512, memory + 2048, 2048);
+ memmove_extent_buffer(eb, 512, 2048, 2048);
+ ret = verify_eb_and_memory(eb, memory, "same page overlapping memcpy 3");
+ if (ret < 0)
+ goto out;
+
+ if (nodesize > PAGE_SIZE) {
+ memcpy(memory, memory + 4096 - 128, 256);
+ memcpy_extent_buffer(eb, 0, 4096 - 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memcpy(memory + 4096 - 128, memory + 4096 + 128, 256);
+ memcpy_extent_buffer(eb, 4096 - 128, 4096 + 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page non-overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 4096 - 128, memory + 4096 - 64, 256);
+ memmove_extent_buffer(eb, 4096 - 128, 4096 - 64, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 1");
+ if (ret < 0)
+ goto out;
+
+ memmove(memory + 4096 - 64, memory + 4096 - 128, 256);
+ memmove_extent_buffer(eb, 4096 - 64, 4096 - 128, 256);
+ ret = verify_eb_and_memory(eb, memory, "cross page overlapping memcpy 2");
+ if (ret < 0)
+ goto out;
+ }
+out:
+ free_extent_buffer(eb);
+ kvfree(memory);
+ btrfs_free_dummy_fs_info(fs_info);
+ return ret;
+}
+
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
int ret;
@@ -607,6 +803,10 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
goto out;
ret = test_eb_bitmaps(sectorsize, nodesize);
+ if (ret)
+ goto out;
+
+ ret = test_eb_mem_ops(sectorsize, nodesize);
out:
return ret;
}
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index ed0f36ae5346..29bdd08b241f 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include "btrfs-tests.h"
#include "../ctree.h"
+#include "../btrfs_inode.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../block-group.h"
@@ -442,6 +443,406 @@ static int test_case_4(struct btrfs_fs_info *fs_info,
return ret;
}
+static int add_compressed_extent(struct extent_map_tree *em_tree,
+ u64 start, u64 len, u64 block_start)
+{
+ struct extent_map *em;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ em->start = start;
+ em->len = len;
+ em->block_start = block_start;
+ em->block_len = SZ_4K;
+ set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ free_extent_map(em);
+ if (ret < 0) {
+ test_err("cannot add extent map [%llu, %llu)", start, start + len);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct extent_range {
+ u64 start;
+ u64 len;
+};
+
+/* The valid states of the tree after every drop, as described below. */
+struct extent_range valid_ranges[][7] = {
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 3, .len = SZ_4K * 3}, /* [12k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K * 3}, /* [24k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K }, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
+ { .start = SZ_32K, .len = SZ_4K}, /* [32k, 36k) */
+ { .start = SZ_32K + SZ_4K, .len = SZ_4K}, /* [36k, 40k) */
+ { .start = SZ_4K * 10, .len = SZ_4K * 6}, /* [40k, 64k) */
+ },
+ {
+ { .start = 0, .len = SZ_8K}, /* [0, 8K) */
+ { .start = SZ_4K * 5, .len = SZ_4K}, /* [20k, 24k) */
+ { .start = SZ_4K * 6, .len = SZ_4K}, /* [24k, 28k) */
+ }
+};
+
+static int validate_range(struct extent_map_tree *em_tree, int index)
+{
+ struct rb_node *n;
+ int i;
+
+ for (i = 0, n = rb_first_cached(&em_tree->map);
+ valid_ranges[index][i].len && n;
+ i++, n = rb_next(n)) {
+ struct extent_map *entry = rb_entry(n, struct extent_map, rb_node);
+
+ if (entry->start != valid_ranges[index][i].start) {
+ test_err("mapping has start %llu expected %llu",
+ entry->start, valid_ranges[index][i].start);
+ return -EINVAL;
+ }
+
+ if (entry->len != valid_ranges[index][i].len) {
+ test_err("mapping has len %llu expected %llu",
+ entry->len, valid_ranges[index][i].len);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * We exited because we don't have any more entries in the extent_map
+ * but we still expect more valid entries.
+ */
+ if (valid_ranges[index][i].len) {
+ test_err("missing an entry");
+ return -EINVAL;
+ }
+
+ /* We exited the loop but still have entries in the extent map. */
+ if (n) {
+ test_err("we have a left over entry in the extent map we didn't expect");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Test scenario:
+ *
+ * Test the various edge cases of btrfs_drop_extent_map_range, create the
+ * following ranges
+ *
+ * [0, 12k)[12k, 24k)[24k, 36k)[36k, 40k)[40k,64k)
+ *
+ * And then we'll drop:
+ *
+ * [8k, 12k) - test the single front split
+ * [12k, 20k) - test the single back split
+ * [28k, 32k) - test the double split
+ * [32k, 64k) - test whole em dropping
+ *
+ * They'll have the EXTENT_FLAG_COMPRESSED flag set to keep the em tree from
+ * merging the ems.
+ */
+static int test_case_5(void)
+{
+ struct extent_map_tree *em_tree;
+ struct inode *inode;
+ u64 start, end;
+ int ret;
+
+ test_msg("Running btrfs_drop_extent_map_range tests");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return -ENOMEM;
+ }
+
+ em_tree = &BTRFS_I(inode)->extent_tree;
+
+ /* [0, 12k) */
+ ret = add_compressed_extent(em_tree, 0, SZ_4K * 3, 0);
+ if (ret) {
+ test_err("cannot add extent range [0, 12K)");
+ goto out;
+ }
+
+ /* [12k, 24k) */
+ ret = add_compressed_extent(em_tree, SZ_4K * 3, SZ_4K * 3, SZ_4K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [24k, 36k) */
+ ret = add_compressed_extent(em_tree, SZ_4K * 6, SZ_4K * 3, SZ_8K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [36k, 40k) */
+ ret = add_compressed_extent(em_tree, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* [40k, 64k) */
+ ret = add_compressed_extent(em_tree, SZ_4K * 10, SZ_4K * 6, SZ_16K);
+ if (ret) {
+ test_err("cannot add extent range [12k, 24k)");
+ goto out;
+ }
+
+ /* Drop [8k, 12k) */
+ start = SZ_8K;
+ end = (3 * SZ_4K) - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 0);
+ if (ret)
+ goto out;
+
+ /* Drop [12k, 20k) */
+ start = SZ_4K * 3;
+ end = SZ_16K + SZ_4K - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 1);
+ if (ret)
+ goto out;
+
+ /* Drop [28k, 32k) */
+ start = SZ_32K - SZ_4K;
+ end = SZ_32K - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 2);
+ if (ret)
+ goto out;
+
+ /* Drop [32k, 64k) */
+ start = SZ_32K;
+ end = SZ_64K - 1;
+ btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
+ ret = validate_range(&BTRFS_I(inode)->extent_tree, 3);
+ if (ret)
+ goto out;
+out:
+ iput(inode);
+ return ret;
+}
+
+/*
+ * Test the btrfs_add_extent_mapping helper which will attempt to create an em
+ * for areas between two existing ems. Validate it doesn't do this when there
+ * are two unmerged ems side by side.
+ */
+static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree)
+{
+ struct extent_map *em = NULL;
+ int ret;
+
+ ret = add_compressed_extent(em_tree, 0, SZ_4K, 0);
+ if (ret)
+ goto out;
+
+ ret = add_compressed_extent(em_tree, SZ_4K, SZ_4K, 0);
+ if (ret)
+ goto out;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_16K;
+ em->block_len = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, 0, SZ_8K);
+ write_unlock(&em_tree->lock);
+
+ if (ret != 0) {
+ test_err("got an error when adding our em: %d", ret);
+ goto out;
+ }
+
+ ret = -EINVAL;
+ if (em->start != 0) {
+ test_err("unexpected em->start at %llu, wanted 0", em->start);
+ goto out;
+ }
+ if (em->len != SZ_4K) {
+ test_err("unexpected em->len %llu, expected 4K", em->len);
+ goto out;
+ }
+ ret = 0;
+out:
+ free_extent_map(em);
+ free_extent_map_tree(em_tree);
+ return ret;
+}
+
+/*
+ * Regression test for btrfs_drop_extent_map_range. Calling with skip_pinned ==
+ * true would mess up the start/end calculations and subsequent splits would be
+ * incorrect.
+ */
+static int test_case_7(void)
+{
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+ struct inode *inode;
+ int ret;
+
+ test_msg("Running btrfs_drop_extent_cache with pinned");
+
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
+ return -ENOMEM;
+ }
+
+ em_tree = &BTRFS_I(inode)->extent_tree;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* [0, 16K), pinned */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_4K;
+ set_bit(EXTENT_FLAG_PINNED, &em->flags);
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("couldn't add extent map");
+ goto out;
+ }
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* [32K, 48K), not pinned */
+ em->start = SZ_32K;
+ em->len = SZ_16K;
+ em->block_start = SZ_32K;
+ em->block_len = SZ_16K;
+ write_lock(&em_tree->lock);
+ ret = add_extent_mapping(em_tree, em, 0);
+ write_unlock(&em_tree->lock);
+ if (ret < 0) {
+ test_err("couldn't add extent map");
+ goto out;
+ }
+ free_extent_map(em);
+
+ /*
+ * Drop [0, 36K). This should skip the pinned [0, 16K) extent and then
+ * split the [32K, 48K) extent.
+ */
+ btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (36 * SZ_1K) - 1, true);
+
+ /* Make sure our extent maps look sane. */
+ ret = -EINVAL;
+
+ em = lookup_extent_mapping(em_tree, 0, SZ_16K);
+ if (!em) {
+ test_err("didn't find an em at 0 as expected");
+ goto out;
+ }
+
+ if (em->start != 0) {
+ test_err("em->start is %llu, expected 0", em->start);
+ goto out;
+ }
+
+ if (em->len != SZ_16K) {
+ test_err("em->len is %llu, expected 16K", em->len);
+ goto out;
+ }
+
+ free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, SZ_16K, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (em) {
+ test_err("found an em when we weren't expecting one");
+ goto out;
+ }
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, SZ_32K, SZ_16K);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ test_err("didn't find an em at 32K as expected");
+ goto out;
+ }
+
+ if (em->start != (36 * SZ_1K)) {
+ test_err("em->start is %llu, expected 36K", em->start);
+ goto out;
+ }
+
+ if (em->len != (12 * SZ_1K)) {
+ test_err("em->len is %llu, expected 12K", em->len);
+ goto out;
+ }
+
+ free_extent_map(em);
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, 48 * SZ_1K, (u64)-1);
+ read_unlock(&em_tree->lock);
+ if (em) {
+ test_err("found an unexpected em above 48K");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ free_extent_map(em);
+ iput(inode);
+ return ret;
+}
+
struct rmap_test_vector {
u64 raid_type;
u64 physical_start;
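The drop scenarios exercised by test_case_5() above (single front split, single back split, double split, whole drop) boil down to interval arithmetic. A standalone toy model, deliberately not using any btrfs structures, that reproduces the first drop of the test (dropping [8K, 12K) front-splits [0, 12K) into [0, 8K)):

#include <stdio.h>

struct range { unsigned long long start, len; };

/* Drop [dstart, dend) from r[], writing the surviving pieces to out[]. */
static int drop_range(const struct range *r, int nr,
		      unsigned long long dstart, unsigned long long dend,
		      struct range *out)
{
	int n = 0;

	for (int i = 0; i < nr; i++) {
		unsigned long long s = r[i].start, e = r[i].start + r[i].len;

		if (e <= dstart || s >= dend) {		/* untouched */
			out[n++] = r[i];
			continue;
		}
		if (s < dstart)		/* keep the piece in front of the drop */
			out[n++] = (struct range){ s, dstart - s };
		if (e > dend)		/* keep the piece behind the drop */
			out[n++] = (struct range){ dend, e - dend };
	}
	return n;
}

int main(void)
{
	/* The five ranges of the test, in units of 1K. */
	struct range cur[8] = {
		{ 0, 12 }, { 12, 12 }, { 24, 12 }, { 36, 4 }, { 40, 24 },
	};
	struct range next[8];
	int nr = drop_range(cur, 5, 8, 12, next);	/* drop [8K, 12K) */

	for (int i = 0; i < nr; i++)
		printf("[%lluK, %lluK) ", next[i].start,
		       next[i].start + next[i].len);
	printf("\n");	/* [0K, 8K) [12K, 24K) [24K, 36K) [36K, 40K) [40K, 64K) */
	return 0;
}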
@@ -619,6 +1020,17 @@ int btrfs_test_extent_map(void)
if (ret)
goto out;
ret = test_case_4(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_5();
+ if (ret)
+ goto out;
+ ret = test_case_6(fs_info, em_tree);
+ if (ret)
+ goto out;
+ ret = test_case_7();
+ if (ret)
+ goto out;
test_msg("running rmap tests");
for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 91b6c2fdc420..874e4394df86 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -292,10 +292,11 @@ loop:
spin_unlock(&fs_info->trans_lock);
/*
- * If we are ATTACH, we just want to catch the current transaction,
- * and commit it. If there is no transaction, just return ENOENT.
+ * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the
+ * current transaction, and commit it. If there is no transaction, just
+ * return ENOENT.
*/
- if (type == TRANS_ATTACH)
+ if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)
return -ENOENT;
/*
@@ -591,8 +592,13 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
u64 delayed_refs_bytes = 0;
qgroup_reserved = num_items * fs_info->nodesize;
- ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
- enforce_qgroups);
+ /*
+ * Use prealloc for now, as there might be a currently running
+ * transaction that could free this reserved space prematurely
+ * by committing.
+ */
+ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved,
+ enforce_qgroups, false);
if (ret)
return ERR_PTR(ret);
@@ -705,6 +711,14 @@ again:
h->reloc_reserved = reloc_reserved;
}
+ /*
+ * Now that we have found a transaction to be a part of, convert the
+ * qgroup reservation from prealloc to pertrans. A different transaction
+ * can't race in and free our pertrans out from under us.
+ */
+ if (qgroup_reserved)
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+
got_it:
if (!current->journal_info)
current->journal_info = h;
@@ -752,7 +766,7 @@ alloc_fail:
btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
num_bytes, NULL);
reserve_fail:
- btrfs_qgroup_free_meta_pertrans(root, qgroup_reserved);
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
}
@@ -785,7 +799,10 @@ struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *
/*
* Similar to regular join but it never starts a transaction when none is
- * running or after waiting for the current one to finish.
+ * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED.
+ * This is similar to btrfs_attach_transaction() but it allows the join to
+ * happen if the transaction commit already started but it's not yet in the
+ * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING).
*/
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
{
@@ -1060,8 +1077,8 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
u64 start = 0;
u64 end;
- while (!find_first_extent_bit(dirty_pages, start, &start, &end,
- mark, &cached_state)) {
+ while (find_first_extent_bit(dirty_pages, start, &start, &end,
+ mark, &cached_state)) {
bool wait_writeback = false;
err = convert_extent_bit(dirty_pages, start, end,
@@ -1114,8 +1131,8 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
u64 start = 0;
u64 end;
- while (!find_first_extent_bit(dirty_pages, start, &start, &end,
- EXTENT_NEED_WAIT, &cached_state)) {
+ while (find_first_extent_bit(dirty_pages, start, &start, &end,
+ EXTENT_NEED_WAIT, &cached_state)) {
/*
* Ignore -ENOMEM errors returned by clear_extent_bit().
* When committing the transaction, we'll remove any entries
@@ -1837,8 +1854,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
fname.disk_name.len * 2);
- parent_inode->i_mtime = current_time(parent_inode);
- parent_inode->i_ctime = parent_inode->i_mtime;
+ parent_inode->i_mtime = inode_set_ctime_current(parent_inode);
ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 365a1cc0a3c3..d1e46b839519 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4148,9 +4148,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
inode->i_mtime.tv_nsec);
btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode->i_ctime.tv_sec);
+ inode_get_ctime(inode).tv_sec);
btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode->i_ctime.tv_nsec);
+ inode_get_ctime(inode).tv_nsec);
/*
* We do not need to set the nbytes field, in fact during a fast fsync
@@ -4841,13 +4841,11 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_extent *tmp;
struct extent_map *em, *n;
- struct list_head extents;
+ LIST_HEAD(extents);
struct extent_map_tree *tree = &inode->extent_tree;
int ret = 0;
int num = 0;
- INIT_LIST_HEAD(&extents);
-
write_lock(&tree->lock);
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
@@ -6794,8 +6792,8 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
while (true) {
struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
+ struct extent_buffer *leaf;
+ int slot;
struct btrfs_key search_key;
struct inode *inode;
u64 ino;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6aa9bf3661ac..9621455edebc 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -681,6 +681,14 @@ error_free_page:
return -EINVAL;
}
+u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
+{
+ bool has_metadata_uuid = (btrfs_super_incompat_flags(sb) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
+
+ return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
+}
+
/*
* Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
* being created with a disk that has already completed its fsid change. Such
@@ -833,15 +841,8 @@ static noinline struct btrfs_device *device_list_add(const char *path,
found_transid > fs_devices->latest_generation) {
memcpy(fs_devices->fsid, disk_super->fsid,
BTRFS_FSID_SIZE);
-
- if (has_metadata_uuid)
- memcpy(fs_devices->metadata_uuid,
- disk_super->metadata_uuid,
- BTRFS_FSID_SIZE);
- else
- memcpy(fs_devices->metadata_uuid,
- disk_super->fsid, BTRFS_FSID_SIZE);
-
+ memcpy(fs_devices->metadata_uuid,
+ btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
fs_devices->fsid_change = false;
}
}
@@ -851,8 +852,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
if (fs_devices->opened) {
btrfs_err(NULL,
- "device %s belongs to fsid %pU, and the fs is already mounted",
- path, fs_devices->fsid);
+"device %s belongs to fsid %pU, and the fs is already mounted, scanned by %s (%d)",
+ path, fs_devices->fsid, current->comm,
+ task_pid_nr(current));
mutex_unlock(&fs_devices->device_list_mutex);
return ERR_PTR(-EBUSY);
}
@@ -1424,9 +1426,9 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
lockdep_assert_held(&device->fs_info->chunk_mutex);
- if (!find_first_extent_bit(&device->alloc_state, *start,
- &physical_start, &physical_end,
- CHUNK_ALLOCATED, NULL)) {
+ if (find_first_extent_bit(&device->alloc_state, *start,
+ &physical_start, &physical_end,
+ CHUNK_ALLOCATED, NULL)) {
if (in_range(physical_start, *start, len) ||
in_range(*start, physical_start,
@@ -1438,18 +1440,18 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
return false;
}
-static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
+static u64 dev_extent_search_start(struct btrfs_device *device)
{
switch (device->fs_devices->chunk_alloc_policy) {
case BTRFS_CHUNK_ALLOC_REGULAR:
- return max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED);
+ return BTRFS_DEVICE_RANGE_RESERVED;
case BTRFS_CHUNK_ALLOC_ZONED:
/*
* We don't care about the starting region like regular
* allocator, because we anyway use/reserve the first two zones
* for superblock logging.
*/
- return ALIGN(start, device->zone_info->zone_size);
+ return 0;
default:
BUG();
}
@@ -1581,15 +1583,15 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
* correct usable device space, as device extent freed in current transaction
* is not reported as available.
*/
-static int find_free_dev_extent_start(struct btrfs_device *device,
- u64 num_bytes, u64 search_start, u64 *start,
- u64 *len)
+static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
+ u64 *start, u64 *len)
{
struct btrfs_fs_info *fs_info = device->fs_info;
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
struct btrfs_dev_extent *dev_extent;
struct btrfs_path *path;
+ u64 search_start;
u64 hole_size;
u64 max_hole_start;
u64 max_hole_size;
@@ -1599,7 +1601,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
int slot;
struct extent_buffer *l;
- search_start = dev_extent_search_start(device, search_start);
+ search_start = dev_extent_search_start(device);
WARN_ON(device->zone_info &&
!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
@@ -1725,13 +1727,6 @@ out:
return ret;
}
-int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
- u64 *start, u64 *len)
-{
- /* FIXME use last free of some kind */
- return find_free_dev_extent_start(device, num_bytes, 0, start, len);
-}
-
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 start, u64 *dev_extent_len)
@@ -1917,15 +1912,13 @@ out:
static void update_dev_time(const char *device_path)
{
struct path path;
- struct timespec64 now;
int ret;
ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
if (ret)
return;
- now = current_time(d_inode(path.dentry));
- inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME | S_VERSION);
+ inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
path_put(&path);
}
@@ -6219,6 +6212,45 @@ static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *
stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
}
+/*
+ * Map one logical range to one or more physical ranges.
+ *
+ * @length: (Mandatory) mapped length of this run.
+ * One logical range can be split into different segments
+ * due to factors like zones and RAID0/5/6/10 stripe
+ * boundaries.
+ *
+ * @bioc_ret: (Mandatory) returned btrfs_io_context structure,
+ * which has one or more physical ranges (btrfs_io_stripe)
+ * recorded inside.
+ * Caller should call btrfs_put_bioc() to free it after use.
+ *
+ * @smap: (Optional) single physical range optimization.
+ * If the map request can be fulfilled by a single
+ * physical range, and this parameter is not NULL,
+ * then @bioc_ret would be NULL, and @smap would be
+ * updated.
+ *
+ * @mirror_num_ret: (Mandatory) returned mirror number if the original
+ * value is 0.
+ *
+ * Mirror number 0 means to choose any live mirror.
+ *
+ * For non-RAID56 profiles, non-zero mirror_num means
+ * the Nth mirror. (e.g. mirror_num 1 means the first
+ * copy).
+ *
+ * For RAID56 profile, mirror 1 means rebuild from P and
+ * the remaining data stripes.
+ *
+ * For RAID6 profile, mirror > 2 means marking another
+ * data/P stripe as bad and rebuilding from the remaining
+ * stripes.
+ *
+ * @need_raid_map:	(Used only by the integrity checker) whether the map wants
+ *			a full stripe map (including all data and P/Q stripes) for
+ *			RAID56. Should always be 1 except for the integrity checker.
+ */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret,
@@ -6393,9 +6425,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* I/O context structure.
*/
if (smap && num_alloc_stripes == 1 &&
- !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1) &&
- (op == BTRFS_MAP_READ || !dev_replace_is_ongoing ||
- !dev_replace->tgtdev)) {
+ !((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)) {
set_io_stripe(smap, map, stripe_index, stripe_offset, stripe_nr);
if (mirror_num_ret)
*mirror_num_ret = mirror_num;
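
For reference, a minimal sketch of a caller using the parameters documented above, assuming the btrfs_map_block() prototype shown in this hunk; the helper name is illustrative, and the single-range fast path via @smap is taken when @bioc_ret comes back NULL:

static int example_map_for_read(struct btrfs_fs_info *fs_info,
				u64 logical, u64 *length)
{
	struct btrfs_io_context *bioc = NULL;
	struct btrfs_io_stripe smap = { 0 };
	int mirror_num = 0;	/* 0: let btrfs pick any live mirror */
	int ret;

	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, length,
			      &bioc, &smap, &mirror_num, 1);
	if (ret)
		return ret;

	if (bioc) {
		/* The range spans several physical ranges (bioc->stripes). */
		btrfs_put_bioc(bioc);
	} else {
		/* Single physical range, described by smap.dev/smap.physical. */
	}
	return 0;
}
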
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index b8c51f16ba86..2128a032c3b7 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -650,8 +650,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset);
-int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
- u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats);
@@ -749,5 +747,6 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info);
bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical);
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
+u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb);
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index fc4b20c2688a..96828a13dd43 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -264,7 +264,7 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
goto out;
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -407,7 +407,7 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
ret = btrfs_set_prop(trans, inode, name, value, size, flags);
if (!ret) {
inode_inc_iversion(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
if (ret)
btrfs_abort_transaction(trans, ret);
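
The i_ctime conversions in this series all follow the same shape: direct stores to inode->i_ctime become calls to the ctime accessor helpers. A small sketch of the pattern, assuming the 6.6-era helpers in <linux/fs.h>; the function name is illustrative:

static void example_update_ctime(struct inode *inode, const struct inode *src)
{
	struct timespec64 old = inode_get_ctime(inode);		/* read */

	inode_set_ctime_current(inode);				/* ctime = now */
	inode_set_ctime_to_ts(inode, inode_get_ctime(src));	/* copy from src */
	(void)old;
}
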
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 72b90bc19a19..09bc325d075d 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -65,6 +65,9 @@
#define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT)
+static void wait_eb_writebacks(struct btrfs_block_group *block_group);
+static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written);
+
static inline bool sb_zone_is_full(const struct blk_zone *zone)
{
return (zone->cond == BLK_ZONE_COND_FULL) ||
@@ -465,8 +468,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
* use the cache.
*/
if (populate_cache && bdev_is_zoned(device->bdev)) {
- zone_info->zone_cache = vzalloc(sizeof(struct blk_zone) *
- zone_info->nr_zones);
+ zone_info->zone_cache = vcalloc(zone_info->nr_zones,
+ sizeof(struct blk_zone));
if (!zone_info->zone_cache) {
btrfs_err_in_rcu(device->fs_info,
"zoned: failed to allocate zone cache for %s",
@@ -1583,19 +1586,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
return;
WARN_ON(cache->bytes_super != 0);
-
- /* Check for block groups never get activated */
- if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
- cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
- !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
- cache->alloc_offset == 0) {
- unusable = cache->length;
- free = 0;
- } else {
- unusable = (cache->alloc_offset - cache->used) +
- (cache->length - cache->zone_capacity);
- free = cache->zone_capacity - cache->alloc_offset;
- }
+ unusable = (cache->alloc_offset - cache->used) +
+ (cache->length - cache->zone_capacity);
+ free = cache->zone_capacity - cache->alloc_offset;
/* We only need ->free_space in ALLOC_SEQ block groups */
cache->cached = BTRFS_CACHE_FINISHED;
@@ -1707,10 +1700,21 @@ void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{
struct btrfs_inode *inode = BTRFS_I(ordered->inode);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_ordered_sum *sum =
- list_first_entry(&ordered->list, typeof(*sum), list);
- u64 logical = sum->logical;
- u64 len = sum->len;
+ struct btrfs_ordered_sum *sum;
+ u64 logical, len;
+
+ /*
+	 * A write to a pre-allocated region is for data relocation, so it
+	 * should use the WRITE operation. No split/rewrite is necessary.
+ */
+ if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
+ return;
+
+ ASSERT(!list_empty(&ordered->list));
+	/* The list can only be empty in the pre-alloc case handled above. */
+ sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
+ logical = sum->logical;
+ len = sum->len;
while (len < ordered->disk_num_bytes) {
sum = list_next_entry(sum, list);
@@ -1747,41 +1751,121 @@ out:
}
}
-bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret)
+static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
+ struct btrfs_block_group **active_bg)
{
- struct btrfs_block_group *cache;
- bool ret = true;
+ const struct writeback_control *wbc = ctx->wbc;
+ struct btrfs_block_group *block_group = ctx->zoned_bg;
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
- if (!btrfs_is_zoned(fs_info))
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
return true;
- cache = btrfs_lookup_block_group(fs_info, eb->start);
- if (!cache)
- return true;
+ if (fs_info->treelog_bg == block_group->start) {
+ if (!btrfs_zone_activate(block_group)) {
+ int ret_fin = btrfs_zone_finish_one_bg(fs_info);
- if (cache->meta_write_pointer != eb->start) {
- btrfs_put_block_group(cache);
- cache = NULL;
- ret = false;
- } else {
- cache->meta_write_pointer = eb->start + eb->len;
- }
+ if (ret_fin != 1 || !btrfs_zone_activate(block_group))
+ return false;
+ }
+ } else if (*active_bg != block_group) {
+ struct btrfs_block_group *tgt = *active_bg;
- *cache_ret = cache;
+ /* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
+ lockdep_assert_held(&fs_info->zoned_meta_io_lock);
- return ret;
+ if (tgt) {
+ /*
+			 * If there is unsent IO left in the allocated area,
+			 * we cannot wait for it, as that may cause a deadlock.
+ */
+ if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
+ if (wbc->sync_mode == WB_SYNC_NONE ||
+ (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
+ return false;
+ }
+
+ /* Pivot active metadata/system block group. */
+ btrfs_zoned_meta_io_unlock(fs_info);
+ wait_eb_writebacks(tgt);
+ do_zone_finish(tgt, true);
+ btrfs_zoned_meta_io_lock(fs_info);
+ if (*active_bg == tgt) {
+ btrfs_put_block_group(tgt);
+ *active_bg = NULL;
+ }
+ }
+ if (!btrfs_zone_activate(block_group))
+ return false;
+ if (*active_bg != block_group) {
+ ASSERT(*active_bg == NULL);
+ *active_bg = block_group;
+ btrfs_get_block_group(block_group);
+ }
+ }
+
+ return true;
}
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
- struct extent_buffer *eb)
+/*
+ * Check if @ctx->eb is aligned to the write pointer.
+ *
+ * Return:
+ * 0: @ctx->eb is at the write pointer. You can write it.
+ * -EAGAIN: There is a hole. The caller should handle the case.
+ * -EBUSY: There is a hole, but the caller can just bail out.
+ */
+int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct btrfs_eb_write_context *ctx)
{
- if (!btrfs_is_zoned(eb->fs_info) || !cache)
- return;
+ const struct writeback_control *wbc = ctx->wbc;
+ const struct extent_buffer *eb = ctx->eb;
+ struct btrfs_block_group *block_group = ctx->zoned_bg;
+
+ if (!btrfs_is_zoned(fs_info))
+ return 0;
+
+ if (block_group) {
+ if (block_group->start > eb->start ||
+ block_group->start + block_group->length <= eb->start) {
+ btrfs_put_block_group(block_group);
+ block_group = NULL;
+ ctx->zoned_bg = NULL;
+ }
+ }
+
+ if (!block_group) {
+ block_group = btrfs_lookup_block_group(fs_info, eb->start);
+ if (!block_group)
+ return 0;
+ ctx->zoned_bg = block_group;
+ }
+
+ if (block_group->meta_write_pointer == eb->start) {
+ struct btrfs_block_group **tgt;
+
+ if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
+ return 0;
- ASSERT(cache->meta_write_pointer == eb->start + eb->len);
- cache->meta_write_pointer = eb->start;
+ if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ tgt = &fs_info->active_system_bg;
+ else
+ tgt = &fs_info->active_meta_bg;
+ if (check_bg_is_active(ctx, tgt))
+ return 0;
+ }
+
+ /*
+ * Since we may release fs_info->zoned_meta_io_lock, someone can already
+ * start writing this eb. In that case, we can just bail out.
+ */
+ if (block_group->meta_write_pointer > eb->start)
+ return -EBUSY;
+
+	/* If for_sync, this hole will be filled with a transaction commit. */
+ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+ return -EAGAIN;
+ return -EBUSY;
}
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
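
A hedged sketch of how a metadata writeback path could consume the 0/-EAGAIN/-EBUSY contract documented above; the wrapper is illustrative only:

static int example_submit_eb(struct btrfs_fs_info *fs_info,
			     struct btrfs_eb_write_context *ctx)
{
	int ret = btrfs_check_meta_write_pointer(fs_info, ctx);

	switch (ret) {
	case 0:
		/* ctx->eb is at the write pointer: submit the write here. */
		return 0;
	case -EBUSY:
		/* Someone else raced and is writing this eb: just bail out. */
		return 0;
	default:
		/* -EAGAIN: a hole before eb; defer, e.g. to transaction commit. */
		return ret;
	}
}
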
@@ -1879,10 +1963,10 @@ int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_space_info *space_info = block_group->space_info;
struct map_lookup *map;
struct btrfs_device *device;
u64 physical;
+ const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
bool ret;
int i;
@@ -1891,7 +1975,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
map = block_group->physical_map;
- spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
ret = true;
@@ -1904,30 +1987,44 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
goto out_unlock;
}
+ spin_lock(&fs_info->zone_active_bgs_lock);
for (i = 0; i < map->num_stripes; i++) {
+ struct btrfs_zoned_device_info *zinfo;
+ int reserved = 0;
+
device = map->stripes[i].dev;
physical = map->stripes[i].physical;
+ zinfo = device->zone_info;
- if (device->zone_info->max_active_zones == 0)
+ if (zinfo->max_active_zones == 0)
continue;
+ if (is_data)
+ reserved = zinfo->reserved_active_zones;
+ /*
+ * For the data block group, leave active zones for one
+ * metadata block group and one system block group.
+ */
+ if (atomic_read(&zinfo->active_zones_left) <= reserved) {
+ ret = false;
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ goto out_unlock;
+ }
+
if (!btrfs_dev_set_active_zone(device, physical)) {
/* Cannot activate the zone */
ret = false;
+ spin_unlock(&fs_info->zone_active_bgs_lock);
goto out_unlock;
}
+ if (!is_data)
+ zinfo->reserved_active_zones--;
}
+ spin_unlock(&fs_info->zone_active_bgs_lock);
/* Successfully activated all the zones */
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
- WARN_ON(block_group->alloc_offset != 0);
- if (block_group->zone_unusable == block_group->length) {
- block_group->zone_unusable = block_group->length - block_group->zone_capacity;
- space_info->bytes_zone_unusable -= block_group->zone_capacity;
- }
spin_unlock(&block_group->lock);
- btrfs_try_granting_tickets(fs_info, space_info);
- spin_unlock(&space_info->lock);
/* For the active block group list */
btrfs_get_block_group(block_group);
@@ -1940,7 +2037,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
out_unlock:
spin_unlock(&block_group->lock);
- spin_unlock(&space_info->lock);
return ret;
}
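
The activation gate added above boils down to a per-device inequality; a minimal sketch, assuming the reservation only constrains data block groups:

static bool example_can_activate_zone(struct btrfs_zoned_device_info *zinfo,
				      bool is_data)
{
	int reserved = is_data ? zinfo->reserved_active_zones : 0;

	/* e.g. 8 active zones left and 5 reserved: data may still take one */
	return atomic_read(&zinfo->active_zones_left) > reserved;
}
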
@@ -2006,6 +2102,10 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
* and block_group->meta_write_pointer for metadata.
*/
if (!fully_written) {
+ if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
+ spin_unlock(&block_group->lock);
+ return -EAGAIN;
+ }
spin_unlock(&block_group->lock);
ret = btrfs_inc_block_group_ro(block_group, false);
@@ -2034,7 +2134,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
return 0;
}
- if (block_group->reserved) {
+ if (block_group->reserved ||
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
+ &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
btrfs_dec_block_group_ro(block_group);
return -EAGAIN;
@@ -2043,6 +2145,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
block_group->alloc_offset = block_group->zone_capacity;
+ if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
+ block_group->meta_write_pointer = block_group->start +
+ block_group->zone_capacity;
block_group->free_space_ctl->free_space = 0;
btrfs_clear_treelog_bg(block_group);
btrfs_clear_data_reloc_bg(block_group);
@@ -2052,18 +2157,21 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *device = map->stripes[i].dev;
const u64 physical = map->stripes[i].physical;
+ struct btrfs_zoned_device_info *zinfo = device->zone_info;
- if (device->zone_info->max_active_zones == 0)
+ if (zinfo->max_active_zones == 0)
continue;
ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
physical >> SECTOR_SHIFT,
- device->zone_info->zone_size >> SECTOR_SHIFT,
+ zinfo->zone_size >> SECTOR_SHIFT,
GFP_NOFS);
if (ret)
return ret;
+ if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ zinfo->reserved_active_zones++;
btrfs_dev_clear_active_zone(device, physical);
}
@@ -2102,8 +2210,10 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
/* Check if there is a device with active zones left */
mutex_lock(&fs_info->chunk_mutex);
+ spin_lock(&fs_info->zone_active_bgs_lock);
list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
struct btrfs_zoned_device_info *zinfo = device->zone_info;
+ int reserved = 0;
if (!device->bdev)
continue;
@@ -2113,17 +2223,21 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
break;
}
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ reserved = zinfo->reserved_active_zones;
+
switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
case 0: /* single */
- ret = (atomic_read(&zinfo->active_zones_left) >= 1);
+ ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
break;
case BTRFS_BLOCK_GROUP_DUP:
- ret = (atomic_read(&zinfo->active_zones_left) >= 2);
+ ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
break;
}
if (ret)
break;
}
+ spin_unlock(&fs_info->zone_active_bgs_lock);
mutex_unlock(&fs_info->chunk_mutex);
if (!ret)
@@ -2265,7 +2379,10 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logica
/* All relocation extents are written. */
if (block_group->start + block_group->alloc_offset == logical + length) {
- /* Now, release this block group for further allocations. */
+ /*
+ * Now, release this block group for further allocations and
+ * zone finish.
+ */
clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
&block_group->runtime_flags);
}
@@ -2289,7 +2406,8 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
spin_lock(&block_group->lock);
if (block_group->reserved || block_group->alloc_offset == 0 ||
- (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
+ test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
spin_unlock(&block_group->lock);
continue;
}
@@ -2365,3 +2483,55 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
return 0;
}
+
+/*
+ * Reserve zones for one metadata block group, one tree-log block group, and one
+ * system block group.
+ */
+void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+ struct btrfs_block_group *block_group;
+ struct btrfs_device *device;
+ /* Reserve zones for normal SINGLE metadata and tree-log block group. */
+ unsigned int metadata_reserve = 2;
+ /* Reserve a zone for SINGLE system block group. */
+ unsigned int system_reserve = 1;
+
+ if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
+ return;
+
+ /*
+ * This function is called from the mount context. So, there is no
+ * parallel process touching the bits. No need for read_seqretry().
+ */
+ if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
+ metadata_reserve = 4;
+ if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
+ system_reserve = 2;
+
+ /* Apply the reservation on all the devices. */
+ mutex_lock(&fs_devices->device_list_mutex);
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (!device->bdev)
+ continue;
+
+ device->zone_info->reserved_active_zones =
+ metadata_reserve + system_reserve;
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ /* Release reservation for currently active block groups. */
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+ struct map_lookup *map = block_group->physical_map;
+
+ if (!(block_group->flags &
+ (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+ continue;
+
+ for (int i = 0; i < map->num_stripes; i++)
+ map->stripes[i].dev->zone_info->reserved_active_zones--;
+ }
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+}
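
A short worked example of the arithmetic above (illustrative numbers only): with DUP metadata and a SINGLE system profile, each device holds back 4 + 1 = 5 active zones, and one zone per stripe is released again for every metadata/system block group that is already active.

static unsigned int example_reserved_zones(bool meta_is_dup, bool sys_is_dup)
{
	unsigned int metadata_reserve = meta_is_dup ? 4 : 2; /* meta + tree-log */
	unsigned int system_reserve = sys_is_dup ? 2 : 1;

	return metadata_reserve + system_reserve;
}
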
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index 27322b926038..b9cec523b778 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -22,6 +22,11 @@ struct btrfs_zoned_device_info {
u8 zone_size_shift;
u32 nr_zones;
unsigned int max_active_zones;
+ /*
+ * Reserved active zones for one metadata and one system block group.
+ * It can vary per-device depending on the allocation status.
+ */
+ int reserved_active_zones;
atomic_t active_zones_left;
unsigned long *seq_zones;
unsigned long *empty_zones;
@@ -58,11 +63,8 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
struct extent_buffer *eb);
bool btrfs_use_zone_append(struct btrfs_bio *bbio);
void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
-bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret);
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
- struct extent_buffer *eb);
+int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct btrfs_eb_write_context *ctx);
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
u64 physical_start, u64 physical_pos);
@@ -81,6 +83,7 @@ void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logica
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info);
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info, bool do_finish);
+void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@@ -189,17 +192,10 @@ static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
}
-static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret)
-{
- return true;
-}
-
-static inline void btrfs_revert_meta_write_pointer(
- struct btrfs_block_group *cache,
- struct extent_buffer *eb)
+static inline int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
+ struct btrfs_eb_write_context *ctx)
{
+ return 0;
}
static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
@@ -262,6 +258,8 @@ static inline int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
return 0;
}
+static inline void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info) { }
+
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
diff --git a/fs/buffer.c b/fs/buffer.c
index bd091329026c..084a6ade108a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -49,6 +49,7 @@
#include <trace/events/block.h>
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
+#include <linux/sched/isolation.h>
#include "internal.h"
@@ -1225,19 +1226,14 @@ EXPORT_SYMBOL(mark_buffer_dirty);
void mark_buffer_write_io_error(struct buffer_head *bh)
{
- struct super_block *sb;
-
set_buffer_write_io_error(bh);
/* FIXME: do we need to set this in both places? */
if (bh->b_folio && bh->b_folio->mapping)
mapping_set_error(bh->b_folio->mapping, -EIO);
- if (bh->b_assoc_map)
+ if (bh->b_assoc_map) {
mapping_set_error(bh->b_assoc_map, -EIO);
- rcu_read_lock();
- sb = READ_ONCE(bh->b_bdev->bd_super);
- if (sb)
- errseq_set(&sb->s_wb_err, -EIO);
- rcu_read_unlock();
+ errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
+ }
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
@@ -1352,7 +1348,7 @@ static void bh_lru_install(struct buffer_head *bh)
* failing page migration.
* Skip putting upcoming bh into bh_lru until migration is done.
*/
- if (lru_cache_disabled()) {
+ if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
bh_lru_unlock();
return;
}
@@ -1382,6 +1378,10 @@ lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
check_irqs_on();
bh_lru_lock();
+ if (cpu_is_isolated(smp_processor_id())) {
+ bh_lru_unlock();
+ return NULL;
+ }
for (i = 0; i < BH_LRU_SIZE; i++) {
struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 175a25fcade8..009d23cd435b 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -259,9 +259,7 @@ static void cachefiles_write_complete(struct kiocb *iocb, long ret)
_enter("%ld", ret);
- /* Tell lockdep we inherited freeze protection from submission thread */
- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
- __sb_end_write(inode->i_sb, SB_FREEZE_WRITE);
+ kiocb_end_write(iocb);
if (ret < 0)
trace_cachefiles_io_error(object, inode, ret,
@@ -286,7 +284,6 @@ int __cachefiles_write(struct cachefiles_object *object,
{
struct cachefiles_cache *cache;
struct cachefiles_kiocb *ki;
- struct inode *inode;
unsigned int old_nofs;
ssize_t ret;
size_t len = iov_iter_count(iter);
@@ -322,19 +319,12 @@ int __cachefiles_write(struct cachefiles_object *object,
ki->iocb.ki_complete = cachefiles_write_complete;
atomic_long_add(ki->b_writing, &cache->b_writing);
- /* Open-code file_start_write here to grab freeze protection, which
- * will be released by another thread in aio_complete_rw(). Fool
- * lockdep by telling it the lock got released so that it doesn't
- * complain about the held lock when we return to userspace.
- */
- inode = file_inode(file);
- __sb_start_write(inode->i_sb, SB_FREEZE_WRITE);
- __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+ kiocb_start_write(&ki->iocb);
get_file(ki->iocb.ki_filp);
cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
- trace_cachefiles_write(object, inode, ki->iocb.ki_pos, len);
+ trace_cachefiles_write(object, file_inode(file), ki->iocb.ki_pos, len);
old_nofs = memalloc_nofs_save();
ret = cachefiles_inject_write_error();
if (ret == 0)
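
The open-coded freeze-protection handling is replaced by the kiocb helpers above; a minimal sketch of the intended pairing for an async write whose completion runs on another thread (only the two helpers are existing API, the surrounding names are illustrative):

static void example_issue_async_write(struct kiocb *iocb)
{
	kiocb_start_write(iocb);	/* take sb freeze protection */
	/* ... queue the I/O without blocking ... */
}

static void example_write_complete(struct kiocb *iocb, long ret)
{
	kiocb_end_write(iocb);		/* drop it on the completion side */
	/* ... propagate ret ... */
}
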
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 6945a938d396..c91b293267d7 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -93,7 +93,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
char *value = NULL;
struct iattr newattrs;
struct inode *inode = d_inode(dentry);
- struct timespec64 old_ctime = inode->i_ctime;
+ struct timespec64 old_ctime = inode_get_ctime(inode);
umode_t new_mode = inode->i_mode, old_mode = inode->i_mode;
if (ceph_snap(inode) != CEPH_NOSNAP) {
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index e2bb0d0072da..09cd6d334604 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1400,7 +1400,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
arg->mtime = inode->i_mtime;
arg->atime = inode->i_atime;
- arg->ctime = inode->i_ctime;
+ arg->ctime = inode_get_ctime(inode);
arg->btime = ci->i_btime;
arg->change_attr = inode_peek_iversion_raw(inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8e5f41d45283..fd05d68e2990 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -100,7 +100,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
inode->i_uid = parent->i_uid;
inode->i_gid = parent->i_gid;
inode->i_mtime = parent->i_mtime;
- inode->i_ctime = parent->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
inode->i_atime = parent->i_atime;
ci->i_rbytes = 0;
ci->i_btime = ceph_inode(parent)->i_btime;
@@ -688,6 +688,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
struct timespec64 *mtime, struct timespec64 *atime)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct timespec64 ictime = inode_get_ctime(inode);
int warn = 0;
if (issued & (CEPH_CAP_FILE_EXCL|
@@ -696,11 +697,11 @@ void ceph_fill_file_time(struct inode *inode, int issued,
CEPH_CAP_AUTH_EXCL|
CEPH_CAP_XATTR_EXCL)) {
if (ci->i_version == 0 ||
- timespec64_compare(ctime, &inode->i_ctime) > 0) {
+ timespec64_compare(ctime, &ictime) > 0) {
dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
- inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ ictime.tv_sec, ictime.tv_nsec,
ctime->tv_sec, ctime->tv_nsec);
- inode->i_ctime = *ctime;
+ inode_set_ctime_to_ts(inode, *ctime);
}
if (ci->i_version == 0 ||
ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
@@ -738,7 +739,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
} else {
/* we have no write|excl caps; whatever the MDS says is true */
if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
- inode->i_ctime = *ctime;
+ inode_set_ctime_to_ts(inode, *ctime);
inode->i_mtime = *mtime;
inode->i_atime = *atime;
ci->i_time_warp_seq = time_warp_seq;
@@ -2166,7 +2167,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
- inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ inode_get_ctime(inode).tv_sec,
+ inode_get_ctime(inode).tv_nsec,
attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
only ? "ctime only" : "ignored");
if (only) {
@@ -2191,7 +2193,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
if (dirtied) {
inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
&prealloc_cf);
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
inode_inc_iversion_raw(inode);
}
@@ -2465,7 +2467,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
return err;
}
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->ino = ceph_present_inode(inode);
/*
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 343d738448dc..c9920ade15f5 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -660,7 +660,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size = i_size_read(inode);
capsnap->mtime = inode->i_mtime;
capsnap->atime = inode->i_atime;
- capsnap->ctime = inode->i_ctime;
+ capsnap->ctime = inode_get_ctime(inode);
capsnap->btime = ci->i_btime;
capsnap->change_attr = inode_peek_iversion_raw(inode);
capsnap->time_warp_seq = ci->i_time_warp_seq;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 806183959c47..1cbd84cc82a8 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1238,7 +1238,7 @@ retry:
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
&prealloc_cf);
ci->i_xattrs.dirty = true;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
}
spin_unlock(&ci->i_ceph_lock);
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index 903ca8fa4b9b..ae023853a98f 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -127,7 +127,8 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
if (attr->va_mtime.tv_sec != -1)
inode->i_mtime = coda_to_timespec64(attr->va_mtime);
if (attr->va_ctime.tv_sec != -1)
- inode->i_ctime = coda_to_timespec64(attr->va_ctime);
+ inode_set_ctime_to_ts(inode,
+ coda_to_timespec64(attr->va_ctime));
}
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 1b960de2bf39..cb512b10473b 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -111,7 +111,7 @@ static inline void coda_dir_update_mtime(struct inode *dir)
/* optimistically we can also act as if our nose bleeds. The
* granularity of the mtime is coarse anyways so we might actually be
* right most of the time. Note: we only do this for directories. */
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
#endif
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 12b26bd13564..42346618b4ed 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -84,7 +84,7 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
- coda_inode->i_mtime = coda_inode->i_ctime = current_time(coda_inode);
+ coda_inode->i_mtime = inode_set_ctime_current(coda_inode);
inode_unlock(coda_inode);
file_end_write(host_file);
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index d661e6cf17ac..0c7c2528791e 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -256,7 +256,8 @@ int coda_getattr(struct mnt_idmap *idmap, const struct path *path,
{
int err = coda_revalidate_inode(d_inode(path->dentry));
if (!err)
- generic_fillattr(&nop_mnt_idmap, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask,
+ d_inode(path->dentry), stat);
return err;
}
@@ -269,7 +270,7 @@ int coda_setattr(struct mnt_idmap *idmap, struct dentry *de,
memset(&vattr, 0, sizeof(vattr));
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
coda_iattr_to_vattr(iattr, &vattr);
vattr.va_type = C_VNON; /* cannot set type */
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 1c15edbe70ff..fbdcb3582926 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -88,8 +88,7 @@ int configfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime =
- inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
@@ -99,7 +98,7 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_gid = iattr->ia_gid;
inode->i_atime = iattr->ia_atime;
inode->i_mtime = iattr->ia_mtime;
- inode->i_ctime = iattr->ia_ctime;
+ inode_set_ctime_to_ts(inode, iattr->ia_ctime);
}
struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
@@ -172,7 +171,7 @@ struct inode *configfs_create(struct dentry *dentry, umode_t mode)
return ERR_PTR(-ENOMEM);
p_inode = d_inode(dentry->d_parent);
- p_inode->i_mtime = p_inode->i_ctime = current_time(p_inode);
+ p_inode->i_mtime = inode_set_ctime_current(p_inode);
configfs_set_inode_lock_class(sd, inode);
return inode;
}
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 27c6597aa1be..5ee7d7bbb361 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -133,7 +133,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
}
/* Struct copy intentional */
- inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
+ zerotime);
/* inode->i_nlink is left 1 - arguably wrong for directories,
but it's the best we can do without reading the directory
contents. 1 yields the right result in GNU find, even
@@ -485,12 +486,16 @@ static void cramfs_kill_sb(struct super_block *sb)
{
struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
+ generic_shutdown_super(sb);
+
if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sb->s_mtd) {
if (sbi && sbi->mtd_point_size)
mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
- kill_mtd_super(sb);
+ put_mtd_device(sb->s_mtd);
+ sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
- kill_block_super(sb);
+ sync_blockdev(sb->s_bdev);
+ blkdev_put(sb->s_bdev, sb);
}
kfree(sbi);
}
diff --git a/fs/dcache.c b/fs/dcache.c
index 52e6d5fdab6b..25ac74d30bff 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1664,7 +1664,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
if (dentry == _data && dentry->d_lockref.count == 1)
return D_WALK_CONTINUE;
- printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+ WARN(1, "BUG: Dentry %p{i=%lx,n=%pd} "
" still in use (%d) [unmount of %s %s]\n",
dentry,
dentry->d_inode ?
@@ -1673,7 +1673,6 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
dentry->d_lockref.count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
- WARN_ON(1);
return D_WALK_CONTINUE;
}
@@ -3247,8 +3246,6 @@ void d_genocide(struct dentry *parent)
d_walk(parent, parent, d_genocide_kill);
}
-EXPORT_SYMBOL(d_genocide);
-
void d_tmpfile(struct file *file, struct inode *inode)
{
struct dentry *dentry = file->f_path.dentry;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 3f81f73c241a..83e57e9f9fa0 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -72,8 +72,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime =
- inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
return inode;
}
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index fe3db0eda8e4..299c295a27a0 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -338,7 +338,7 @@ static int mknod_ptmx(struct super_block *sb)
}
inode->i_ino = 2;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
@@ -451,7 +451,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
if (!inode)
goto fail;
inode->i_ino = 1;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -534,12 +534,12 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx)
/**
* devpts_pty_new -- create a new inode in /dev/pts/
- * @ptmx_inode: inode of the master
- * @device: major+minor of the node to be created
+ * @fsi: Filesystem info for this instance.
* @index: used as a name of the node
* @priv: what's given back by devpts_get_priv
*
- * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
+ * The dentry for the created inode is returned.
+ * Remove it from /dev/pts/ with devpts_pty_kill().
*/
struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
{
@@ -560,7 +560,7 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
inode->i_ino = index + 3;
inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
sprintf(s, "%d", index);
@@ -580,7 +580,7 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
/**
* devpts_get_priv -- get private data for a slave
- * @pts_inode: inode of the slave
+ * @dentry: dentry of the slave
*
* Returns whatever was passed as priv in devpts_pty_new for a given inode.
*/
@@ -593,7 +593,7 @@ void *devpts_get_priv(struct dentry *dentry)
/**
 * devpts_pty_kill -- remove inode from /dev/pts/
- * @inode: inode of the slave to be removed
+ * @dentry: dentry of the slave to be removed
*
* This is an inverse operation of devpts_pty_new.
*/
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c16f0d660cb7..03bd55069d86 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -441,10 +441,10 @@ int ecryptfs_encrypt_page(struct page *page)
}
lower_offset = lower_offset_for_page(crypt_stat, page);
- enc_extent_virt = kmap(enc_extent_page);
+ enc_extent_virt = kmap_local_page(enc_extent_page);
rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
PAGE_SIZE);
- kunmap(enc_extent_page);
+ kunmap_local(enc_extent_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to write lower page; rc = [%d]\n",
@@ -490,10 +490,10 @@ int ecryptfs_decrypt_page(struct page *page)
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
lower_offset = lower_offset_for_page(crypt_stat, page);
- page_virt = kmap(page);
+ page_virt = kmap_local_page(page);
rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
ecryptfs_inode);
- kunmap(page);
+ kunmap_local(page_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to read lower page; rc = [%d]\n",
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 83274915ba6d..992d9c7e64ae 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -148,7 +148,7 @@ static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
}
fsstack_copy_attr_times(dir, lower_dir);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
out_unlock:
dput(lower_dentry);
inode_unlock(lower_dir);
@@ -982,7 +982,7 @@ static int ecryptfs_getattr_link(struct mnt_idmap *idmap,
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
- generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
char *target;
size_t targetsiz;
@@ -1011,7 +1011,8 @@ static int ecryptfs_getattr(struct mnt_idmap *idmap,
if (!rc) {
fsstack_copy_attr_all(d_inode(dentry),
ecryptfs_inode_to_lower(d_inode(dentry)));
- generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask,
+ d_inode(dentry), stat);
stat->blocks = lower_stat.blocks;
}
return rc;
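
The getattr changes in this series add a request_mask argument so generic_fillattr() can skip statx fields the caller did not ask for; a minimal sketch of the new call shape, assuming the 6.6 prototypes:

static int example_getattr(struct mnt_idmap *idmap, const struct path *path,
			   struct kstat *stat, u32 request_mask,
			   unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
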
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 373c3e5747e6..e2483acc4366 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -125,7 +125,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
/* This is a header extent */
char *page_virt;
- page_virt = kmap_atomic(page);
+ page_virt = kmap_local_page(page);
memset(page_virt, 0, PAGE_SIZE);
/* TODO: Support more than one header extent */
if (view_extent_num == 0) {
@@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
crypt_stat,
&written);
}
- kunmap_atomic(page_virt);
+ kunmap_local(page_virt);
flush_dcache_page(page);
if (rc) {
printk(KERN_ERR "%s: Error reading xattr "
@@ -255,7 +255,6 @@ out:
* @mapping: The eCryptfs object
* @pos: The file offset at which to start writing
* @len: Length of the write
- * @flags: Various flags
* @pagep: Pointer to return the page
* @fsdata: Pointer to return fs data (unused)
*
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 60bdcaddcbe5..3458f153a588 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -64,11 +64,11 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
+ offset_in_page);
- virt = kmap(page_for_lower);
+ virt = kmap_local_page(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
if (rc > 0)
rc = 0;
- kunmap(page_for_lower);
+ kunmap_local(virt);
return rc;
}
@@ -140,7 +140,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
ecryptfs_page_idx, rc);
goto out;
}
- ecryptfs_page_virt = kmap_atomic(ecryptfs_page);
+ ecryptfs_page_virt = kmap_local_page(ecryptfs_page);
/*
* pos: where we're now writing, offset: where the request was
@@ -163,7 +163,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
(data + data_offset), num_bytes);
data_offset += num_bytes;
}
- kunmap_atomic(ecryptfs_page_virt);
+ kunmap_local(ecryptfs_page_virt);
flush_dcache_page(ecryptfs_page);
SetPageUptodate(ecryptfs_page);
unlock_page(ecryptfs_page);
@@ -253,11 +253,11 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
int rc;
offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
- virt = kmap(page_for_ecryptfs);
+ virt = kmap_local_page(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
rc = 0;
- kunmap(page_for_ecryptfs);
+ kunmap_local(virt);
flush_dcache_page(page_for_ecryptfs);
return rc;
}
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index d57ee15874f9..59b52718a3a2 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -51,7 +51,7 @@ static ssize_t efivarfs_file_write(struct file *file,
} else {
inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
- inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
inode_unlock(inode);
}
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index b973a2c03dde..db9231f0e77b 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -25,7 +25,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
switch (mode & S_IFMT) {
case S_IFREG:
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 3ba94bb005a6..3789d22ba501 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -105,8 +105,8 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
inode->i_size = be32_to_cpu(efs_inode->di_size);
inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
- inode->i_ctime.tv_sec = be32_to_cpu(efs_inode->di_ctime);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode, be32_to_cpu(efs_inode->di_ctime), 0);
+ inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
/* this is the number of blocks in the file */
if (inode->i_size == 0) {
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index f259d92c9720..f6dc961e6c2b 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -38,6 +38,7 @@ config EROFS_FS_DEBUG
config EROFS_FS_XATTR
bool "EROFS extended attributes"
depends on EROFS_FS
+ select XXHASH
default y
help
Extended attributes are name:value pairs associated with inodes by
@@ -99,6 +100,21 @@ config EROFS_FS_ZIP_LZMA
If unsure, say N.
+config EROFS_FS_ZIP_DEFLATE
+ bool "EROFS DEFLATE compressed data support"
+ depends on EROFS_FS_ZIP
+ select ZLIB_INFLATE
+ help
+ Saying Y here includes support for reading EROFS file systems
+ containing DEFLATE compressed data. It gives better compression
+	  containing DEFLATE compressed data. It gives better compression
+	  ratios than the default LZ4 format, at the cost of more CPU overhead.
+
+	  DEFLATE support is an experimental feature for now, so most
+	  file systems will still be readable without selecting this option.
+
+ If unsure, say N.
+
config EROFS_FS_ONDEMAND
bool "EROFS fscache-based on-demand read support"
depends on CACHEFILES_ONDEMAND && (EROFS_FS=m && FSCACHE || EROFS_FS=y && FSCACHE=y)
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
index a3a98fc3e481..994d0b9deddf 100644
--- a/fs/erofs/Makefile
+++ b/fs/erofs/Makefile
@@ -5,4 +5,5 @@ erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o
erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o
erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
+erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index b1b846504027..349c3316ae6b 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -94,4 +94,6 @@ extern const struct z_erofs_decompressor erofs_decompressors[];
/* prototypes for specific algorithms */
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
+int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pagepool);
#endif
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index cfad1eac7fd9..332ec5f74002 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -379,4 +379,10 @@ const struct z_erofs_decompressor erofs_decompressors[] = {
.name = "lzma"
},
#endif
+#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
+ [Z_EROFS_COMPRESSION_DEFLATE] = {
+ .decompress = z_erofs_deflate_decompress,
+ .name = "deflate"
+ },
+#endif
};
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
new file mode 100644
index 000000000000..19e5bdeb30b6
--- /dev/null
+++ b/fs/erofs/decompressor_deflate.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include "compress.h"
+
+struct z_erofs_deflate {
+ struct z_erofs_deflate *next;
+ struct z_stream_s z;
+ u8 bounce[PAGE_SIZE];
+};
+
+static DEFINE_SPINLOCK(z_erofs_deflate_lock);
+static unsigned int z_erofs_deflate_nstrms, z_erofs_deflate_avail_strms;
+static struct z_erofs_deflate *z_erofs_deflate_head;
+static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq);
+
+module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444);
+
+void z_erofs_deflate_exit(void)
+{
+ /* there should be no running fs instance */
+ while (z_erofs_deflate_avail_strms) {
+ struct z_erofs_deflate *strm;
+
+ spin_lock(&z_erofs_deflate_lock);
+ strm = z_erofs_deflate_head;
+ if (!strm) {
+ spin_unlock(&z_erofs_deflate_lock);
+ continue;
+ }
+ z_erofs_deflate_head = NULL;
+ spin_unlock(&z_erofs_deflate_lock);
+
+ while (strm) {
+ struct z_erofs_deflate *n = strm->next;
+
+ vfree(strm->z.workspace);
+ kfree(strm);
+ --z_erofs_deflate_avail_strms;
+ strm = n;
+ }
+ }
+}
+
+int __init z_erofs_deflate_init(void)
+{
+ /* by default, use # of possible CPUs instead */
+ if (!z_erofs_deflate_nstrms)
+ z_erofs_deflate_nstrms = num_possible_cpus();
+
+ for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+ ++z_erofs_deflate_avail_strms) {
+ struct z_erofs_deflate *strm;
+
+ strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+ if (!strm)
+ goto out_failed;
+
+ /* XXX: in-kernel zlib cannot shrink windowbits currently */
+ strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+ if (!strm->z.workspace) {
+ kfree(strm);
+ goto out_failed;
+ }
+
+ spin_lock(&z_erofs_deflate_lock);
+ strm->next = z_erofs_deflate_head;
+ z_erofs_deflate_head = strm;
+ spin_unlock(&z_erofs_deflate_lock);
+ }
+ return 0;
+
+out_failed:
+ pr_err("failed to allocate zlib workspace\n");
+ z_erofs_deflate_exit();
+ return -ENOMEM;
+}
+
+int z_erofs_load_deflate_config(struct super_block *sb,
+ struct erofs_super_block *dsb,
+ struct z_erofs_deflate_cfgs *dfl, int size)
+{
+ if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
+ erofs_err(sb, "invalid deflate cfgs, size=%u", size);
+ return -EINVAL;
+ }
+
+ if (dfl->windowbits > MAX_WBITS) {
+ erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
+ return -EOPNOTSUPP;
+ }
+
+ erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
+ return 0;
+}
+
+int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pagepool)
+{
+ const unsigned int nrpages_out =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const unsigned int nrpages_in =
+ PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
+ struct super_block *sb = rq->sb;
+ unsigned int insz, outsz, pofs;
+ struct z_erofs_deflate *strm;
+ u8 *kin, *kout = NULL;
+ bool bounced = false;
+ int no = -1, ni = 0, j = 0, zerr, err;
+
+ /* 1. get the exact DEFLATE compressed size */
+ kin = kmap_local_page(*rq->in);
+ err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
+ min_t(unsigned int, rq->inputsize,
+ sb->s_blocksize - rq->pageofs_in));
+ if (err) {
+ kunmap_local(kin);
+ return err;
+ }
+
+ /* 2. get an available DEFLATE context */
+again:
+ spin_lock(&z_erofs_deflate_lock);
+ strm = z_erofs_deflate_head;
+ if (!strm) {
+ spin_unlock(&z_erofs_deflate_lock);
+ wait_event(z_erofs_deflate_wq, READ_ONCE(z_erofs_deflate_head));
+ goto again;
+ }
+ z_erofs_deflate_head = strm->next;
+ spin_unlock(&z_erofs_deflate_lock);
+
+ /* 3. multi-call decompress */
+ insz = rq->inputsize;
+ outsz = rq->outputsize;
+ zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS);
+ if (zerr != Z_OK) {
+ err = -EIO;
+ goto failed_zinit;
+ }
+
+ pofs = rq->pageofs_out;
+ strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
+ insz -= strm->z.avail_in;
+ strm->z.next_in = kin + rq->pageofs_in;
+ strm->z.avail_out = 0;
+
+ while (1) {
+ if (!strm->z.avail_out) {
+ if (++no >= nrpages_out || !outsz) {
+ erofs_err(sb, "insufficient space for decompressed data");
+ err = -EFSCORRUPTED;
+ break;
+ }
+
+ if (kout)
+ kunmap_local(kout);
+ strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
+ outsz -= strm->z.avail_out;
+ if (!rq->out[no]) {
+ rq->out[no] = erofs_allocpage(pagepool,
+ GFP_KERNEL | __GFP_NOFAIL);
+ set_page_private(rq->out[no],
+ Z_EROFS_SHORTLIVED_PAGE);
+ }
+ kout = kmap_local_page(rq->out[no]);
+ strm->z.next_out = kout + pofs;
+ pofs = 0;
+ }
+
+ if (!strm->z.avail_in && insz) {
+ if (++ni >= nrpages_in) {
+ erofs_err(sb, "invalid compressed data");
+ err = -EFSCORRUPTED;
+ break;
+ }
+
+			if (kout) { /* unlike kmap(), unmap in reverse order */
+ j = strm->z.next_out - kout;
+ kunmap_local(kout);
+ }
+ kunmap_local(kin);
+ strm->z.avail_in = min_t(u32, insz, PAGE_SIZE);
+ insz -= strm->z.avail_in;
+ kin = kmap_local_page(rq->in[ni]);
+ strm->z.next_in = kin;
+ bounced = false;
+ if (kout) {
+ kout = kmap_local_page(rq->out[no]);
+ strm->z.next_out = kout + j;
+ }
+ }
+
+ /*
+		 * Handle overlapping: use the bounce buffer if the compressed
+		 * data is still being processed, or use short-lived pages from
+		 * the on-stack pagepool, where pages are shared within the same
+		 * request and not _all_ in-place I/O pages need to be doubled.
+ */
+ if (!bounced && rq->out[no] == rq->in[ni]) {
+ memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in);
+ strm->z.next_in = strm->bounce;
+ bounced = true;
+ }
+
+ for (j = ni + 1; j < nrpages_in; ++j) {
+ struct page *tmppage;
+
+ if (rq->out[no] != rq->in[j])
+ continue;
+
+ DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
+ rq->in[j]));
+ tmppage = erofs_allocpage(pagepool,
+ GFP_KERNEL | __GFP_NOFAIL);
+ set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
+ copy_highpage(tmppage, rq->in[j]);
+ rq->in[j] = tmppage;
+ }
+
+ zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH);
+ if (zerr != Z_OK || !(outsz + strm->z.avail_out)) {
+ if (zerr == Z_OK && rq->partial_decoding)
+ break;
+ if (zerr == Z_STREAM_END && !outsz)
+ break;
+ erofs_err(sb, "failed to decompress %d in[%u] out[%u]",
+ zerr, rq->inputsize, rq->outputsize);
+ err = -EFSCORRUPTED;
+ break;
+ }
+ }
+
+ if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
+ err = -EIO;
+ if (kout)
+ kunmap_local(kout);
+failed_zinit:
+ kunmap_local(kin);
+ /* 4. push back DEFLATE stream context to the global list */
+ spin_lock(&z_erofs_deflate_lock);
+ strm->next = z_erofs_deflate_head;
+ z_erofs_deflate_head = strm;
+ spin_unlock(&z_erofs_deflate_lock);
+ wake_up(&z_erofs_deflate_wq);
+ return err;
+}
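
The decompressor above keeps its zlib contexts on a global free list guarded by a spinlock, with a waitqueue for when the list runs dry; a stripped-down sketch of that borrow/return pattern (all names here are illustrative, not the kernel's):

struct example_strm { struct example_strm *next; };

static DEFINE_SPINLOCK(example_lock);
static struct example_strm *example_head;
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static struct example_strm *example_get_strm(void)
{
	struct example_strm *strm;

again:
	spin_lock(&example_lock);
	strm = example_head;
	if (!strm) {
		spin_unlock(&example_lock);
		wait_event(example_wq, READ_ONCE(example_head));
		goto again;
	}
	example_head = strm->next;
	spin_unlock(&example_lock);
	return strm;
}

static void example_put_strm(struct example_strm *strm)
{
	spin_lock(&example_lock);
	strm->next = example_head;
	example_head = strm;
	spin_unlock(&example_lock);
	wake_up(&example_wq);
}
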
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index 2c7b16e340fe..a03ec70ba6f2 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -13,6 +13,7 @@
#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
#define EROFS_FEATURE_COMPAT_MTIME 0x00000002
+#define EROFS_FEATURE_COMPAT_XATTR_FILTER 0x00000004
/*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
@@ -81,7 +82,8 @@ struct erofs_super_block {
__u8 xattr_prefix_count; /* # of long xattr name prefixes */
__le32 xattr_prefix_start; /* start of long xattr prefixes */
__le64 packed_nid; /* nid of the special packed inode */
- __u8 reserved2[24];
+ __u8 xattr_filter_reserved; /* reserved for xattr name filter */
+ __u8 reserved2[23];
};
/*
@@ -200,7 +202,7 @@ struct erofs_inode_extended {
* for read-only fs, no need to introduce h_refcount
*/
struct erofs_xattr_ibody_header {
- __le32 h_reserved;
+ __le32 h_name_filter; /* bit value 1 indicates not-present */
__u8 h_shared_count;
__u8 h_reserved2[7];
__le32 h_shared_xattrs[]; /* shared xattr id array */
@@ -221,6 +223,10 @@ struct erofs_xattr_ibody_header {
#define EROFS_XATTR_LONG_PREFIX 0x80
#define EROFS_XATTR_LONG_PREFIX_MASK 0x7f
+#define EROFS_XATTR_FILTER_BITS 32
+#define EROFS_XATTR_FILTER_DEFAULT UINT32_MAX
+#define EROFS_XATTR_FILTER_SEED 0x25BBE08F
+
/* xattr entry (for both inline & shared xattrs) */
struct erofs_xattr_entry {
__u8 e_name_len; /* length of name */
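
The 32-bit h_name_filter together with EROFS_XATTR_FILTER_SEED suggests a per-inode negative-lookup filter; a heavily hedged sketch of how such a filter could be consulted, assuming xxh32() hashing of the name (the exact on-disk hashing scheme is an assumption here, not taken from this patch):

#include <linux/xxhash.h>

static bool example_xattr_maybe_present(u32 name_filter, u8 name_index,
					const char *name, size_t len)
{
	/* Assumed scheme: hash the name, fold into one of 32 filter bits. */
	u32 bit = xxh32(name, len, EROFS_XATTR_FILTER_SEED + name_index) &
		  (EROFS_XATTR_FILTER_BITS - 1);

	/* A set bit means "definitely not present"; a clear bit means "maybe". */
	return !(name_filter & (1U << bit));
}
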
@@ -289,6 +295,7 @@ struct erofs_dirent {
enum {
Z_EROFS_COMPRESSION_LZ4 = 0,
Z_EROFS_COMPRESSION_LZMA = 1,
+ Z_EROFS_COMPRESSION_DEFLATE = 2,
Z_EROFS_COMPRESSION_MAX
};
#define Z_EROFS_ALL_COMPR_ALGS ((1 << Z_EROFS_COMPRESSION_MAX) - 1)
@@ -309,6 +316,12 @@ struct z_erofs_lzma_cfgs {
#define Z_EROFS_LZMA_MAX_DICT_SIZE (8 * Z_EROFS_PCLUSTER_MAX_SIZE)
+/* 6 bytes (+ length field = 8 bytes) */
+struct z_erofs_deflate_cfgs {
+ u8 windowbits; /* 8..15 for DEFLATE */
+ u8 reserved[5];
+} __packed;
+
/*
* bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
* e.g. for 4k logical cluster size, 4B if compacted 2B is off;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index e12592727a54..edc8ec7581b8 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -105,8 +105,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
set_nlink(inode, le32_to_cpu(die->i_nlink));
/* extended inode has its own timestamp */
- inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
- inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);
+ inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
+ le32_to_cpu(die->i_mtime_nsec));
inode->i_size = le64_to_cpu(die->i_size);
@@ -148,8 +148,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
set_nlink(inode, le16_to_cpu(dic->i_nlink));
/* use build time for compact inodes */
- inode->i_ctime.tv_sec = sbi->build_time;
- inode->i_ctime.tv_nsec = sbi->build_time_nsec;
+ inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);
inode->i_size = le32_to_cpu(dic->i_size);
if (erofs_inode_is_data_compressed(vi->datalayout))
@@ -176,10 +175,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
- inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
- inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
- inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
- inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
+ inode->i_mtime = inode->i_atime = inode_get_ctime(inode);
inode->i_flags &= ~S_DAX;
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
@@ -373,7 +369,7 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
STATX_ATTR_IMMUTABLE);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 36e32fa542f0..4ff88d0dd980 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -151,6 +151,7 @@ struct erofs_sb_info {
u32 xattr_prefix_start;
u8 xattr_prefix_count;
struct erofs_xattr_prefix_item *xattr_prefixes;
+ unsigned int xattr_filter_reserved;
#endif
u16 device_id_mask; /* valid bits of device id to be used */
@@ -251,6 +252,7 @@ EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
+EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER)
/* atomic flag definitions */
#define EROFS_I_EA_INITED_BIT 0
@@ -270,6 +272,7 @@ struct erofs_inode {
unsigned char inode_isize;
unsigned int xattr_isize;
+ unsigned int xattr_name_filter;
unsigned int xattr_shared_count;
unsigned int *xattr_shared_xattrs;
@@ -519,6 +522,26 @@ static inline int z_erofs_load_lzma_config(struct super_block *sb,
}
#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */
+#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
+int __init z_erofs_deflate_init(void);
+void z_erofs_deflate_exit(void);
+int z_erofs_load_deflate_config(struct super_block *sb,
+ struct erofs_super_block *dsb,
+ struct z_erofs_deflate_cfgs *dfl, int size);
+#else
+static inline int z_erofs_deflate_init(void) { return 0; }
+static inline int z_erofs_deflate_exit(void) { return 0; }
+static inline int z_erofs_load_deflate_config(struct super_block *sb,
+ struct erofs_super_block *dsb,
+ struct z_erofs_deflate_cfgs *dfl, int size) {
+ if (dfl) {
+ erofs_err(sb, "deflate algorithm isn't enabled");
+ return -EINVAL;
+ }
+ return 0;
+}
+#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */
+
#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 566f68ddfa36..44a24d573f1f 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -19,10 +19,8 @@
#include <trace/events/erofs.h>
static struct kmem_cache *erofs_inode_cachep __read_mostly;
-struct file_system_type erofs_fs_type;
-void _erofs_err(struct super_block *sb, const char *function,
- const char *fmt, ...)
+void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -32,12 +30,11 @@ void _erofs_err(struct super_block *sb, const char *function,
vaf.fmt = fmt;
vaf.va = &args;
- pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
+ pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
va_end(args);
}
-void _erofs_info(struct super_block *sb, const char *function,
- const char *fmt, ...)
+void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -102,11 +99,9 @@ static void erofs_free_inode(struct inode *inode)
{
struct erofs_inode *vi = EROFS_I(inode);
- /* be careful of RCU symlink path */
if (inode->i_op == &erofs_fast_symlink_iops)
kfree(inode->i_link);
kfree(vi->xattr_shared_xattrs);
-
kmem_cache_free(erofs_inode_cachep, vi);
}
@@ -119,8 +114,7 @@ static bool check_layout_compatibility(struct super_block *sb,
/* check if current kernel meets all mandatory requirements */
if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
- erofs_err(sb,
- "unidentified incompatible feature %x, please upgrade kernel version",
+ erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
feature & ~EROFS_ALL_FEATURE_INCOMPAT);
return false;
}
@@ -201,6 +195,9 @@ static int erofs_load_compr_cfgs(struct super_block *sb,
case Z_EROFS_COMPRESSION_LZMA:
ret = z_erofs_load_lzma_config(sb, dsb, data, size);
break;
+ case Z_EROFS_COMPRESSION_DEFLATE:
+ ret = z_erofs_load_deflate_config(sb, dsb, data, size);
+ break;
default:
DBG_BUGON(1);
ret = -EFAULT;
@@ -388,6 +385,7 @@ static int erofs_read_superblock(struct super_block *sb)
sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
sbi->xattr_prefix_count = dsb->xattr_prefix_count;
+ sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
sbi->root_nid = le16_to_cpu(dsb->root_nid);
@@ -420,16 +418,11 @@ static int erofs_read_superblock(struct super_block *sb)
if (erofs_is_fscache_mode(sb))
erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
- if (erofs_sb_has_fragments(sbi))
- erofs_info(sb, "EXPERIMENTAL compressed fragments feature in use. Use at your own risk!");
- if (erofs_sb_has_dedupe(sbi))
- erofs_info(sb, "EXPERIMENTAL global deduplication feature in use. Use at your own risk!");
out:
erofs_put_metabuf(&buf);
return ret;
}
-/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
@@ -731,7 +724,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
xa_init(&sbi->managed_pslots);
#endif
- /* get the root inode */
inode = erofs_iget(sb, ROOT_NID(sbi));
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -748,7 +740,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
return -ENOMEM;
erofs_shrinker_register(sb);
- /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
if (IS_ERR(sbi->packed_inode)) {
@@ -881,10 +872,6 @@ static int erofs_init_fs_context(struct fs_context *fc)
return 0;
}
-/*
- * could be triggered after deactivate_locked_super()
- * is called, thus including umount and failed to initialize.
- */
static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi;
@@ -913,7 +900,6 @@ static void erofs_kill_sb(struct super_block *sb)
sb->s_fs_info = NULL;
}
-/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
struct erofs_sb_info *const sbi = EROFS_SB(sb);
@@ -950,9 +936,9 @@ static int __init erofs_module_init(void)
erofs_check_ondisk_layout_definitions();
erofs_inode_cachep = kmem_cache_create("erofs_inode",
- sizeof(struct erofs_inode), 0,
- SLAB_RECLAIM_ACCOUNT,
- erofs_inode_init_once);
+ sizeof(struct erofs_inode), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
+ erofs_inode_init_once);
if (!erofs_inode_cachep)
return -ENOMEM;
@@ -964,6 +950,10 @@ static int __init erofs_module_init(void)
if (err)
goto lzma_err;
+ err = z_erofs_deflate_init();
+ if (err)
+ goto deflate_err;
+
erofs_pcpubuf_init();
err = z_erofs_init_zip_subsystem();
if (err)
@@ -984,6 +974,8 @@ fs_err:
sysfs_err:
z_erofs_exit_zip_subsystem();
zip_err:
+ z_erofs_deflate_exit();
+deflate_err:
z_erofs_lzma_exit();
lzma_err:
erofs_exit_shrinker();
@@ -1001,13 +993,13 @@ static void __exit erofs_module_exit(void)
erofs_exit_sysfs();
z_erofs_exit_zip_subsystem();
+ z_erofs_deflate_exit();
z_erofs_lzma_exit();
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
erofs_pcpubuf_exit();
}
-/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
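The erofs_module_init()/erofs_module_exit() hunks above slot z_erofs_deflate_init() and z_erofs_deflate_exit() into the existing unwind chain: each initialisation step gains a matching error label so a failure tears down only what was already set up, in reverse order. A stripped-down sketch of that pattern follows; all names are placeholders, and the failing step is simulated.

#include <stdio.h>

static int init_a(void)  { return 0; }
static void exit_a(void) { }
static int init_b(void)  { return 0; }
static void exit_b(void) { }
static int init_c(void)  { return -1; }   /* pretend the last step fails */

static int module_init_sketch(void)
{
        int err;

        err = init_a();
        if (err)
                goto a_err;
        err = init_b();
        if (err)
                goto b_err;
        err = init_c();
        if (err)
                goto c_err;
        return 0;

c_err:
        exit_b();       /* unwind strictly in reverse order of setup */
b_err:
        exit_a();
a_err:
        return err;
}

int main(void)
{
        printf("init: %d\n", module_init_sketch());
        return 0;
}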
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 40178b6e0688..09d341675e89 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021-2022, Alibaba Cloud
*/
#include <linux/security.h>
+#include <linux/xxhash.h>
#include "xattr.h"
struct erofs_xattr_iter {
@@ -87,6 +88,7 @@ static int erofs_init_inode_xattrs(struct inode *inode)
}
ih = it.kaddr + erofs_blkoff(sb, it.pos);
+ vi->xattr_name_filter = le32_to_cpu(ih->h_name_filter);
vi->xattr_shared_count = ih->h_shared_count;
vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
sizeof(uint), GFP_KERNEL);
@@ -392,7 +394,10 @@ int erofs_getxattr(struct inode *inode, int index, const char *name,
void *buffer, size_t buffer_size)
{
int ret;
+ unsigned int hashbit;
struct erofs_xattr_iter it;
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
if (!name)
return -EINVAL;
@@ -401,6 +406,15 @@ int erofs_getxattr(struct inode *inode, int index, const char *name,
if (ret)
return ret;
+ /* reserved flag is non-zero if there's any change of on-disk format */
+ if (erofs_sb_has_xattr_filter(sbi) && !sbi->xattr_filter_reserved) {
+ hashbit = xxh32(name, strlen(name),
+ EROFS_XATTR_FILTER_SEED + index);
+ hashbit &= EROFS_XATTR_FILTER_BITS - 1;
+ if (vi->xattr_name_filter & (1U << hashbit))
+ return -ENOATTR;
+ }
+
it.index = index;
it.name = (struct qstr)QSTR_INIT(name, strlen(name));
if (it.name.len > EROFS_NAME_LEN)
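The xattr hunks above add a 32-bit per-inode name filter: erofs_getxattr() hashes the name with xxh32 (seeded per name index), masks the result to one of 32 bits, and fails fast with -ENOATTR when that bit is set in vi->xattr_name_filter, with EROFS_XATTR_FILTER_DEFAULT (all bits set) meaning no xattr can match. The sketch below restates only that check in userspace; it assumes libxxhash's XXH32() as a stand-in for the kernel's xxh32() and is not the in-kernel code path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xxhash.h>              /* assumed: provides XXH32() */

#define EROFS_XATTR_FILTER_BITS 32
#define EROFS_XATTR_FILTER_SEED 0x25BBE08F

/* Per the hunk above, a *set* bit means the name is definitely absent,
 * so the lookup can be rejected without reading on-disk xattrs. */
static int xattr_maybe_present(uint32_t name_filter,
                               unsigned int name_index, const char *name)
{
        unsigned int bit = XXH32(name, strlen(name),
                                 EROFS_XATTR_FILTER_SEED + name_index);

        bit &= EROFS_XATTR_FILTER_BITS - 1;
        return !(name_filter & (1U << bit));    /* 0 => definitely absent */
}

int main(void)
{
        /* All bits set (the EROFS_XATTR_FILTER_DEFAULT case) rejects
         * every name without touching metadata. */
        printf("%d\n", xattr_maybe_present(0xFFFFFFFFu, 1, "selinux"));
        return 0;
}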
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index de4f12152b62..036f610e044b 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -143,22 +143,17 @@ static inline void z_erofs_onlinepage_split(struct page *page)
atomic_inc((atomic_t *)&page->private);
}
-static inline void z_erofs_page_mark_eio(struct page *page)
+static void z_erofs_onlinepage_endio(struct page *page, int err)
{
- int orig;
+ int orig, v;
+
+ DBG_BUGON(!PagePrivate(page));
do {
orig = atomic_read((atomic_t *)&page->private);
- } while (atomic_cmpxchg((atomic_t *)&page->private, orig,
- orig | Z_EROFS_PAGE_EIO) != orig);
-}
-
-static inline void z_erofs_onlinepage_endio(struct page *page)
-{
- unsigned int v;
+ v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0);
+ } while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig);
- DBG_BUGON(!PagePrivate(page));
- v = atomic_dec_return((atomic_t *)&page->private);
if (!(v & ~Z_EROFS_PAGE_EIO)) {
set_page_private(page, 0);
ClearPagePrivate(page);
@@ -507,19 +502,17 @@ enum z_erofs_pclustermode {
*/
Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/*
- * The current collection has been linked with the owned chain, and
- * could also be linked with the remaining collections, which means
- * if the processing page is the tail page of the collection, thus
- * the current collection can safely use the whole page (since
- * the previous collection is under control) for in-place I/O, as
- * illustrated below:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (of the current cl) | (of the previous collection) |
- * | | |
- * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
+ * The pcluster was just linked to a decompression chain by us. It can
+ * also be linked with the remaining pclusters, which means if the
+ * processing page is the tail page of a pcluster, this pcluster can
+ * safely use the whole page (since the previous pcluster is within the
+ * same chain) for in-place I/O, as illustrated below:
+ * ___________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current pcl) | (of the previous pcl) |
+ * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____|
*
- * [ (*) the above page can be used as inplace I/O. ]
+ * [ (*) the page above can be used as inplace I/O. ]
*/
Z_EROFS_PCLUSTER_FOLLOWED,
};
@@ -535,8 +528,6 @@ struct z_erofs_decompress_frontend {
z_erofs_next_pcluster_t owned_head;
enum z_erofs_pclustermode mode;
- /* used for applying cache strategy on the fly */
- bool backmost;
erofs_off_t headoffset;
/* a pointer used to pick up inplace I/O pages */
@@ -545,7 +536,7 @@ struct z_erofs_decompress_frontend {
#define DECOMPRESS_FRONTEND_INIT(__i) { \
.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
- .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
+ .mode = Z_EROFS_PCLUSTER_FOLLOWED }
static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
{
@@ -554,7 +545,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
return false;
- if (fe->backmost)
+ if (!(fe->map.m_flags & EROFS_MAP_FULL_MAPPED))
return true;
if (cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
@@ -851,9 +842,11 @@ err_out:
return err;
}
-static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
+static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
{
struct erofs_map_blocks *map = &fe->map;
+ struct super_block *sb = fe->inode->i_sb;
+ erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
struct erofs_workgroup *grp = NULL;
int ret;
@@ -863,8 +856,7 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
if (!(map->m_flags & EROFS_MAP_META)) {
- grp = erofs_find_workgroup(fe->inode->i_sb,
- map->m_pa >> PAGE_SHIFT);
+ grp = erofs_find_workgroup(sb, blknr);
} else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
DBG_BUGON(1);
return -EFSCORRUPTED;
@@ -883,9 +875,26 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
} else if (ret) {
return ret;
}
+
z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
- /* since file-backed online pages are traversed in reverse order */
+ if (!z_erofs_is_inline_pcluster(fe->pcl)) {
+ /* bind cache first when cached decompression is preferred */
+ z_erofs_bind_cache(fe);
+ } else {
+ void *mptr;
+
+ mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
+ if (IS_ERR(mptr)) {
+ ret = PTR_ERR(mptr);
+ erofs_err(sb, "failed to get inline data %d", ret);
+ return ret;
+ }
+ get_page(map->buf.page);
+ WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+ fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+ }
+ /* file-backed inplace I/O pages are traversed in reverse order */
fe->icur = z_erofs_pclusterpages(fe->pcl);
return 0;
}
@@ -908,12 +917,12 @@ void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
call_rcu(&pcl->rcu, z_erofs_rcu_callback);
}
-static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
+static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe)
{
struct z_erofs_pcluster *pcl = fe->pcl;
if (!pcl)
- return false;
+ return;
z_erofs_bvec_iter_end(&fe->biter);
mutex_unlock(&pcl->lock);
@@ -929,37 +938,29 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
erofs_workgroup_put(&pcl->obj);
fe->pcl = NULL;
- return true;
}
-static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
- struct page *page, unsigned int pageofs,
- unsigned int len)
+static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
+ unsigned int cur, unsigned int end, erofs_off_t pos)
{
- struct super_block *sb = inode->i_sb;
- struct inode *packed_inode = EROFS_I_SB(inode)->packed_inode;
+ struct inode *packed_inode = EROFS_SB(sb)->packed_inode;
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
- u8 *src, *dst;
- unsigned int i, cnt;
+ unsigned int cnt;
+ u8 *src;
if (!packed_inode)
return -EFSCORRUPTED;
buf.inode = packed_inode;
- pos += EROFS_I(inode)->z_fragmentoff;
- for (i = 0; i < len; i += cnt) {
- cnt = min_t(unsigned int, len - i,
+ for (; cur < end; cur += cnt, pos += cnt) {
+ cnt = min_t(unsigned int, end - cur,
sb->s_blocksize - erofs_blkoff(sb, pos));
src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP);
if (IS_ERR(src)) {
erofs_put_metabuf(&buf);
return PTR_ERR(src);
}
-
- dst = kmap_local_page(page);
- memcpy(dst + pageofs + i, src + erofs_blkoff(sb, pos), cnt);
- kunmap_local(dst);
- pos += cnt;
+ memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
}
erofs_put_metabuf(&buf);
return 0;
@@ -972,94 +973,60 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
struct erofs_map_blocks *const map = &fe->map;
const loff_t offset = page_offset(page);
bool tight = true, exclusive;
- unsigned int cur, end, spiltted;
+ unsigned int cur, end, len, split;
int err = 0;
- /* register locked file pages as online pages in pack */
z_erofs_onlinepage_init(page);
- spiltted = 0;
+ split = 0;
end = PAGE_SIZE;
repeat:
- cur = end - 1;
-
- if (offset + cur < map->m_la ||
- offset + cur >= map->m_la + map->m_llen) {
- if (z_erofs_collector_end(fe))
- fe->backmost = false;
- map->m_la = offset + cur;
+ if (offset + end - 1 < map->m_la ||
+ offset + end - 1 >= map->m_la + map->m_llen) {
+ z_erofs_pcluster_end(fe);
+ map->m_la = offset + end - 1;
map->m_llen = 0;
err = z_erofs_map_blocks_iter(inode, map, 0);
if (err)
goto out;
- } else {
- if (fe->pcl)
- goto hitted;
- /* didn't get a valid pcluster previously (very rare) */
}
- if (!(map->m_flags & EROFS_MAP_MAPPED) ||
- map->m_flags & EROFS_MAP_FRAGMENT)
- goto hitted;
-
- err = z_erofs_collector_begin(fe);
- if (err)
- goto out;
-
- if (z_erofs_is_inline_pcluster(fe->pcl)) {
- void *mp;
-
- mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
- erofs_blknr(inode->i_sb, map->m_pa),
- EROFS_NO_KMAP);
- if (IS_ERR(mp)) {
- err = PTR_ERR(mp);
- erofs_err(inode->i_sb,
- "failed to get inline page, err %d", err);
- goto out;
- }
- get_page(fe->map.buf.page);
- WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
- fe->map.buf.page);
- fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
- } else {
- /* bind cache first when cached decompression is preferred */
- z_erofs_bind_cache(fe);
- }
-hitted:
- /*
- * Ensure the current partial page belongs to this submit chain rather
- * than other concurrent submit chains or the noio(bypass) chain since
- * those chains are handled asynchronously thus the page cannot be used
- * for inplace I/O or bvpage (should be processed in a strict order.)
- */
- tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+ cur = offset > map->m_la ? 0 : map->m_la - offset;
+ /* bump split parts first to avoid several separate cases */
+ ++split;
- cur = end - min_t(erofs_off_t, offset + end - map->m_la, end);
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
zero_user_segment(page, cur, end);
+ tight = false;
goto next_part;
}
+
if (map->m_flags & EROFS_MAP_FRAGMENT) {
- unsigned int pageofs, skip, len;
+ erofs_off_t fpos = offset + cur - map->m_la;
- if (offset > map->m_la) {
- pageofs = 0;
- skip = offset - map->m_la;
- } else {
- pageofs = map->m_la & ~PAGE_MASK;
- skip = 0;
- }
- len = min_t(unsigned int, map->m_llen - skip, end - cur);
- err = z_erofs_read_fragment(inode, skip, page, pageofs, len);
+ len = min_t(unsigned int, map->m_llen - fpos, end - cur);
+ err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
+ EROFS_I(inode)->z_fragmentoff + fpos);
if (err)
goto out;
- ++spiltted;
tight = false;
goto next_part;
}
- exclusive = (!cur && (!spiltted || tight));
+ if (!fe->pcl) {
+ err = z_erofs_pcluster_begin(fe);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * Ensure the current partial page belongs to this submit chain rather
+ * than other concurrent submit chains or the noio(bypass) chain since
+ * those chains are handled asynchronously thus the page cannot be used
+ * for inplace I/O or bvpage (should be processed in a strict order.)
+ */
+ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+ exclusive = (!cur && ((split <= 1) || tight));
if (cur)
tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
@@ -1072,8 +1039,6 @@ hitted:
goto out;
z_erofs_onlinepage_split(page);
- /* bump up the number of spiltted parts of a page */
- ++spiltted;
if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
fe->pcl->multibases = true;
if (fe->pcl->length < offset + end - map->m_la) {
@@ -1094,9 +1059,7 @@ next_part:
goto repeat;
out:
- if (err)
- z_erofs_page_mark_eio(page);
- z_erofs_onlinepage_endio(page);
+ z_erofs_onlinepage_endio(page, err);
return err;
}
@@ -1199,9 +1162,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be,
cur += len;
}
kunmap_local(dst);
- if (err)
- z_erofs_page_mark_eio(bvi->bvec.page);
- z_erofs_onlinepage_endio(bvi->bvec.page);
+ z_erofs_onlinepage_endio(bvi->bvec.page, err);
list_del(p);
kfree(bvi);
}
@@ -1372,9 +1333,7 @@ out:
/* recycle all individual short-lived pages */
if (z_erofs_put_shortlivedpage(be->pagepool, page))
continue;
- if (err)
- z_erofs_page_mark_eio(page);
- z_erofs_onlinepage_endio(page);
+ z_erofs_onlinepage_endio(page, err);
}
if (be->decompressed_pages != be->onstack_pages)
@@ -1410,7 +1369,10 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
owned = READ_ONCE(be.pcl->next);
z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
- erofs_workgroup_put(&be.pcl->obj);
+ if (z_erofs_is_inline_pcluster(be.pcl))
+ z_erofs_free_pcluster(be.pcl);
+ else
+ erofs_workgroup_put(&be.pcl->obj);
}
}
@@ -1848,15 +1810,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
if (page) {
- if (PageUptodate(page)) {
+ if (PageUptodate(page))
unlock_page(page);
- } else {
- err = z_erofs_do_read_page(f, page);
- if (err)
- erofs_err(inode->i_sb,
- "readmore error at page %lu @ nid %llu",
- index, EROFS_I(inode)->nid);
- }
+ else
+ (void)z_erofs_do_read_page(f, page);
put_page(page);
}
@@ -1868,25 +1825,25 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
static int z_erofs_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *const inode = page->mapping->host;
+ struct inode *const inode = folio->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
int err;
- trace_erofs_readpage(page, false);
- f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
+ trace_erofs_read_folio(folio, false);
+ f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
z_erofs_pcluster_readmore(&f, NULL, true);
- err = z_erofs_do_read_page(&f, page);
+ err = z_erofs_do_read_page(&f, &folio->page);
z_erofs_pcluster_readmore(&f, NULL, false);
- (void)z_erofs_collector_end(&f);
+ z_erofs_pcluster_end(&f);
/* if some compressed cluster ready, need submit them anyway */
z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
- if (err)
- erofs_err(inode->i_sb, "failed to read, err [%d]", err);
+ if (err && err != -EINTR)
+ erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu",
+ err, folio->index, EROFS_I(inode)->nid);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
@@ -1898,38 +1855,35 @@ static void z_erofs_readahead(struct readahead_control *rac)
struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- struct page *head = NULL, *page;
- unsigned int nr_pages;
+ struct folio *head = NULL, *folio;
+ unsigned int nr_folios;
+ int err;
f.headoffset = readahead_pos(rac);
z_erofs_pcluster_readmore(&f, rac, true);
- nr_pages = readahead_count(rac);
- trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
+ nr_folios = readahead_count(rac);
+ trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false);
- while ((page = readahead_page(rac))) {
- set_page_private(page, (unsigned long)head);
- head = page;
+ while ((folio = readahead_folio(rac))) {
+ folio->private = head;
+ head = folio;
}
+ /* traverse in reverse order for best metadata I/O performance */
while (head) {
- struct page *page = head;
- int err;
-
- /* traversal in reverse order */
- head = (void *)page_private(page);
+ folio = head;
+ head = folio_get_private(folio);
- err = z_erofs_do_read_page(&f, page);
- if (err)
- erofs_err(inode->i_sb,
- "readahead error at page %lu @ nid %llu",
- page->index, EROFS_I(inode)->nid);
- put_page(page);
+ err = z_erofs_do_read_page(&f, &folio->page);
+ if (err && err != -EINTR)
+ erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
+ folio->index, EROFS_I(inode)->nid);
}
z_erofs_pcluster_readmore(&f, rac, false);
- (void)z_erofs_collector_end(&f);
+ z_erofs_pcluster_end(&f);
- z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
+ z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true);
erofs_put_metabuf(&f.map.buf);
erofs_release_pages(&f.pagepool);
}
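The zdata.c hunks above fold the old z_erofs_page_mark_eio()/z_erofs_onlinepage_endio() pair into one helper that drops a sub-page reference and ORs in the EIO flag in a single compare-exchange loop on page->private. Below is a minimal stand-alone sketch of that update pattern using C11 atomics; ERR_BIT is an assumed placeholder for Z_EROFS_PAGE_EIO, and this is an illustration rather than the kernel helper.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ERR_BIT (1U << 31)      /* assumed stand-in for Z_EROFS_PAGE_EIO */

/* Returns true when the last sub-page reference was dropped. */
static bool subpage_endio(_Atomic unsigned int *state, int err)
{
        unsigned int orig, v;

        orig = atomic_load(state);
        do {
                v = (orig - 1) | (err ? ERR_BIT : 0);
        } while (!atomic_compare_exchange_weak(state, &orig, v));

        return !(v & ~ERR_BIT);  /* only the error bit (if any) remains */
}

int main(void)
{
        _Atomic unsigned int state = 2;             /* two sub-page refs */

        printf("%d\n", subpage_endio(&state, 0));   /* 0: one ref remains   */
        printf("%d\n", subpage_endio(&state, -5));  /* 1: last ref, EIO set */
        return 0;
}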
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 1909ddafd9c7..7b55111fd533 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -561,8 +561,9 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
- map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
- map->m_llen >= i_blocksize(inode))) {
+ (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
+ map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE) &&
+ map->m_llen >= i_blocksize(inode))) {
err = z_erofs_get_extent_decompressedlen(&m);
if (!err)
map->m_flags |= EROFS_MAP_FULL_MAPPED;
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 8aa36cd37351..33a918f9566c 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -189,7 +189,7 @@ void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
lockdep_assert_held(&ctx->wqh.lock);
- *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
+ *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);
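The eventfd hunk above makes eventfd_ctx_do_read() report zero consumed when an EFD_SEMAPHORE counter is already zero, instead of claiming one. The user-visible semaphore semantics it preserves are standard eventfd(2) behaviour: each successful read() returns the value 1 and decrements the counter. A small usage sketch, with error handling trimmed:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
        uint64_t val;
        int fd = eventfd(3, EFD_SEMAPHORE);     /* counter starts at 3 */

        if (fd < 0)
                return 1;
        for (int i = 0; i < 3; i++) {
                if (read(fd, &val, sizeof(val)) == sizeof(val))
                        printf("read %llu\n", (unsigned long long)val);
        }
        /* A fourth blocking read() would now wait for another write();
         * the hunk above keeps the internal accounting at 0 consumed
         * when the counter is already 0. */
        close(fd);
        return 0;
}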
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4b1b3362f697..1d9a71a0c4c1 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -975,15 +975,11 @@ again:
static int ep_alloc(struct eventpoll **pep)
{
- int error;
- struct user_struct *user;
struct eventpoll *ep;
- user = get_current_user();
- error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
- goto free_uid;
+ return -ENOMEM;
mutex_init(&ep->mtx);
rwlock_init(&ep->lock);
@@ -992,16 +988,12 @@ static int ep_alloc(struct eventpoll **pep)
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT_CACHED;
ep->ovflist = EP_UNACTIVE_PTR;
- ep->user = user;
+ ep->user = get_current_user();
refcount_set(&ep->refcount, 1);
*pep = ep;
return 0;
-
-free_uid:
- free_uid(user);
- return error;
}
/*
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 729ada9e26e8..f55498e5c23d 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -273,8 +273,6 @@ struct exfat_sb_info {
spinlock_t inode_hash_lock;
struct hlist_head inode_hashtable[EXFAT_HASH_SIZE];
-
- struct rcu_head rcu;
};
#define EXFAT_CACHE_VALID 0
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 3cbd270e0cba..32395ef686a2 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -22,7 +22,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (err)
return err;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
if (!IS_SYNC(inode))
@@ -232,7 +232,7 @@ int exfat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct inode *inode = d_backing_inode(path->dentry);
struct exfat_inode_info *ei = EXFAT_I(inode);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
exfat_truncate_atime(&stat->atime);
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = ei->i_crtime.tv_sec;
@@ -290,7 +290,7 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
if (attr->ia_valid & ATTR_SIZE)
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
setattr_copy(&nop_mnt_idmap, inode, attr);
exfat_truncate_atime(&inode->i_atime);
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 481dd338f2b8..13329baeafbc 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -355,7 +355,7 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
if (to > i_size_read(inode)) {
truncate_pagecache(inode, i_size_read(inode));
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
exfat_truncate(inode);
}
}
@@ -398,7 +398,7 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
exfat_write_failed(mapping, pos+len);
if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ei->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -577,7 +577,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
inode->i_mtime = info->mtime;
- inode->i_ctime = info->mtime;
+ inode_set_ctime_to_ts(inode, info->mtime);
ei->i_crtime = info->crtime;
inode->i_atime = info->atime;
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index e0ff9d156f6f..1b9f587f6cca 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -569,7 +569,7 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -582,8 +582,7 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime =
- EXFAT_I(inode)->i_crtime = current_time(inode);
+ inode->i_mtime = inode->i_atime = EXFAT_I(inode)->i_crtime = inode_set_ctime_current(inode);
exfat_truncate_atime(&inode->i_atime);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
@@ -817,7 +816,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
ei->dir.dir = DIR_DELETED;
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
exfat_truncate_atime(&dir->i_atime);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
@@ -825,7 +824,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
exfat_truncate_atime(&inode->i_atime);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
@@ -852,7 +851,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -866,8 +865,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = inode->i_ctime =
- EXFAT_I(inode)->i_crtime = current_time(inode);
+ inode->i_mtime = inode->i_atime = EXFAT_I(inode)->i_crtime = inode_set_ctime_current(inode);
exfat_truncate_atime(&inode->i_atime);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
@@ -979,7 +977,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
ei->dir.dir = DIR_DELETED;
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = current_time(dir);
+ dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
exfat_truncate_atime(&dir->i_atime);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
@@ -988,7 +986,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
exfat_truncate_atime(&inode->i_atime);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
@@ -1312,8 +1310,8 @@ static int exfat_rename(struct mnt_idmap *idmap,
goto unlock;
inode_inc_iversion(new_dir);
- new_dir->i_ctime = new_dir->i_mtime = new_dir->i_atime =
- EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
+ EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
exfat_truncate_atime(&new_dir->i_atime);
if (IS_DIRSYNC(new_dir))
exfat_sync_inode(new_dir);
@@ -1336,7 +1334,6 @@ static int exfat_rename(struct mnt_idmap *idmap,
}
inode_inc_iversion(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
if (IS_DIRSYNC(old_dir))
exfat_sync_inode(old_dir);
else
@@ -1354,8 +1351,7 @@ static int exfat_rename(struct mnt_idmap *idmap,
exfat_warn(sb, "abnormal access to an inode dropped");
WARN_ON(new_inode->i_nlink == 0);
}
- new_inode->i_ctime = EXFAT_I(new_inode)->i_crtime =
- current_time(new_inode);
+ EXFAT_I(new_inode)->i_crtime = current_time(new_inode);
}
unlock:
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 8c32460e031e..2778bd9b631e 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -31,16 +31,6 @@ static void exfat_free_iocharset(struct exfat_sb_info *sbi)
kfree(sbi->options.iocharset);
}
-static void exfat_delayed_free(struct rcu_head *p)
-{
- struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu);
-
- unload_nls(sbi->nls_io);
- exfat_free_iocharset(sbi);
- exfat_free_upcase_table(sbi);
- kfree(sbi);
-}
-
static void exfat_put_super(struct super_block *sb)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
@@ -50,7 +40,8 @@ static void exfat_put_super(struct super_block *sb)
brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
- call_rcu(&sbi->rcu, exfat_delayed_free);
+ unload_nls(sbi->nls_io);
+ exfat_free_upcase_table(sbi);
}
static int exfat_sync_fs(struct super_block *sb, int wait)
@@ -379,8 +370,7 @@ static int exfat_read_root(struct inode *inode)
ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
- current_time(inode);
+ inode->i_mtime = inode->i_atime = ei->i_crtime = inode_set_ctime_current(inode);
exfat_truncate_atime(&inode->i_atime);
return 0;
}
@@ -710,9 +700,6 @@ free_table:
check_nls_io:
unload_nls(sbi->nls_io);
- exfat_free_iocharset(sbi);
- sb->s_fs_info = NULL;
- kfree(sbi);
return err;
}
@@ -721,14 +708,18 @@ static int exfat_get_tree(struct fs_context *fc)
return get_tree_bdev(fc, exfat_fill_super);
}
+static void exfat_free_sbi(struct exfat_sb_info *sbi)
+{
+ exfat_free_iocharset(sbi);
+ kfree(sbi);
+}
+
static void exfat_free(struct fs_context *fc)
{
struct exfat_sb_info *sbi = fc->s_fs_info;
- if (sbi) {
- exfat_free_iocharset(sbi);
- kfree(sbi);
- }
+ if (sbi)
+ exfat_free_sbi(sbi);
}
static int exfat_reconfigure(struct fs_context *fc)
@@ -773,12 +764,21 @@ static int exfat_init_fs_context(struct fs_context *fc)
return 0;
}
+static void exfat_kill_sb(struct super_block *sb)
+{
+ struct exfat_sb_info *sbi = sb->s_fs_info;
+
+ kill_block_super(sb);
+ if (sbi)
+ exfat_free_sbi(sbi);
+}
+
static struct file_system_type exfat_fs_type = {
.owner = THIS_MODULE,
.name = "exfat",
.init_fs_context = exfat_init_fs_context,
.parameters = exfat_parameters,
- .kill_sb = kill_block_super,
+ .kill_sb = exfat_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 82b17d7fc93f..7e54c31589c7 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -237,7 +237,7 @@ ext2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
error = __ext2_set_acl(inode, acl, type);
if (!error && update_mode) {
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
return error;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 42db804794bd..b335f17f682f 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -468,7 +468,7 @@ int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
ext2_set_de_type(de, inode);
ext2_commit_chunk(page, pos, len);
if (update_times)
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
return ext2_handle_dirsync(dir);
@@ -555,7 +555,7 @@ got_it:
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type (de, inode);
ext2_commit_chunk(page, pos, rec_len);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
err = ext2_handle_dirsync(dir);
@@ -606,7 +606,7 @@ int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page)
pde->rec_len = ext2_rec_len_to_disk(to - from);
dir->inode = 0;
ext2_commit_chunk(page, pos, to - from);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(inode);
return ext2_handle_dirsync(inode);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index a4e1d7a9c544..124df89689e1 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -549,7 +549,7 @@ got:
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_flags =
ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 75983215c7a1..acbab27fe957 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -595,7 +595,7 @@ static void ext2_splice_branch(struct inode *inode,
if (where->bh)
mark_buffer_dirty_inode(where->bh, inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
@@ -1287,7 +1287,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
__ext2_truncate_blocks(inode, newsize);
filemap_invalidate_unlock(inode->i_mapping);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (inode_needs_sync(inode)) {
sync_mapping_buffers(inode->i_mapping);
sync_inode_metadata(inode, 1);
@@ -1409,9 +1409,9 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le32_to_cpu(raw_inode->i_size);
inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
- inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
+ inode_set_ctime(inode, (signed)le32_to_cpu(raw_inode->i_ctime), 0);
inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+ inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes
@@ -1541,7 +1541,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(inode->i_size);
raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
+ raw_inode->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec);
raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
@@ -1628,7 +1628,7 @@ int ext2_getattr(struct mnt_idmap *idmap, const struct path *path,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index cc87d413eb43..44e04484e570 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -44,7 +44,7 @@ int ext2_fileattr_set(struct mnt_idmap *idmap,
(fa->flags & EXT2_FL_USER_MODIFIABLE);
ext2_set_inode_flags(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
@@ -77,7 +77,7 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
inode_lock(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode->i_generation = generation;
inode_unlock(inode);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 937dd8f60f96..059517068adc 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -211,7 +211,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir,
if (err)
return err;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
@@ -291,7 +291,7 @@ static int ext2_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
err = 0;
out:
@@ -367,7 +367,7 @@ static int ext2_rename (struct mnt_idmap * idmap,
ext2_put_page(new_page, new_de);
if (err)
goto out_dir;
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
@@ -383,7 +383,7 @@ static int ext2_rename (struct mnt_idmap * idmap,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
err = ext2_delete_entry(old_de, old_page);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 2959afc7541c..aaf3e3e88cb2 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1572,7 +1572,7 @@ out:
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 8906ba479aaf..1c9187188d68 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -773,7 +773,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
/* Update the inode. */
EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (IS_SYNC(inode)) {
error = sync_inode_metadata(inode, 1);
/* In case sync failed due to ENOSPC the inode was actually
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 27fcbddfb148..3bffe862f954 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -259,7 +259,7 @@ retry:
error = __ext4_set_acl(handle, inode, type, acl, 0 /* xattr_flags */);
if (!error && update_mode) {
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
error = ext4_mark_inode_dirty(handle, inode);
}
out_stop:
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0a2d55faa095..1e2259d9967d 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -868,64 +868,70 @@ struct ext4_inode {
* affected filesystem before 2242.
*/
-static inline __le32 ext4_encode_extra_time(struct timespec64 *time)
+static inline __le32 ext4_encode_extra_time(struct timespec64 ts)
{
- u32 extra =((time->tv_sec - (s32)time->tv_sec) >> 32) & EXT4_EPOCH_MASK;
- return cpu_to_le32(extra | (time->tv_nsec << EXT4_EPOCH_BITS));
+ u32 extra = ((ts.tv_sec - (s32)ts.tv_sec) >> 32) & EXT4_EPOCH_MASK;
+ return cpu_to_le32(extra | (ts.tv_nsec << EXT4_EPOCH_BITS));
}
-static inline void ext4_decode_extra_time(struct timespec64 *time,
- __le32 extra)
+static inline struct timespec64 ext4_decode_extra_time(__le32 base,
+ __le32 extra)
{
+ struct timespec64 ts = { .tv_sec = (signed)le32_to_cpu(base) };
+
if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK)))
- time->tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
- time->tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ ts.tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
+ ts.tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
+ return ts;
}
-#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
+#define EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, ts) \
do { \
- if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) {\
- (raw_inode)->xtime = cpu_to_le32((inode)->xtime.tv_sec); \
- (raw_inode)->xtime ## _extra = \
- ext4_encode_extra_time(&(inode)->xtime); \
- } \
- else \
- (raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (inode)->xtime.tv_sec, S32_MIN, S32_MAX)); \
+ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) { \
+ (raw_inode)->xtime = cpu_to_le32((ts).tv_sec); \
+ (raw_inode)->xtime ## _extra = ext4_encode_extra_time(ts); \
+ } else \
+ (raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (ts).tv_sec, S32_MIN, S32_MAX)); \
} while (0)
-#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
-do { \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
- (raw_inode)->xtime = cpu_to_le32((einode)->xtime.tv_sec); \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
- (raw_inode)->xtime ## _extra = \
- ext4_encode_extra_time(&(einode)->xtime); \
-} while (0)
+#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, (inode)->xtime)
+
+#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_ctime, inode, raw_inode, inode_get_ctime(inode))
+
+#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ EXT4_INODE_SET_XTIME_VAL(xtime, &((einode)->vfs_inode), \
+ raw_inode, (einode)->xtime)
+
+#define EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode) \
+ (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra) ? \
+ ext4_decode_extra_time((raw_inode)->xtime, \
+ (raw_inode)->xtime ## _extra) : \
+ (struct timespec64) { \
+ .tv_sec = (signed)le32_to_cpu((raw_inode)->xtime) \
+ })
#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \
do { \
- (inode)->xtime.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime); \
- if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) { \
- ext4_decode_extra_time(&(inode)->xtime, \
- raw_inode->xtime ## _extra); \
- } \
- else \
- (inode)->xtime.tv_nsec = 0; \
+ (inode)->xtime = EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode); \
} while (0)
+#define EXT4_INODE_GET_CTIME(inode, raw_inode) \
+do { \
+ inode_set_ctime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_ctime, inode, raw_inode)); \
+} while (0)
-#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
-do { \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
- (einode)->xtime.tv_sec = \
- (signed)le32_to_cpu((raw_inode)->xtime); \
- else \
- (einode)->xtime.tv_sec = 0; \
- if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
- ext4_decode_extra_time(&(einode)->xtime, \
- raw_inode->xtime ## _extra); \
- else \
- (einode)->xtime.tv_nsec = 0; \
+#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \
+do { \
+ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
+ (einode)->xtime = \
+ EXT4_INODE_GET_XTIME_VAL(xtime, &(einode->vfs_inode), \
+ raw_inode); \
+ else \
+ (einode)->xtime = (struct timespec64){0, 0}; \
} while (0)
#define i_disk_version osd1.linux1.l_i_version
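The ext4.h hunk above converts ext4_encode_extra_time()/ext4_decode_extra_time() to pass struct timespec64 by value: the extra 32-bit on-disk field carries the seconds bits beyond 32 (masked by the epoch mask) plus the nanoseconds shifted up by the epoch width. The stand-alone sketch below mirrors that round trip; endianness conversion is dropped, and the EPOCH/NSEC constants are assumed to match the EXT4_EPOCH_BITS, EXT4_EPOCH_MASK and EXT4_NSEC_MASK definitions used elsewhere in ext4.h. Treat it as a sketch, not the kernel code.

#include <stdint.h>
#include <stdio.h>

#define EPOCH_BITS 2
#define EPOCH_MASK ((1u << EPOCH_BITS) - 1)
#define NSEC_MASK  (~0u << EPOCH_BITS)

struct ts64 { int64_t tv_sec; long tv_nsec; };

static uint32_t encode_extra(struct ts64 ts)
{
        uint32_t extra = ((ts.tv_sec - (int32_t)ts.tv_sec) >> 32) & EPOCH_MASK;

        return extra | ((uint32_t)ts.tv_nsec << EPOCH_BITS);
}

static struct ts64 decode_extra(uint32_t base, uint32_t extra)
{
        struct ts64 ts = { .tv_sec = (int32_t)base };

        if (extra & EPOCH_MASK)
                ts.tv_sec += (uint64_t)(extra & EPOCH_MASK) << 32;
        ts.tv_nsec = (extra & NSEC_MASK) >> EPOCH_BITS;
        return ts;
}

int main(void)
{
        /* 2040-01-01 00:00:00 UTC is beyond the signed 32-bit range. */
        struct ts64 in = { .tv_sec = 2208988800LL, .tv_nsec = 123456789 };
        struct ts64 out = decode_extra((uint32_t)in.tv_sec, encode_extra(in));

        printf("%lld.%09ld\n", (long long)out.tv_sec, out.tv_nsec);
        return 0;
}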
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 77f318ec8abb..b38d59581411 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -234,8 +234,7 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
might_sleep();
- if (bh->b_bdev->bd_super)
- ext4_check_bdev_write_error(bh->b_bdev->bd_super);
+ ext4_check_bdev_write_error(sb);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_get_write_access(handle, bh);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e4115d338f10..202c76996b62 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4476,12 +4476,12 @@ retry:
map.m_lblk += ret;
map.m_len = len = len - ret;
epos = (loff_t)map.m_lblk << inode->i_blkbits;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (new_size) {
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
- inode->i_mtime = inode->i_ctime;
+ inode->i_mtime = inode_get_ctime(inode);
}
ret2 = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4617,7 +4617,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
/* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, start, end - 1);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags);
@@ -4642,7 +4642,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
goto out_mutex;
}
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (new_size)
ext4_update_inode_size(inode, new_size);
ret = ext4_mark_inode_dirty(handle, inode);
@@ -5378,7 +5378,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ret = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -5488,7 +5488,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
goto out_stop;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 754f961cd9fd..48abef5f23e7 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1250,7 +1250,7 @@ got:
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
ei->i_crtime = inode->i_mtime;
memset(ei->i_data, 0, sizeof(ei->i_data));
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index a4b7e4bc32d4..003861037374 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1037,7 +1037,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
return 1;
@@ -1991,7 +1991,7 @@ out:
ext4_orphan_del(handle, inode);
if (err == 0) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
err = ext4_mark_inode_dirty(handle, inode);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
diff --git a/fs/ext4/inode-test.c b/fs/ext4/inode-test.c
index 7935ea6cf92c..f0c0fd507fbc 100644
--- a/fs/ext4/inode-test.c
+++ b/fs/ext4/inode-test.c
@@ -245,9 +245,9 @@ static void inode_test_xtimestamp_decoding(struct kunit *test)
struct timestamp_expectation *test_param =
(struct timestamp_expectation *)(test->param_value);
- timestamp.tv_sec = get_32bit_time(test_param);
- ext4_decode_extra_time(&timestamp,
- cpu_to_le32(test_param->extra_bits));
+ timestamp = ext4_decode_extra_time(
+ cpu_to_le32(get_32bit_time(test_param)),
+ cpu_to_le32(test_param->extra_bits));
KUNIT_EXPECT_EQ_MSG(test,
test_param->expected.tv_sec,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 43775a6ca505..6683076ecb2f 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3986,7 +3986,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ret2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret2))
ret = ret2;
@@ -4146,7 +4146,7 @@ out_stop:
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
err2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(err2 && !err))
err = err2;
@@ -4249,7 +4249,7 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
- EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_SET_CTIME(inode, raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
@@ -4858,7 +4858,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
}
- EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_GET_CTIME(inode, raw_inode);
EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
@@ -4981,7 +4981,7 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
- EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_SET_CTIME(inode, raw_inode);
EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
ext4_inode_csum_set(inode, raw_inode, ei);
@@ -5376,10 +5376,8 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
* Update c/mtime on truncate up, ext4_truncate() will
* update c/mtime in shrink case below
*/
- if (!shrink) {
- inode->i_mtime = current_time(inode);
- inode->i_ctime = inode->i_mtime;
- }
+ if (!shrink)
+ inode->i_mtime = inode_set_ctime_current(inode);
if (shrink)
ext4_fc_track_range(handle, inode,
@@ -5537,7 +5535,7 @@ int ext4_getattr(struct mnt_idmap *idmap, const struct path *path,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 331859511f80..b0349f451863 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -449,7 +449,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
diff = size - size_bl;
swap_inode_data(inode, inode_bl);
- inode->i_ctime = inode_bl->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
+ inode_set_ctime_current(inode_bl);
inode_inc_iversion(inode);
inode->i_generation = get_random_u32();
@@ -663,7 +664,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
ext4_set_inode_flags(inode, false);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_iversion(inode);
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
@@ -774,7 +775,7 @@ static int ext4_ioctl_setproject(struct inode *inode, __u32 projid)
}
EXT4_I(inode)->i_projid = kprojid;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_iversion(inode);
out_dirty:
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
@@ -1266,7 +1267,7 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err == 0) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_iversion(inode);
inode->i_generation = generation;
err = ext4_mark_iloc_dirty(handle, inode, &iloc);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 0caf6c730ce3..933ad03f4f58 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2203,7 +2203,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
err2 = ext4_mark_inode_dirty(handle, dir);
@@ -3197,7 +3197,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_ctime_current(inode);
retval = ext4_mark_inode_dirty(handle, inode);
if (retval)
goto end_rmdir;
@@ -3271,7 +3272,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto out_handle;
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
if (retval)
@@ -3286,7 +3287,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
drop_nlink(inode);
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
retval = ext4_mark_inode_dirty(handle, inode);
if (dentry && !retval)
ext4_fc_track_unlink(handle, dentry);
@@ -3463,7 +3464,7 @@ retry:
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ext4_inc_count(inode);
ihold(inode);
@@ -3641,8 +3642,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
if (ext4_has_feature_filetype(ent->dir->i_sb))
ent->de->file_type = file_type;
inode_inc_iversion(ent->dir);
- ent->dir->i_ctime = ent->dir->i_mtime =
- current_time(ent->dir);
+ ent->dir->i_mtime = inode_set_ctime_current(ent->dir);
retval = ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
@@ -3941,7 +3941,7 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old.inode->i_ctime = current_time(old.inode);
+ inode_set_ctime_current(old.inode);
retval = ext4_mark_inode_dirty(handle, old.inode);
if (unlikely(retval))
goto end_rename;
@@ -3955,9 +3955,9 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (new.inode) {
ext4_dec_count(new.inode);
- new.inode->i_ctime = current_time(new.inode);
+ inode_set_ctime_current(new.inode);
}
- old.dir->i_ctime = old.dir->i_mtime = current_time(old.dir);
+ old.dir->i_mtime = inode_set_ctime_current(old.dir);
ext4_update_dx_flag(old.dir);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
@@ -4053,7 +4053,6 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
};
u8 new_file_type;
int retval;
- struct timespec64 ctime;
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
!projid_eq(EXT4_I(new_dir)->i_projid,
@@ -4147,9 +4146,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- ctime = current_time(old.inode);
- old.inode->i_ctime = ctime;
- new.inode->i_ctime = ctime;
+ inode_set_ctime_current(old.inode);
+ inode_set_ctime_current(new.inode);
retval = ext4_mark_inode_dirty(handle, old.inode);
if (unlikely(retval))
goto end_rename;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c94ebf704616..73547d2334fd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -93,6 +93,7 @@ static int ext4_get_tree(struct fs_context *fc);
static int ext4_reconfigure(struct fs_context *fc);
static void ext4_fc_free(struct fs_context *fc);
static int ext4_init_fs_context(struct fs_context *fc);
+static void ext4_kill_sb(struct super_block *sb);
static const struct fs_parameter_spec ext4_param_specs[];
/*
@@ -135,12 +136,12 @@ static struct file_system_type ext2_fs_type = {
.name = "ext2",
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
- .kill_sb = kill_block_super,
+ .kill_sb = ext4_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
MODULE_ALIAS("ext2");
-#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
+#define IS_EXT2_SB(sb) ((sb)->s_type == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
@@ -151,12 +152,12 @@ static struct file_system_type ext3_fs_type = {
.name = "ext3",
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
- .kill_sb = kill_block_super,
+ .kill_sb = ext4_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
-#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+#define IS_EXT3_SB(sb) ((sb)->s_type == &ext3_fs_type)
static inline void __ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
@@ -1096,15 +1097,6 @@ void ext4_update_dynamic_rev(struct super_block *sb)
*/
}
-static void ext4_bdev_mark_dead(struct block_device *bdev)
-{
- ext4_force_shutdown(bdev->bd_holder, EXT4_GOING_FLAGS_NOLOGFLUSH);
-}
-
-static const struct blk_holder_ops ext4_holder_ops = {
- .mark_dead = ext4_bdev_mark_dead,
-};
-
/*
* Open the external journal device
*/
@@ -1113,7 +1105,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
struct block_device *bdev;
bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE, sb,
- &ext4_holder_ops);
+ &fs_holder_ops);
if (IS_ERR(bdev))
goto fail;
return bdev;
@@ -1125,25 +1117,6 @@ fail:
return NULL;
}
-/*
- * Release the journal device
- */
-static void ext4_blkdev_remove(struct ext4_sb_info *sbi)
-{
- struct block_device *bdev;
- bdev = sbi->s_journal_bdev;
- if (bdev) {
- /*
- * Invalidate the journal device's buffers. We don't want them
- * floating about in memory - the physical journal device may
- * hotswapped, and it breaks the `ro-after' testing code.
- */
- invalidate_bdev(bdev);
- blkdev_put(bdev, sbi->s_sb);
- sbi->s_journal_bdev = NULL;
- }
-}
-
static inline struct inode *orphan_list_entry(struct list_head *l)
{
return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
@@ -1339,8 +1312,13 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
if (sbi->s_journal_bdev) {
+ /*
+ * Invalidate the journal device's buffers. We don't want them
+ * floating about in memory - the physical journal device may be
+ * hotswapped, and it breaks the `ro-after' testing code.
+ */
sync_blockdev(sbi->s_journal_bdev);
- ext4_blkdev_remove(sbi);
+ invalidate_bdev(sbi->s_journal_bdev);
}
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
@@ -5572,7 +5550,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
spin_lock_init(&sbi->s_bdev_wb_lock);
errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
&sbi->s_bdev_wb_err);
- sb->s_bdev->bd_super = sb;
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
@@ -5664,9 +5641,11 @@ failed_mount:
kfree(get_qf_name(sb, sbi, i));
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
- /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */
brelse(sbi->s_sbh);
- ext4_blkdev_remove(sbi);
+ if (sbi->s_journal_bdev) {
+ invalidate_bdev(sbi->s_journal_bdev);
+ blkdev_put(sbi->s_journal_bdev, sb);
+ }
out_fail:
invalidate_bdev(sb->s_bdev);
sb->s_fs_info = NULL;
@@ -5854,7 +5833,10 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
return NULL;
+ /* see get_tree_bdev for why this is needed and safe */
+ up_write(&sb->s_umount);
bdev = ext4_blkdev_get(j_dev, sb);
+ down_write(&sb->s_umount);
if (bdev == NULL)
return NULL;
@@ -7103,7 +7085,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
}
EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out_unlock:
@@ -7273,13 +7255,24 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
return 1;
}
+static void ext4_kill_sb(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct block_device *journal_bdev = sbi ? sbi->s_journal_bdev : NULL;
+
+ kill_block_super(sb);
+
+ if (journal_bdev)
+ blkdev_put(journal_bdev, sb);
+}
+
static struct file_system_type ext4_fs_type = {
.owner = THIS_MODULE,
.name = "ext4",
.init_fs_context = ext4_init_fs_context,
.parameters = ext4_param_specs,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .kill_sb = ext4_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("ext4");
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 05151d61b00b..281e1bfbbe3e 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -356,13 +356,13 @@ ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
- return ((u64)ea_inode->i_ctime.tv_sec << 32) |
+ return ((u64) inode_get_ctime(ea_inode).tv_sec << 32) |
(u32) inode_peek_iversion_raw(ea_inode);
}
static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
- ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
+ inode_set_ctime(ea_inode, (u32)(ref_count >> 32), 0);
inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}
@@ -2473,7 +2473,7 @@ retry_inode:
}
if (!error) {
ext4_xattr_update_super_block(handle, inode->i_sb);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_iversion(inode);
if (!value)
no_expand = 0;
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 236d890f560b..0f7df9c11af3 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1045,7 +1045,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct address_space *mapping = cc->inode->i_mapping;
struct page *page;
sector_t last_block_in_bio;
- unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
+ fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index d635c58cf5a3..8aa29fe2e87b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -455,7 +455,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
de->file_type = fs_umode_to_ftype(inode->i_mode);
set_page_dirty(page);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
@@ -609,7 +609,7 @@ void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
f2fs_i_links_write(dir, true);
clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
f2fs_mark_inode_dirty_sync(dir, false);
if (F2FS_I(dir)->i_current_depth != current_depth)
@@ -858,7 +858,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
if (S_ISDIR(inode->i_mode))
f2fs_i_links_write(dir, false);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_i_links_write(inode, false);
if (S_ISDIR(inode->i_mode)) {
@@ -919,7 +919,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
}
f2fs_put_page(page, 1);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c7cb2177b252..613132339d72 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2736,7 +2736,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
static inline struct page *f2fs_pagecache_get_page(
struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp_mask)
+ fgf_t fgp_flags, gfp_t gfp_mask)
{
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
return NULL;
@@ -3303,9 +3303,11 @@ static inline void clear_file(struct inode *inode, int type)
static inline bool f2fs_is_time_consistent(struct inode *inode)
{
+ struct timespec64 ctime = inode_get_ctime(inode);
+
if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ctime))
return false;
if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
return false;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 093039dee992..35886a52edfb 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -794,7 +794,7 @@ int f2fs_truncate(struct inode *inode)
if (err)
return err;
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
@@ -882,7 +882,7 @@ int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
STATX_ATTR_NODUMP |
STATX_ATTR_VERITY);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
/* we need to show initial sectors used for inline_data/dentries */
if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
@@ -905,7 +905,7 @@ static void __setattr_copy(struct mnt_idmap *idmap,
if (ia_valid & ATTR_MTIME)
inode->i_mtime = attr->ia_mtime;
if (ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
@@ -1008,7 +1008,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
spin_lock(&F2FS_I(inode)->i_size_lock);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
F2FS_I(inode)->last_disk_size = i_size_read(inode);
spin_unlock(&F2FS_I(inode)->i_size_lock);
}
@@ -1835,7 +1835,7 @@ static long f2fs_fallocate(struct file *file, int mode,
}
if (!ret) {
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, false);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -1937,7 +1937,7 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
else
clear_inode_flag(inode, FI_PROJ_INHERIT);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_set_inode_flags(inode);
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
@@ -2874,10 +2874,10 @@ out_src:
if (ret)
goto out_unlock;
- src->i_mtime = src->i_ctime = current_time(src);
+ src->i_mtime = inode_set_ctime_current(src);
f2fs_mark_inode_dirty_sync(src, false);
if (src != dst) {
- dst->i_mtime = dst->i_ctime = current_time(dst);
+ dst->i_mtime = inode_set_ctime_current(dst);
f2fs_mark_inode_dirty_sync(dst, false);
}
f2fs_update_time(sbi, REQ_TIME);
@@ -3073,7 +3073,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
goto out_unlock;
fi->i_projid = kprojid;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
f2fs_unlock_op(sbi);
@@ -3511,7 +3511,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
}
set_inode_flag(inode, FI_COMPRESS_RELEASED);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -3710,7 +3710,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (ret >= 0) {
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
unlock_inode:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 01effd3fcb6c..a1ca394bc327 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -2181,12 +2181,14 @@ out_drop_write:
if (err)
return err;
- err = freeze_super(sbi->sb);
+ err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
if (err)
return err;
if (f2fs_readonly(sbi->sb)) {
- thaw_super(sbi->sb);
+ err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
+ if (err)
+ return err;
return -EROFS;
}
@@ -2240,6 +2242,6 @@ recover_out:
out_err:
f2fs_up_write(&sbi->cp_global_sem);
f2fs_up_write(&sbi->gc_lock);
- thaw_super(sbi->sb);
+ thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
return err;
}
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 4638fee16a91..88fc9208ffa7 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -698,7 +698,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
f2fs_put_page(page, 1);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 09e986b050c6..c1c2ba9f28e5 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -403,7 +403,7 @@ static void init_idisk_time(struct inode *inode)
struct f2fs_inode_info *fi = F2FS_I(inode);
fi->i_disk_time[0] = inode->i_atime;
- fi->i_disk_time[1] = inode->i_ctime;
+ fi->i_disk_time[1] = inode_get_ctime(inode);
fi->i_disk_time[2] = inode->i_mtime;
}
@@ -434,10 +434,10 @@ static int do_read_inode(struct inode *inode)
inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
- inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
+ inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
+ le32_to_cpu(ri->i_ctime_nsec));
inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
- inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
inode->i_generation = le32_to_cpu(ri->i_generation);
if (S_ISDIR(inode->i_mode))
@@ -714,10 +714,10 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
if (S_ISDIR(inode->i_mode))
ri->i_current_depth =
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index bee0568888da..193b22a2d6bf 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -243,7 +243,7 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
F2FS_I(inode)->i_crtime = inode->i_mtime;
inode->i_generation = get_random_u32();
@@ -420,7 +420,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
f2fs_balance_fs(sbi, true);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ihold(inode);
set_inode_flag(inode, FI_INC_LINK);
@@ -1052,7 +1052,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_page = NULL;
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
f2fs_down_write(&F2FS_I(new_inode)->i_sem);
if (old_dir_entry)
f2fs_i_links_write(new_inode, false);
@@ -1086,7 +1086,7 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
f2fs_i_pino_write(old_inode, new_dir->i_ino);
f2fs_up_write(&F2FS_I(old_inode)->i_sem);
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
f2fs_mark_inode_dirty_sync(old_inode, false);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
@@ -1251,7 +1251,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_i_pino_write(old_inode, new_dir->i_ino);
f2fs_up_write(&F2FS_I(old_inode)->i_sem);
- old_dir->i_ctime = current_time(old_dir);
+ inode_set_ctime_current(old_dir);
if (old_nlink) {
f2fs_down_write(&F2FS_I(old_dir)->i_sem);
f2fs_i_links_write(old_dir, old_nlink > 0);
@@ -1270,7 +1270,7 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_i_pino_write(new_inode, old_dir->i_ino);
f2fs_up_write(&F2FS_I(new_inode)->i_sem);
- new_dir->i_ctime = current_time(new_dir);
+ inode_set_ctime_current(new_dir);
if (new_nlink) {
f2fs_down_write(&F2FS_I(new_dir)->i_sem);
f2fs_i_links_write(new_dir, new_nlink > 0);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 4e7d4ceeb084..b8637e88d94f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -321,10 +321,10 @@ static int recover_inode(struct inode *inode, struct page *page)
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
- inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
+ inode_set_ctime(inode, le64_to_cpu(raw->i_ctime),
+ le32_to_cpu(raw->i_ctime_nsec));
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
- inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
F2FS_I(inode)->i_advise = raw->i_advise;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ca31163da00a..aa1f9a3a8037 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1561,7 +1561,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
- blkdev_put(FDEV(i).bdev, sbi->sb->s_type);
+ blkdev_put(FDEV(i).bdev, sbi->sb);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@@ -2703,7 +2703,7 @@ retry:
if (len == towrite)
return err;
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, false);
return len - towrite;
}
@@ -4198,7 +4198,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
/* Single zoned block device mount */
FDEV(0).bdev =
blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, mode,
- sbi->sb->s_type, NULL);
+ sbi->sb, NULL);
} else {
/* Multi-device mount */
memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
@@ -4217,8 +4217,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->log_blocks_per_seg) - 1;
}
FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, mode,
- sbi->sb->s_type,
- NULL);
+ sbi->sb, NULL);
}
if (IS_ERR(FDEV(i).bdev))
return PTR_ERR(FDEV(i).bdev);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 476b186b90a6..4ae93e1df421 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -764,7 +764,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
clear_inode_flag(inode, FI_ACL_MODE);
}
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index e3b690b48e3e..66cf4778cf3b 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -460,8 +460,7 @@ extern struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi,
const struct timespec64 *ts);
extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
int flags);
-extern int fat_update_time(struct inode *inode, struct timespec64 *now,
- int flags);
+extern int fat_update_time(struct inode *inode, int flags);
extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
int fat_cache_init(void);
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 456477946dd9..e887e9ab7472 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -401,7 +401,7 @@ int fat_getattr(struct mnt_idmap *idmap, const struct path *path,
struct inode *inode = d_inode(path->dentry);
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->blksize = sbi->cluster_size;
if (sbi->options.nfs == FAT_NFS_NOSTALE_RO) {
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index d99b8549ec8f..cdd39b6020f3 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -562,7 +562,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
- inode->i_ctime = inode->i_mtime;
+ inode_set_ctime_to_ts(inode, inode->i_mtime);
if (sbi->options.isvfat) {
fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
fat_time_fat2unix(sbi, &MSDOS_I(inode)->i_crtime, de->ctime,
@@ -1407,8 +1407,7 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->mmu_private = inode->i_size;
fat_save_attrs(inode, ATTR_DIR);
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = 0;
- inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = 0;
+ inode->i_mtime = inode->i_atime = inode_set_ctime(inode, 0, 0);
set_nlink(inode, fat_subdirs(inode)+2);
return 0;
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 7e5d6ae305f2..f2304a1054aa 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -332,13 +332,14 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
* but ctime updates are ignored.
*/
if (flags & S_MTIME)
- inode->i_mtime = inode->i_ctime = fat_truncate_mtime(sbi, now);
+ inode->i_mtime = inode_set_ctime_to_ts(inode,
+ fat_truncate_mtime(sbi, now));
return 0;
}
EXPORT_SYMBOL_GPL(fat_truncate_time);
-int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
+int fat_update_time(struct inode *inode, int flags)
{
int dirty_flags = 0;
@@ -346,16 +347,13 @@ int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
return 0;
if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
- fat_truncate_time(inode, now, flags);
+ fat_truncate_time(inode, NULL, flags);
if (inode->i_sb->s_flags & SB_LAZYTIME)
dirty_flags |= I_DIRTY_TIME;
else
dirty_flags |= I_DIRTY_SYNC;
}
- if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
- dirty_flags |= I_DIRTY_SYNC;
-
__mark_inode_dirty(inode, dirty_flags);
return 0;
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index b622be119706..e871009f6c88 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -34,7 +34,7 @@
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
-static int setfl(int fd, struct file * filp, unsigned long arg)
+static int setfl(int fd, struct file * filp, unsigned int arg)
{
struct inode * inode = file_inode(filp);
int error = 0;
@@ -112,11 +112,11 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
}
EXPORT_SYMBOL(__f_setown);
-int f_setown(struct file *filp, unsigned long arg, int force)
+int f_setown(struct file *filp, int who, int force)
{
enum pid_type type;
struct pid *pid = NULL;
- int who = arg, ret = 0;
+ int ret = 0;
type = PIDTYPE_TGID;
if (who < 0) {
@@ -317,28 +317,29 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
void __user *argp = (void __user *)arg;
+ int argi = (int)arg;
struct flock flock;
long err = -EINVAL;
switch (cmd) {
case F_DUPFD:
- err = f_dupfd(arg, filp, 0);
+ err = f_dupfd(argi, filp, 0);
break;
case F_DUPFD_CLOEXEC:
- err = f_dupfd(arg, filp, O_CLOEXEC);
+ err = f_dupfd(argi, filp, O_CLOEXEC);
break;
case F_GETFD:
err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
break;
case F_SETFD:
err = 0;
- set_close_on_exec(fd, arg & FD_CLOEXEC);
+ set_close_on_exec(fd, argi & FD_CLOEXEC);
break;
case F_GETFL:
err = filp->f_flags;
break;
case F_SETFL:
- err = setfl(fd, filp, arg);
+ err = setfl(fd, filp, argi);
break;
#if BITS_PER_LONG != 32
/* 32-bit arches must use fcntl64() */
@@ -375,7 +376,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
force_successful_syscall_return();
break;
case F_SETOWN:
- err = f_setown(filp, arg, 1);
+ err = f_setown(filp, argi, 1);
break;
case F_GETOWN_EX:
err = f_getown_ex(filp, arg);
@@ -391,28 +392,28 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
break;
case F_SETSIG:
/* arg == 0 restores default behaviour. */
- if (!valid_signal(arg)) {
+ if (!valid_signal(argi)) {
break;
}
err = 0;
- filp->f_owner.signum = arg;
+ filp->f_owner.signum = argi;
break;
case F_GETLEASE:
err = fcntl_getlease(filp);
break;
case F_SETLEASE:
- err = fcntl_setlease(fd, filp, arg);
+ err = fcntl_setlease(fd, filp, argi);
break;
case F_NOTIFY:
- err = fcntl_dirnotify(fd, filp, arg);
+ err = fcntl_dirnotify(fd, filp, argi);
break;
case F_SETPIPE_SZ:
case F_GETPIPE_SZ:
- err = pipe_fcntl(filp, cmd, arg);
+ err = pipe_fcntl(filp, cmd, argi);
break;
case F_ADD_SEALS:
case F_GET_SEALS:
- err = memfd_fcntl(filp, cmd, arg);
+ err = memfd_fcntl(filp, cmd, argi);
break;
case F_GET_RW_HINT:
case F_SET_RW_HINT:
diff --git a/fs/file.c b/fs/file.c
index 3fd003a8604f..3e4a4dfa38fc 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL(close_fd); /* for ksys_close() */
/**
* last_fd - return last valid index into fd table
- * @cur_fds: files struct
+ * @fdt: File descriptor table.
*
* Context: Either rcu read lock or files_lock must be held.
*
@@ -693,29 +693,30 @@ static inline void __range_cloexec(struct files_struct *cur_fds,
spin_unlock(&cur_fds->file_lock);
}
-static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
+static inline void __range_close(struct files_struct *files, unsigned int fd,
unsigned int max_fd)
{
+ struct file *file;
unsigned n;
- rcu_read_lock();
- n = last_fd(files_fdtable(cur_fds));
- rcu_read_unlock();
+ spin_lock(&files->file_lock);
+ n = last_fd(files_fdtable(files));
max_fd = min(max_fd, n);
- while (fd <= max_fd) {
- struct file *file;
-
- spin_lock(&cur_fds->file_lock);
- file = pick_file(cur_fds, fd++);
- spin_unlock(&cur_fds->file_lock);
-
+ for (; fd <= max_fd; fd++) {
+ file = pick_file(files, fd);
if (file) {
- /* found a valid file to close */
- filp_close(file, cur_fds);
+ spin_unlock(&files->file_lock);
+ filp_close(file, files);
cond_resched();
+ spin_lock(&files->file_lock);
+ } else if (need_resched()) {
+ spin_unlock(&files->file_lock);
+ cond_resched();
+ spin_lock(&files->file_lock);
}
}
+ spin_unlock(&files->file_lock);
}
/**
@@ -723,6 +724,7 @@ static inline void __range_close(struct files_struct *cur_fds, unsigned int fd,
*
* @fd: starting file descriptor to close
* @max_fd: last file descriptor to close
+ * @flags: CLOSE_RANGE flags.
*
* This closes a range of file descriptors. All file descriptors
* from @fd up to and including @max_fd are closed.
diff --git a/fs/file_table.c b/fs/file_table.c
index fc7d677ff5ad..ee21b3da9d08 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -461,11 +461,8 @@ void fput(struct file *file)
*/
void __fput_sync(struct file *file)
{
- if (atomic_long_dec_and_test(&file->f_count)) {
- struct task_struct *task = current;
- BUG_ON(!(task->flags & PF_KTHREAD));
+ if (atomic_long_dec_and_test(&file->f_count))
__fput(file);
- }
}
EXPORT_SYMBOL(fput);
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index ceb6a12649ba..ac5d43b164b5 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -110,10 +110,9 @@ static inline void dip2vip_cpy(struct vxfs_sb_info *sbi,
inode->i_size = vip->vii_size;
inode->i_atime.tv_sec = vip->vii_atime;
- inode->i_ctime.tv_sec = vip->vii_ctime;
+ inode_set_ctime(inode, vip->vii_ctime, 0);
inode->i_mtime.tv_sec = vip->vii_mtime;
inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
inode->i_blocks = vip->vii_blocks;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index aca4b4811394..969ce991b0b0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1953,9 +1953,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
struct inode *inode = wb_inode(wb->b_io.prev);
struct super_block *sb = inode->i_sb;
- if (!trylock_super(sb)) {
+ if (!super_trylock_shared(sb)) {
/*
- * trylock_super() may fail consistently due to
+ * super_trylock_shared() may fail consistently due to
* s_umount being grabbed by someone else. Don't use
* requeue_io() to avoid busy retrying the inode/sb.
*/
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 851214d1d013..a0ad7a0c4680 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -162,6 +162,10 @@ EXPORT_SYMBOL(vfs_parse_fs_param);
/**
* vfs_parse_fs_string - Convenience function to just parse a string.
+ * @fc: Filesystem context.
+ * @key: Parameter name.
+ * @value: Default value.
+ * @v_size: Maximum number of bytes in the value.
*/
int vfs_parse_fs_string(struct fs_context *fc, const char *key,
const char *value, size_t v_size)
@@ -189,7 +193,7 @@ EXPORT_SYMBOL(vfs_parse_fs_string);
/**
* generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
- * @ctx: The superblock configuration to fill in.
+ * @fc: The superblock configuration to fill in.
* @data: The data to parse
*
* Parse a blob of data that's in key[=val][,key[=val]]* form. This can be
@@ -315,10 +319,31 @@ struct fs_context *fs_context_for_reconfigure(struct dentry *dentry,
}
EXPORT_SYMBOL(fs_context_for_reconfigure);
+/**
+ * fs_context_for_submount: allocate a new fs_context for a submount
+ * @type: file_system_type of the new context
+ * @reference: reference dentry from which to copy relevant info
+ *
+ * Allocate a new fs_context suitable for a submount. This also ensures that
+ * the fc->security object is inherited from @reference (if needed).
+ */
struct fs_context *fs_context_for_submount(struct file_system_type *type,
struct dentry *reference)
{
- return alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
+ struct fs_context *fc;
+ int ret;
+
+ fc = alloc_fs_context(type, reference, 0, 0, FS_CONTEXT_FOR_SUBMOUNT);
+ if (IS_ERR(fc))
+ return fc;
+
+ ret = security_fs_context_submount(fc, reference->d_sb);
+ if (ret) {
+ put_fs_context(fc);
+ return ERR_PTR(ret);
+ }
+
+ return fc;
}
EXPORT_SYMBOL(fs_context_for_submount);
@@ -333,7 +358,7 @@ void fc_drop_locked(struct fs_context *fc)
static void legacy_fs_context_free(struct fs_context *fc);
/**
- * vfs_dup_fc_config: Duplicate a filesystem context.
+ * vfs_dup_fs_context - Duplicate a filesystem context.
* @src_fc: The context to copy.
*/
struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc)
@@ -379,7 +404,9 @@ EXPORT_SYMBOL(vfs_dup_fs_context);
/**
* logfc - Log a message to a filesystem context
- * @fc: The filesystem context to log to.
+ * @log: The filesystem context to log to, or NULL to use printk.
+ * @prefix: A string to prefix the output with, or NULL.
+ * @level: 'w' for a warning, 'e' for an error. Anything else is a notice.
* @fmt: The format of the buffer.
*/
void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...)
@@ -692,6 +719,7 @@ void vfs_clean_context(struct fs_context *fc)
security_free_mnt_opts(&fc->security);
kfree(fc->source);
fc->source = NULL;
+ fc->exclusive = false;
fc->purpose = FS_CONTEXT_FOR_RECONFIGURE;
fc->phase = FS_CONTEXT_AWAITING_RECONF;
diff --git a/fs/fsopen.c b/fs/fsopen.c
index fc9d2d9fd234..ce03f6521c88 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -209,6 +209,72 @@ err:
return ret;
}
+static int vfs_cmd_create(struct fs_context *fc, bool exclusive)
+{
+ struct super_block *sb;
+ int ret;
+
+ if (fc->phase != FS_CONTEXT_CREATE_PARAMS)
+ return -EBUSY;
+
+ if (!mount_capable(fc))
+ return -EPERM;
+
+ /* require the new mount api */
+ if (exclusive && fc->ops == &legacy_fs_context_ops)
+ return -EOPNOTSUPP;
+
+ fc->phase = FS_CONTEXT_CREATING;
+ fc->exclusive = exclusive;
+
+ ret = vfs_get_tree(fc);
+ if (ret) {
+ fc->phase = FS_CONTEXT_FAILED;
+ return ret;
+ }
+
+ sb = fc->root->d_sb;
+ ret = security_sb_kern_mount(sb);
+ if (unlikely(ret)) {
+ fc_drop_locked(fc);
+ fc->phase = FS_CONTEXT_FAILED;
+ return ret;
+ }
+
+ /* vfs_get_tree() callchains will have grabbed @s_umount */
+ up_write(&sb->s_umount);
+ fc->phase = FS_CONTEXT_AWAITING_MOUNT;
+ return 0;
+}
+
+static int vfs_cmd_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb;
+ int ret;
+
+ if (fc->phase != FS_CONTEXT_RECONF_PARAMS)
+ return -EBUSY;
+
+ fc->phase = FS_CONTEXT_RECONFIGURING;
+
+ sb = fc->root->d_sb;
+ if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
+ fc->phase = FS_CONTEXT_FAILED;
+ return -EPERM;
+ }
+
+ down_write(&sb->s_umount);
+ ret = reconfigure_super(fc);
+ up_write(&sb->s_umount);
+ if (ret) {
+ fc->phase = FS_CONTEXT_FAILED;
+ return ret;
+ }
+
+ vfs_clean_context(fc);
+ return 0;
+}
+
/*
* Check the state and apply the configuration. Note that this function is
* allowed to 'steal' the value by setting param->xxx to NULL before returning.
@@ -216,7 +282,6 @@ err:
static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
struct fs_parameter *param)
{
- struct super_block *sb;
int ret;
ret = finish_clean_context(fc);
@@ -224,39 +289,11 @@ static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
return ret;
switch (cmd) {
case FSCONFIG_CMD_CREATE:
- if (fc->phase != FS_CONTEXT_CREATE_PARAMS)
- return -EBUSY;
- if (!mount_capable(fc))
- return -EPERM;
- fc->phase = FS_CONTEXT_CREATING;
- ret = vfs_get_tree(fc);
- if (ret)
- break;
- sb = fc->root->d_sb;
- ret = security_sb_kern_mount(sb);
- if (unlikely(ret)) {
- fc_drop_locked(fc);
- break;
- }
- up_write(&sb->s_umount);
- fc->phase = FS_CONTEXT_AWAITING_MOUNT;
- return 0;
+ return vfs_cmd_create(fc, false);
+ case FSCONFIG_CMD_CREATE_EXCL:
+ return vfs_cmd_create(fc, true);
case FSCONFIG_CMD_RECONFIGURE:
- if (fc->phase != FS_CONTEXT_RECONF_PARAMS)
- return -EBUSY;
- fc->phase = FS_CONTEXT_RECONFIGURING;
- sb = fc->root->d_sb;
- if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
- ret = -EPERM;
- break;
- }
- down_write(&sb->s_umount);
- ret = reconfigure_super(fc);
- up_write(&sb->s_umount);
- if (ret)
- break;
- vfs_clean_context(fc);
- return 0;
+ return vfs_cmd_reconfigure(fc);
default:
if (fc->phase != FS_CONTEXT_CREATE_PARAMS &&
fc->phase != FS_CONTEXT_RECONF_PARAMS)
@@ -264,8 +301,6 @@ static int vfs_fsconfig_locked(struct fs_context *fc, int cmd,
return vfs_parse_fs_param(fc, param);
}
- fc->phase = FS_CONTEXT_FAILED;
- return ret;
}
/**
@@ -353,6 +388,7 @@ SYSCALL_DEFINE5(fsconfig,
return -EINVAL;
break;
case FSCONFIG_CMD_CREATE:
+ case FSCONFIG_CMD_CREATE_EXCL:
case FSCONFIG_CMD_RECONFIGURE:
if (_key || _value || aux)
return -EINVAL;
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 247ef4f76761..ab62e4624256 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -235,7 +235,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
inode->i_mode = mode;
inode->i_uid = fc->user_id;
inode->i_gid = fc->group_id;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
/* setting ->i_op to NULL is not allowed */
if (iop)
inode->i_op = iop;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index f67bef9d83c4..881524b9a55a 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -933,7 +933,7 @@ void fuse_flush_time_update(struct inode *inode)
static void fuse_update_ctime_in_cache(struct inode *inode)
{
if (!IS_NOCMTIME(inode)) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty_sync(inode);
fuse_flush_time_update(inode);
}
@@ -1222,7 +1222,7 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
forget_all_cached_acls(inode);
err = fuse_do_getattr(inode, stat, file);
} else if (stat) {
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
}
@@ -1715,8 +1715,8 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
inarg.mtimensec = inode->i_mtime.tv_nsec;
if (fm->fc->minor >= 23) {
inarg.valid |= FATTR_CTIME;
- inarg.ctime = inode->i_ctime.tv_sec;
- inarg.ctimensec = inode->i_ctime.tv_nsec;
+ inarg.ctime = inode_get_ctime(inode).tv_sec;
+ inarg.ctimensec = inode_get_ctime(inode).tv_nsec;
}
if (ff) {
inarg.valid |= FATTR_FH;
@@ -1857,7 +1857,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
if (attr->ia_valid & ATTR_MTIME)
inode->i_mtime = attr->ia_mtime;
if (attr->ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
/* FIXME: clear I_DIRTY_SYNC? */
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f19d748890f0..549358ffea8b 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -194,8 +194,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
inode->i_mtime.tv_nsec = attr->mtimensec;
}
if (!(cache_mask & STATX_CTIME)) {
- inode->i_ctime.tv_sec = attr->ctime;
- inode->i_ctime.tv_nsec = attr->ctimensec;
+ inode_set_ctime(inode, attr->ctime, attr->ctimensec);
}
if (attr->blksize != 0)
@@ -259,8 +258,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
attr->mtimensec = inode->i_mtime.tv_nsec;
}
if (cache_mask & STATX_CTIME) {
- attr->ctime = inode->i_ctime.tv_sec;
- attr->ctimensec = inode->i_ctime.tv_nsec;
+ attr->ctime = inode_get_ctime(inode).tv_sec;
+ attr->ctimensec = inode_get_ctime(inode).tv_nsec;
}
if ((attr_version != 0 && fi->attr_version > attr_version) ||
@@ -318,8 +317,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
inode->i_size = attr->size;
inode->i_mtime.tv_sec = attr->mtime;
inode->i_mtime.tv_nsec = attr->mtimensec;
- inode->i_ctime.tv_sec = attr->ctime;
- inode->i_ctime.tv_nsec = attr->ctimensec;
+ inode_set_ctime(inode, attr->ctime, attr->ctimensec);
if (S_ISREG(inode->i_mode)) {
fuse_init_common(inode);
fuse_init_file_inode(inode, attr->flags);
@@ -1401,16 +1399,18 @@ EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
const struct fuse_inode *fi)
{
+ struct timespec64 ctime = inode_get_ctime(&fi->inode);
+
*attr = (struct fuse_attr){
.ino = fi->inode.i_ino,
.size = fi->inode.i_size,
.blocks = fi->inode.i_blocks,
.atime = fi->inode.i_atime.tv_sec,
.mtime = fi->inode.i_mtime.tv_sec,
- .ctime = fi->inode.i_ctime.tv_sec,
+ .ctime = ctime.tv_sec,
.atimensec = fi->inode.i_atime.tv_nsec,
.mtimensec = fi->inode.i_mtime.tv_nsec,
- .ctimensec = fi->inode.i_ctime.tv_nsec,
+ .ctimensec = ctime.tv_nsec,
.mode = fi->inode.i_mode,
.nlink = fi->inode.i_nlink,
.uid = fi->inode.i_uid.val,
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index a392aa0f041d..443640e6fb9c 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -142,7 +142,7 @@ int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
ret = __gfs2_set_acl(inode, acl, type);
if (!ret && mode != inode->i_mode) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode->i_mode = mode;
mark_inode_dirty(inode);
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index ae49256b7c8c..9c4b26aec580 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -747,7 +747,7 @@ static const struct address_space_operations gfs2_aops = {
.writepages = gfs2_writepages,
.read_folio = gfs2_read_folio,
.readahead = gfs2_readahead,
- .dirty_folio = filemap_dirty_folio,
+ .dirty_folio = iomap_dirty_folio,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = gfs2_bmap,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 8d611fbcf0bd..f62366be7587 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -971,7 +971,7 @@ gfs2_iomap_get_folio(struct iomap_iter *iter, loff_t pos, unsigned len)
if (status)
return ERR_PTR(status);
- folio = iomap_get_folio(iter, pos);
+ folio = iomap_get_folio(iter, pos, len);
if (IS_ERR(folio))
gfs2_trans_end(sdp);
return folio;
@@ -1386,7 +1386,7 @@ static int trunc_start(struct inode *inode, u64 newsize)
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
i_size_write(inode, newsize);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
gfs2_dinode_out(ip, dibh->b_data);
if (journaled)
@@ -1583,8 +1583,7 @@ out_unlock:
/* Every transaction boundary, we rewrite the dinode
to keep its di_blocks current in case of failure. */
- ip->i_inode.i_mtime = ip->i_inode.i_ctime =
- current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -1950,7 +1949,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
gfs2_statfs_change(sdp, 0, +btotal, 0);
gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
ip->i_inode.i_gid);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
up_write(&ip->i_rw_mutex);
@@ -1993,7 +1992,7 @@ static int trunc_end(struct gfs2_inode *ip)
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
gfs2_ordered_del_inode(ip);
}
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -2094,7 +2093,7 @@ static int do_grow(struct inode *inode, u64 size)
goto do_end_trans;
truncate_setsize(inode, size);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 54a6d17b8c25..1a2afa88f8be 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -130,7 +130,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
if (ip->i_inode.i_size < offset + size)
i_size_write(&ip->i_inode, offset + size);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -227,7 +227,7 @@ out:
if (ip->i_inode.i_size < offset + copied)
i_size_write(&ip->i_inode, offset + copied);
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
@@ -1814,7 +1814,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
gfs2_inum_out(nip, dent);
dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
dent->de_rahead = cpu_to_be16(gfs2_inode_ra_len(nip));
- tv = current_time(&ip->i_inode);
+ tv = inode_set_ctime_current(&ip->i_inode);
if (ip->i_diskflags & GFS2_DIF_EXHASH) {
leaf = (struct gfs2_leaf *)bh->b_data;
be16_add_cpu(&leaf->lf_entries, 1);
@@ -1825,7 +1825,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
da->bh = NULL;
brelse(bh);
ip->i_entries++;
- ip->i_inode.i_mtime = ip->i_inode.i_ctime = tv;
+ ip->i_inode.i_mtime = tv;
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
mark_inode_dirty(inode);
@@ -1876,7 +1876,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
const struct qstr *name = &dentry->d_name;
struct gfs2_dirent *dent, *prev = NULL;
struct buffer_head *bh;
- struct timespec64 tv = current_time(&dip->i_inode);
+ struct timespec64 tv;
/* Returns _either_ the entry (if its first in block) or the
previous entry otherwise */
@@ -1896,6 +1896,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
}
dirent_del(dip, bh, prev, dent);
+ tv = inode_set_ctime_current(&dip->i_inode);
if (dip->i_diskflags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
u16 entries = be16_to_cpu(leaf->lf_entries);
@@ -1910,7 +1911,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
if (!dip->i_entries)
gfs2_consist_inode(dip);
dip->i_entries--;
- dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
+ dip->i_inode.i_mtime = tv;
if (d_is_dir(dentry))
drop_nlink(&dip->i_inode);
mark_inode_dirty(&dip->i_inode);
@@ -1951,7 +1952,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
dent->de_type = cpu_to_be16(new_type);
brelse(bh);
- dip->i_inode.i_mtime = dip->i_inode.i_ctime = current_time(&dip->i_inode);
+ dip->i_inode.i_mtime = inode_set_ctime_current(&dip->i_inode);
mark_inode_dirty_sync(&dip->i_inode);
return 0;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index b43fa8b8fc05..766186c80682 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -260,7 +260,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
error = gfs2_meta_inode_buffer(ip, &bh);
if (error)
goto out_trans_end;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
gfs2_trans_add_meta(ip->i_gl, bh);
ip->i_diskflags = new_flags;
gfs2_dinode_out(ip, bh->b_data);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 54319328b16b..aecdac3cfbe1 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -437,8 +437,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
inode->i_atime = atime;
inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
- inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
- inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
+ inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
+ be32_to_cpu(str->di_ctime_nsec));
ip->i_goal = be64_to_cpu(str->di_goal_meta);
ip->i_generation = be64_to_cpu(str->di_generation);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 17c994a0c0d0..a21ac41d6669 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -690,7 +690,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
inode->i_rdev = dev;
inode->i_size = size;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
munge_mode_uid_gid(dip, inode);
check_and_update_goal(dip);
ip->i_goal = dip->i_goal;
@@ -1029,7 +1029,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
gfs2_trans_add_meta(ip->i_gl, dibh);
inc_nlink(&ip->i_inode);
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
ihold(inode);
d_instantiate(dentry, inode);
mark_inode_dirty(inode);
@@ -1114,7 +1114,7 @@ static int gfs2_unlink_inode(struct gfs2_inode *dip,
return error;
ip->i_entries = 0;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (S_ISDIR(inode->i_mode))
clear_nlink(inode);
else
@@ -1371,7 +1371,7 @@ static int update_moved_ino(struct gfs2_inode *ip, struct gfs2_inode *ndip,
if (dir_rename)
return gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR);
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
mark_inode_dirty_sync(&ip->i_inode);
return 0;
}
@@ -2071,7 +2071,7 @@ static int gfs2_getattr(struct mnt_idmap *idmap,
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
@@ -2139,8 +2139,7 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset)
return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
}
-static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
- int flags)
+static int gfs2_update_time(struct inode *inode, int flags)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *gl = ip->i_gl;
@@ -2155,7 +2154,8 @@ static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
if (error)
return error;
}
- return generic_update_time(inode, time, flags);
+ generic_update_time(inode, flags);
+ return 0;
}
static const struct inode_operations gfs2_file_iops = {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 704192b73605..aa5fd06d47bc 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -871,7 +871,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size)
i_size_write(inode, size);
- inode->i_mtime = inode->i_atime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
set_bit(QDF_REFRESH, &qd->qd_flags);
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9f4d5d6549ee..2f701335e8ee 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -412,7 +412,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
- str->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
+ str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);
str->di_goal_meta = cpu_to_be64(ip->i_goal);
str->di_goal_data = cpu_to_be64(ip->i_goal);
@@ -429,7 +429,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_eattr = cpu_to_be64(ip->i_eattr);
str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
- str->di_ctime_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
+ str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
}
/**
@@ -689,7 +689,7 @@ static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
struct super_block *sb = sdp->sd_vfs;
int error;
- error = freeze_super(sb);
+ error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
if (error)
return error;
@@ -697,7 +697,9 @@ static int gfs2_freeze_locally(struct gfs2_sbd *sdp)
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
GFS2_LFC_FREEZE_GO_SYNC);
if (gfs2_withdrawn(sdp)) {
- thaw_super(sb);
+ error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ if (error)
+ return error;
return -EIO;
}
}
@@ -712,7 +714,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp)
error = gfs2_freeze_lock_shared(sdp);
if (error)
goto fail;
- error = thaw_super(sb);
+ error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
if (!error)
return 0;
@@ -761,7 +763,7 @@ out:
*
*/
-static int gfs2_freeze_super(struct super_block *sb)
+static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
@@ -816,7 +818,7 @@ out:
*
*/
-static int gfs2_thaw_super(struct super_block *sb)
+static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 2dfbe2f188dd..c60bc7f628e1 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -168,10 +168,10 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
switch (n) {
case 0:
- error = thaw_super(sdp->sd_vfs);
+ error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
break;
case 1:
- error = freeze_super(sdp->sd_vfs);
+ error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
break;
default:
return -EINVAL;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 93b36d026bb4..4fea70c0fe3d 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -311,7 +311,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
ea->ea_num_ptrs = 0;
}
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
gfs2_trans_end(sdp);
@@ -763,7 +763,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
if (error)
goto out_end_trans;
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
out_end_trans:
@@ -888,7 +888,7 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
if (es->es_el)
ea_set_remove_stuffed(ip, es->es_el);
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -1106,7 +1106,7 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
ea->ea_type = GFS2_EATYPE_UNUSED;
}
- ip->i_inode.i_ctime = current_time(&ip->i_inode);
+ inode_set_ctime_current(&ip->i_inode);
__mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);
gfs2_trans_end(GFS2_SB(&ip->i_inode));
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index d365bf0b8c77..632c226a3972 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -133,7 +133,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
goto err1;
dir->i_size++;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
hfs_find_exit(&fd);
return 0;
@@ -269,7 +269,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
}
dir->i_size--;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
res = 0;
out:
@@ -337,7 +337,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
dst_dir->i_size++;
- dst_dir->i_mtime = dst_dir->i_ctime = current_time(dst_dir);
+ dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
mark_inode_dirty(dst_dir);
/* finally remove the old entry */
@@ -349,7 +349,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
src_dir->i_size--;
- src_dir->i_mtime = src_dir->i_ctime = current_time(src_dir);
+ src_dir->i_mtime = inode_set_ctime_current(src_dir);
mark_inode_dirty(src_dir);
type = entry.type;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 3e1e3dcf0b48..b75c26045df4 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -263,7 +263,7 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
if (res)
return res;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
hfs_delete_inode(inode);
mark_inode_dirty(inode);
return 0;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 441d7fc952e3..ee349b72cfb3 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -200,7 +200,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
@@ -355,8 +355,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_mode |= S_IWUGO;
inode->i_mode &= ~hsb->s_file_umask;
inode->i_mode |= S_IFREG;
- inode->i_ctime = inode->i_atime = inode->i_mtime =
- hfs_m_to_utime(rec->file.MdDat);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode,
+ hfs_m_to_utime(rec->file.MdDat));
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
@@ -366,8 +366,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
HFS_I(inode)->fs_blocks = 0;
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
- inode->i_ctime = inode->i_atime = inode->i_mtime =
- hfs_m_to_utime(rec->dir.MdDat);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode,
+ hfs_m_to_utime(rec->dir.MdDat));
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
break;
@@ -654,8 +654,7 @@ int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
setattr_copy(&nop_mnt_idmap, inode, attr);
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 2875961fdc10..dc27d418fbcd 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -28,7 +28,9 @@ static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
/* fix up inode on a timezone change */
diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
if (diff) {
- inode->i_ctime.tv_sec += diff;
+ struct timespec64 ctime = inode_get_ctime(inode);
+
+ inode_set_ctime(inode, ctime.tv_sec + diff, ctime.tv_nsec);
inode->i_atime.tv_sec += diff;
inode->i_mtime.tv_sec += diff;
HFS_I(inode)->tz_secondswest += diff;
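The hfs hunks above all follow one conversion pattern: direct stores to inode->i_ctime become calls to the new ctime accessors, and because inode_set_ctime_current() returns the timespec64 it stored, the mtime (and atime) assignments can be chained off the same call. A minimal sketch of that shape, assuming a hypothetical foo_mkdir() in an out-of-tree filesystem (the foo_* names are illustrative and not part of this series):

#include <linux/fs.h>

static int foo_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
{
	/* was: dir->i_mtime = dir->i_ctime = current_time(dir); */
	dir->i_mtime = inode_set_ctime_current(dir);
	mark_inode_dirty(dir);

	/* times decoded from disk go through the _to_ts variant instead: */
	/* inode_set_ctime_to_ts(inode, foo_disk_to_timespec(raw)); */
	return 0;
}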
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 35472cba750e..e71ae2537eaa 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -312,7 +312,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
dir->i_size++;
if (S_ISDIR(inode->i_mode))
hfsplus_subfolders_inc(dir);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
hfs_find_exit(&fd);
@@ -417,7 +417,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(dir);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
@@ -494,7 +494,7 @@ int hfsplus_rename_cat(u32 cnid,
dst_dir->i_size++;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_inc(dst_dir);
- dst_dir->i_mtime = dst_dir->i_ctime = current_time(dst_dir);
+ dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
/* finally remove the old entry */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
@@ -511,7 +511,7 @@ int hfsplus_rename_cat(u32 cnid,
src_dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(src_dir);
- src_dir->i_mtime = src_dir->i_ctime = current_time(src_dir);
+ src_dir->i_mtime = inode_set_ctime_current(src_dir);
/* remove old thread entry */
hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 56fb5f1312e7..f5c4b3e31a1c 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -346,7 +346,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
inc_nlink(inode);
hfsplus_instantiate(dst_dentry, inode, cnid);
ihold(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
sbi->file_count++;
hfsplus_mark_mdb_dirty(dst_dir->i_sb);
@@ -405,7 +405,7 @@ static int hfsplus_unlink(struct inode *dir, struct dentry *dentry)
hfsplus_delete_inode(inode);
} else
sbi->file_count--;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
out:
mutex_unlock(&sbi->vh_mutex);
@@ -426,7 +426,7 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
if (res)
goto out;
clear_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
hfsplus_delete_inode(inode);
mark_inode_dirty(inode);
out:
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 7d1a675e037d..c65c8c4b03dd 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -267,7 +267,7 @@ static int hfsplus_setattr(struct mnt_idmap *idmap,
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
}
setattr_copy(&nop_mnt_idmap, inode, attr);
@@ -298,7 +298,7 @@ int hfsplus_getattr(struct mnt_idmap *idmap, const struct path *path,
stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
@@ -392,7 +392,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
inode->i_ino = sbi->next_cnid++;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
hip = HFSPLUS_I(inode);
INIT_LIST_HEAD(&hip->open_dir_list);
@@ -523,7 +523,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
inode->i_size = 2 + be32_to_cpu(folder->valence);
inode->i_atime = hfsp_mt2ut(folder->access_date);
inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
- inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
+ inode_set_ctime_to_ts(inode,
+ hfsp_mt2ut(folder->attribute_mod_date));
HFSPLUS_I(inode)->create_date = folder->create_date;
HFSPLUS_I(inode)->fs_blocks = 0;
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
@@ -564,7 +565,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
}
inode->i_atime = hfsp_mt2ut(file->access_date);
inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
- inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
+ inode_set_ctime_to_ts(inode,
+ hfsp_mt2ut(file->attribute_mod_date));
HFSPLUS_I(inode)->create_date = file->create_date;
} else {
pr_err("bad catalog entry used to create inode\n");
@@ -609,7 +611,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
hfsplus_cat_set_perms(inode, &folder->permissions);
folder->access_date = hfsp_ut2mt(inode->i_atime);
folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
- folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
+ folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
folder->valence = cpu_to_be32(inode->i_size - 2);
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
folder->subfolders =
@@ -644,7 +646,7 @@ int hfsplus_cat_write_inode(struct inode *inode)
file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
file->access_date = hfsp_ut2mt(inode->i_atime);
file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
- file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
+ file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
}
@@ -700,7 +702,7 @@ int hfsplus_fileattr_set(struct mnt_idmap *idmap,
else
hip->userflags &= ~HFSPLUS_FLG_NODUMP;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
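The hfsplus_getattr() hunk also picks up the new generic_fillattr() signature, which now takes the statx request mask so the helper (and the filesystem) can skip attributes the caller never asked for. A hedged sketch of a getattr handler under the new calling convention (foo_getattr() and the STATX_BTIME branch are assumptions for illustration, not from this patch):

static int foo_getattr(struct mnt_idmap *idmap, const struct path *path,
		       struct kstat *stat, u32 request_mask,
		       unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* only do extra work for optional fields that were requested */
	if (request_mask & STATX_BTIME) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = foo_get_btime(inode);	/* hypothetical helper */
	}

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}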
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 46387090eb76..dc5a5cea5fae 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -517,8 +517,7 @@ static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
(struct timespec64){ st->atime.tv_sec, st->atime.tv_nsec };
ino->i_mtime =
(struct timespec64){ st->mtime.tv_sec, st->mtime.tv_nsec };
- ino->i_ctime =
- (struct timespec64){ st->ctime.tv_sec, st->ctime.tv_nsec };
+ inode_set_ctime(ino, st->ctime.tv_sec, st->ctime.tv_nsec);
ino->i_size = st->size;
ino->i_blocks = st->blocks;
return 0;
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index f32f15669996..f36566d61215 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -277,10 +277,10 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
* inode.
*/
- if (!result->i_ctime.tv_sec) {
- if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date))))
- result->i_ctime.tv_sec = 1;
- result->i_ctime.tv_nsec = 0;
+ if (!inode_get_ctime(result).tv_sec) {
+ time64_t csec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date));
+
+ inode_set_ctime(result, csec ? csec : 1, 0);
result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
result->i_mtime.tv_nsec = 0;
result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index e50e92a42432..479166378bae 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -36,7 +36,7 @@ void hpfs_init_inode(struct inode *i)
hpfs_inode->i_rddir_off = NULL;
hpfs_inode->i_dirty = 0;
- i->i_ctime.tv_sec = i->i_ctime.tv_nsec = 0;
+ inode_set_ctime(i, 0, 0);
i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
}
@@ -232,7 +232,7 @@ void hpfs_write_inode_nolock(struct inode *i)
if (de) {
de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
hpfs_mark_4buffers_dirty(&qbh);
@@ -242,7 +242,7 @@ void hpfs_write_inode_nolock(struct inode *i)
if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
de->file_size = cpu_to_le32(0);
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 69fb40b2c99a..f4eb8d6f5989 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -13,10 +13,9 @@ static void hpfs_update_directory_times(struct inode *dir)
{
time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb));
if (t == dir->i_mtime.tv_sec &&
- t == dir->i_ctime.tv_sec)
+ t == inode_get_ctime(dir).tv_sec)
return;
- dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
- dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
+ dir->i_mtime = inode_set_ctime(dir, t, 0);
hpfs_write_inode_nolock(dir);
}
@@ -59,10 +58,8 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
hpfs_i(result)->i_dno = dno;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ result->i_mtime = result->i_atime =
+ inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
hpfs_i(result)->i_ea_size = 0;
result->i_mode |= S_IFDIR;
result->i_op = &hpfs_dir_iops;
@@ -167,10 +164,8 @@ static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
result->i_fop = &hpfs_file_ops;
set_nlink(result, 1);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ result->i_mtime = result->i_atime =
+ inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
hpfs_i(result)->i_ea_size = 0;
if (dee.read_only)
result->i_mode &= ~0222;
@@ -250,10 +245,8 @@ static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ result->i_mtime = result->i_atime =
+ inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
hpfs_i(result)->i_ea_size = 0;
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
@@ -326,10 +319,8 @@ static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
- result->i_ctime.tv_nsec = 0;
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_nsec = 0;
+ result->i_mtime = result->i_atime =
+ inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
hpfs_i(result)->i_ea_size = 0;
result->i_mode = S_IFLNK | 0777;
result->i_uid = current_fsuid();
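inode_set_ctime() likewise returns the timespec64 it stored, which is what lets the hpfs call sites above collapse six per-field second/nanosecond stores into a single chained statement, roughly (cdate standing in for the on-disk creation date, purely for illustration):

	time64_t secs = local_to_gmt(dir->i_sb, le32_to_cpu(cdate));

	/* ctime, mtime and atime all become { secs, 0 } in one statement */
	result->i_mtime = result->i_atime = inode_set_ctime(result, secs, 0);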
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 1cb89595b875..758a51564124 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -729,8 +729,9 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
root->i_atime.tv_nsec = 0;
root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
root->i_mtime.tv_nsec = 0;
- root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
- root->i_ctime.tv_nsec = 0;
+ inode_set_ctime(root,
+ local_to_gmt(s, le32_to_cpu(de->creation_date)),
+ 0);
hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
hpfs_i(root)->i_parent_dir = root->i_ino;
if (root->i_size == -1)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 7b17ccfa039d..93d3bcfd4fc8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -887,7 +887,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
i_size_write(inode, offset + len);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
out:
inode_unlock(inode);
return error;
@@ -935,7 +935,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
inode->i_mode = S_IFDIR | ctx->mode;
inode->i_uid = ctx->uid;
inode->i_gid = ctx->gid;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -979,7 +979,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_mapping->private_data = resv_map;
info->seals = F_SEAL_SEAL;
switch (mode & S_IFMT) {
@@ -1022,7 +1022,7 @@ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (!inode)
return -ENOSPC;
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
d_instantiate(dentry, inode);
dget(dentry);/* Extra count - pin the dentry in core */
return 0;
@@ -1054,7 +1054,7 @@ static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
if (!inode)
return -ENOSPC;
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
d_tmpfile(file, inode);
return finish_open_simple(file, 0);
}
@@ -1076,7 +1076,7 @@ static int hugetlbfs_symlink(struct mnt_idmap *idmap,
} else
iput(inode);
}
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
return error;
}
diff --git a/fs/inode.c b/fs/inode.c
index 67611a360031..35fd688168c5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -751,16 +751,11 @@ EXPORT_SYMBOL_GPL(evict_inodes);
/**
* invalidate_inodes - attempt to free all inodes on a superblock
* @sb: superblock to operate on
- * @kill_dirty: flag to guide handling of dirty inodes
*
- * Attempts to free all inodes for a given superblock. If there were any
- * busy inodes return a non-zero value, else zero.
- * If @kill_dirty is set, discard dirty inodes too, otherwise treat
- * them as busy.
+ * Attempts to free all inodes (including dirty inodes) for a given superblock.
*/
-int invalidate_inodes(struct super_block *sb, bool kill_dirty)
+void invalidate_inodes(struct super_block *sb)
{
- int busy = 0;
struct inode *inode, *next;
LIST_HEAD(dispose);
@@ -772,14 +767,8 @@ again:
spin_unlock(&inode->i_lock);
continue;
}
- if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
- spin_unlock(&inode->i_lock);
- busy = 1;
- continue;
- }
if (atomic_read(&inode->i_count)) {
spin_unlock(&inode->i_lock);
- busy = 1;
continue;
}
@@ -797,8 +786,6 @@ again:
spin_unlock(&sb->s_inode_list_lock);
dispose_list(&dispose);
-
- return busy;
}
/*
@@ -1850,6 +1837,7 @@ EXPORT_SYMBOL(bmap);
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
+ struct timespec64 ctime;
if (!(mnt->mnt_flags & MNT_RELATIME))
return 1;
@@ -1861,7 +1849,8 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
/*
* Is ctime younger than or equal to atime? If yes, update atime:
*/
- if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
+ ctime = inode_get_ctime(inode);
+ if (timespec64_compare(&ctime, &inode->i_atime) >= 0)
return 1;
/*
@@ -1876,29 +1865,76 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
return 0;
}
-int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
+/**
+ * inode_update_timestamps - update the timestamps on the inode
+ * @inode: inode to be updated
+ * @flags: S_* flags that need to be updated
+ *
+ * The update_time function is called when an inode's timestamps need to be
+ * updated for a read or write operation. This function handles updating the
+ * actual timestamps. It's up to the caller to ensure that the inode is marked
+ * dirty appropriately.
+ *
+ * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
+ * attempt to update all three of them. S_ATIME updates can be handled
+ * independently of the rest.
+ *
+ * Returns a set of S_* flags indicating which values changed.
+ */
+int inode_update_timestamps(struct inode *inode, int flags)
{
- int dirty_flags = 0;
+ int updated = 0;
+ struct timespec64 now;
+
+ if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
+ struct timespec64 ctime = inode_get_ctime(inode);
- if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
- if (flags & S_ATIME)
- inode->i_atime = *time;
- if (flags & S_CTIME)
- inode->i_ctime = *time;
- if (flags & S_MTIME)
- inode->i_mtime = *time;
-
- if (inode->i_sb->s_flags & SB_LAZYTIME)
- dirty_flags |= I_DIRTY_TIME;
- else
- dirty_flags |= I_DIRTY_SYNC;
+ now = inode_set_ctime_current(inode);
+ if (!timespec64_equal(&now, &ctime))
+ updated |= S_CTIME;
+ if (!timespec64_equal(&now, &inode->i_mtime)) {
+ inode->i_mtime = now;
+ updated |= S_MTIME;
+ }
+ if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
+ updated |= S_VERSION;
+ } else {
+ now = current_time(inode);
}
- if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
- dirty_flags |= I_DIRTY_SYNC;
+ if (flags & S_ATIME) {
+ if (!timespec64_equal(&now, &inode->i_atime)) {
+ inode->i_atime = now;
+ updated |= S_ATIME;
+ }
+ }
+ return updated;
+}
+EXPORT_SYMBOL(inode_update_timestamps);
+/**
+ * generic_update_time - update the timestamps on the inode
+ * @inode: inode to be updated
+ * @flags: S_* flags that need to be updated
+ *
+ * The update_time function is called when an inode's timestamps need to be
+ * updated for a read or write operation. In the case where any of S_MTIME, S_CTIME,
+ * or S_VERSION need to be updated, we attempt to update all three of them. S_ATIME
+ * updates can be handled independently of the rest.
+ *
+ * Returns a S_* mask indicating which fields were updated.
+ */
+int generic_update_time(struct inode *inode, int flags)
+{
+ int updated = inode_update_timestamps(inode, flags);
+ int dirty_flags = 0;
+
+ if (updated & (S_ATIME|S_MTIME|S_CTIME))
+ dirty_flags = inode->i_sb->s_flags & SB_LAZYTIME ? I_DIRTY_TIME : I_DIRTY_SYNC;
+ if (updated & S_VERSION)
+ dirty_flags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, dirty_flags);
- return 0;
+ return updated;
}
EXPORT_SYMBOL(generic_update_time);
@@ -1906,11 +1942,12 @@ EXPORT_SYMBOL(generic_update_time);
* This does the actual work of updating an inode's time or version. Must have
* called mnt_want_write() before calling this.
*/
-int inode_update_time(struct inode *inode, struct timespec64 *time, int flags)
+int inode_update_time(struct inode *inode, int flags)
{
if (inode->i_op->update_time)
- return inode->i_op->update_time(inode, time, flags);
- return generic_update_time(inode, time, flags);
+ return inode->i_op->update_time(inode, flags);
+ generic_update_time(inode, flags);
+ return 0;
}
EXPORT_SYMBOL(inode_update_time);
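With the timespec64 argument gone from ->update_time, the current time is sampled inside inode_update_timestamps() and callers only pass the S_* flags. A sketch of what a filesystem-specific hook could look like against the new interface (foo_update_time() is an assumption; the dirtying policy simply mirrors generic_update_time() above):

static int foo_update_time(struct inode *inode, int flags)
{
	int updated = inode_update_timestamps(inode, flags);

	/* mark dirty only for the fields that actually changed */
	if (updated & (S_MTIME | S_CTIME | S_VERSION))
		__mark_inode_dirty(inode, I_DIRTY_SYNC);
	else if (updated & S_ATIME)
		__mark_inode_dirty(inode, I_DIRTY_TIME);
	return 0;
}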
@@ -1962,7 +1999,6 @@ void touch_atime(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
struct inode *inode = d_inode(path->dentry);
- struct timespec64 now;
if (!atime_needs_update(path, inode))
return;
@@ -1981,8 +2017,7 @@ void touch_atime(const struct path *path)
* We may also fail on filesystems that have the ability to make parts
* of the fs read only, e.g. subvolumes in Btrfs.
*/
- now = current_time(inode);
- inode_update_time(inode, &now, S_ATIME);
+ inode_update_time(inode, S_ATIME);
__mnt_drop_write(mnt);
skip_update:
sb_end_write(inode->i_sb);
@@ -2067,18 +2102,63 @@ int file_remove_privs(struct file *file)
}
EXPORT_SYMBOL(file_remove_privs);
-static int inode_needs_update_time(struct inode *inode, struct timespec64 *now)
+/**
+ * current_mgtime - Return FS time (possibly fine-grained)
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs, as suitable for a ctime/mtime change. If the ctime is flagged
+ * as having been QUERIED, get a fine-grained timestamp.
+ */
+struct timespec64 current_mgtime(struct inode *inode)
+{
+ struct timespec64 now, ctime;
+ atomic_long_t *pnsec = (atomic_long_t *)&inode->__i_ctime.tv_nsec;
+ long nsec = atomic_long_read(pnsec);
+
+ if (nsec & I_CTIME_QUERIED) {
+ ktime_get_real_ts64(&now);
+ return timestamp_truncate(now, inode);
+ }
+
+ ktime_get_coarse_real_ts64(&now);
+ now = timestamp_truncate(now, inode);
+
+ /*
+ * If we've recently fetched a fine-grained timestamp,
+ * then the coarse-grained one may still be earlier than the
+ * existing ctime. Just keep the existing value if so.
+ */
+ ctime = inode_get_ctime(inode);
+ if (timespec64_compare(&ctime, &now) > 0)
+ now = ctime;
+
+ return now;
+}
+EXPORT_SYMBOL(current_mgtime);
+
+static struct timespec64 current_ctime(struct inode *inode)
+{
+ if (is_mgtime(inode))
+ return current_mgtime(inode);
+ return current_time(inode);
+}
+
+static int inode_needs_update_time(struct inode *inode)
{
int sync_it = 0;
+ struct timespec64 now = current_ctime(inode);
+ struct timespec64 ctime;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
- if (!timespec64_equal(&inode->i_mtime, now))
+ if (!timespec64_equal(&inode->i_mtime, &now))
sync_it = S_MTIME;
- if (!timespec64_equal(&inode->i_ctime, now))
+ ctime = inode_get_ctime(inode);
+ if (!timespec64_equal(&ctime, &now))
sync_it |= S_CTIME;
if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
@@ -2087,15 +2167,14 @@ static int inode_needs_update_time(struct inode *inode, struct timespec64 *now)
return sync_it;
}
-static int __file_update_time(struct file *file, struct timespec64 *now,
- int sync_mode)
+static int __file_update_time(struct file *file, int sync_mode)
{
int ret = 0;
struct inode *inode = file_inode(file);
/* try to update time settings */
if (!__mnt_want_write_file(file)) {
- ret = inode_update_time(inode, now, sync_mode);
+ ret = inode_update_time(inode, sync_mode);
__mnt_drop_write_file(file);
}
@@ -2120,13 +2199,12 @@ int file_update_time(struct file *file)
{
int ret;
struct inode *inode = file_inode(file);
- struct timespec64 now = current_time(inode);
- ret = inode_needs_update_time(inode, &now);
+ ret = inode_needs_update_time(inode);
if (ret <= 0)
return ret;
- return __file_update_time(file, &now, ret);
+ return __file_update_time(file, ret);
}
EXPORT_SYMBOL(file_update_time);
@@ -2149,7 +2227,6 @@ static int file_modified_flags(struct file *file, int flags)
{
int ret;
struct inode *inode = file_inode(file);
- struct timespec64 now = current_time(inode);
/*
* Clear the security bits if the process is not being run by root.
@@ -2162,13 +2239,13 @@ static int file_modified_flags(struct file *file, int flags)
if (unlikely(file->f_mode & FMODE_NOCMTIME))
return 0;
- ret = inode_needs_update_time(inode, &now);
+ ret = inode_needs_update_time(inode);
if (ret <= 0)
return ret;
if (flags & IOCB_NOWAIT)
return -EAGAIN;
- return __file_update_time(file, &now, ret);
+ return __file_update_time(file, ret);
}
/**
@@ -2488,15 +2565,59 @@ struct timespec64 current_time(struct inode *inode)
struct timespec64 now;
ktime_get_coarse_real_ts64(&now);
+ return timestamp_truncate(now, inode);
+}
+EXPORT_SYMBOL(current_time);
- if (unlikely(!inode->i_sb)) {
- WARN(1, "current_time() called with uninitialized super_block in the inode");
+/**
+ * inode_set_ctime_current - set the ctime to current_time
+ * @inode: inode
+ *
+ * Set the inode's ctime to the current time for the inode. Returns
+ * the value that was assigned to i_ctime.
+ */
+struct timespec64 inode_set_ctime_current(struct inode *inode)
+{
+ struct timespec64 now;
+ struct timespec64 ctime;
+
+ ctime.tv_nsec = READ_ONCE(inode->__i_ctime.tv_nsec);
+ if (!(ctime.tv_nsec & I_CTIME_QUERIED)) {
+ now = current_time(inode);
+
+ /* Just copy it into place if it's not multigrain */
+ if (!is_mgtime(inode)) {
+ inode_set_ctime_to_ts(inode, now);
+ return now;
+ }
+
+ /*
+ * If we've recently updated with a fine-grained timestamp,
+ * then the coarse-grained one may still be earlier than the
+ * existing ctime. Just keep the existing value if so.
+ */
+ ctime.tv_sec = inode->__i_ctime.tv_sec;
+ if (timespec64_compare(&ctime, &now) > 0)
+ return ctime;
+
+ /*
+ * Ctime updates are usually protected by the inode_lock, but
+ * we can still race with someone setting the QUERIED flag.
+ * Try to swap the new nsec value into place. If it's changed
+ * in the interim, then just go with a fine-grained timestamp.
+ */
+ if (cmpxchg(&inode->__i_ctime.tv_nsec, ctime.tv_nsec,
+ now.tv_nsec) != ctime.tv_nsec)
+ goto fine_grained;
+ inode->__i_ctime.tv_sec = now.tv_sec;
return now;
}
-
- return timestamp_truncate(now, inode);
+fine_grained:
+ ktime_get_real_ts64(&now);
+ inode_set_ctime_to_ts(inode, timestamp_truncate(now, inode));
+ return now;
}
-EXPORT_SYMBOL(current_time);
+EXPORT_SYMBOL(inode_set_ctime_current);
/**
* in_group_or_capable - check whether caller is CAP_FSETID privileged
diff --git a/fs/internal.h b/fs/internal.h
index f7a3dc111026..74d3b161dd2c 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -115,7 +115,7 @@ static inline void put_file_access(struct file *file)
* super.c
*/
extern int reconfigure_super(struct fs_context *);
-extern bool trylock_super(struct super_block *sb);
+extern bool super_trylock_shared(struct super_block *sb);
struct super_block *user_get_super(dev_t, bool excl);
void put_super(struct super_block *sb);
extern bool mount_capable(struct fs_context *);
@@ -201,7 +201,7 @@ void lock_two_inodes(struct inode *inode1, struct inode *inode2,
* fs-writeback.c
*/
extern long get_nr_dirty_inodes(void);
-extern int invalidate_inodes(struct super_block *, bool);
+void invalidate_inodes(struct super_block *sb);
/*
* dcache.c
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 5b2481cd4750..f5fd99d6b0d4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -109,9 +109,6 @@ static int ioctl_fibmap(struct file *filp, int __user *p)
* Returns 0 on success, -errno on error, 1 if this was the last
* extent that will fit in user array.
*/
-#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
-#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
-#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
u64 phys, u64 len, u32 flags)
{
@@ -127,6 +124,10 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
return 1;
+#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
+#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
+#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
+
if (flags & SET_UNKNOWN_FLAGS)
flags |= FIEMAP_EXTENT_UNKNOWN;
if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
@@ -396,8 +397,8 @@ static int ioctl_fsfreeze(struct file *filp)
/* Freeze */
if (sb->s_op->freeze_super)
- return sb->s_op->freeze_super(sb);
- return freeze_super(sb);
+ return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
+ return freeze_super(sb, FREEZE_HOLDER_USERSPACE);
}
static int ioctl_fsthaw(struct file *filp)
@@ -409,8 +410,8 @@ static int ioctl_fsthaw(struct file *filp)
/* Thaw */
if (sb->s_op->thaw_super)
- return sb->s_op->thaw_super(sb);
- return thaw_super(sb);
+ return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
+ return thaw_super(sb, FREEZE_HOLDER_USERSPACE);
}
static int ioctl_file_dedupe_range(struct file *file,
@@ -877,6 +878,9 @@ out:
#ifdef CONFIG_COMPAT
/**
* compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
+ * @file: The file to operate on.
+ * @cmd: The ioctl command number.
+ * @arg: The argument to the ioctl.
*
* This is not normally called as a function, but instead set in struct
* file_operations as
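The kdoc being extended above is the standard usage note for compat_ptr_ioctl(): it is not called directly but wired into file_operations next to the native handler, e.g. (foo_ioctl is a placeholder):

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= foo_ioctl,		/* placeholder native handler */
	.compat_ioctl	= compat_ptr_ioctl,	/* converts the compat pointer arg */
};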
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index aa8967cca1a3..283fb96f6609 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -23,65 +23,169 @@
#define IOEND_BATCH_SIZE 4096
+typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
/*
- * Structure allocated for each folio when block size < folio size
- * to track sub-folio uptodate status and I/O completions.
+ * Structure allocated for each folio to track per-block uptodate, dirty state
+ * and I/O completions.
*/
-struct iomap_page {
+struct iomap_folio_state {
atomic_t read_bytes_pending;
atomic_t write_bytes_pending;
- spinlock_t uptodate_lock;
- unsigned long uptodate[];
+ spinlock_t state_lock;
+
+ /*
+ * Each block has two bits in this bitmap:
+ * Bits [0..blocks_per_folio) hold the uptodate status.
+ * Bits [blocks_per_folio..2*blocks_per_folio) hold the dirty status.
+ */
+ unsigned long state[];
};
-static inline struct iomap_page *to_iomap_page(struct folio *folio)
+static struct bio_set iomap_ioend_bioset;
+
+static inline bool ifs_is_fully_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs)
{
- if (folio_test_private(folio))
- return folio_get_private(folio);
- return NULL;
+ struct inode *inode = folio->mapping->host;
+
+ return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}
-static struct bio_set iomap_ioend_bioset;
+static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
+ unsigned int block)
+{
+ return test_bit(block, ifs->state);
+}
+
+static void ifs_set_range_uptodate(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int first_blk = off >> inode->i_blkbits;
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_set(ifs->state, first_blk, nr_blks);
+ if (ifs_is_fully_uptodate(folio, ifs))
+ folio_mark_uptodate(folio);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_set_range_uptodate(struct folio *folio, size_t off,
+ size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_set_range_uptodate(folio, ifs, off, len);
+ else
+ folio_mark_uptodate(folio);
+}
+
+static inline bool ifs_block_is_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, int block)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+
+ return test_bit(block + blks_per_folio, ifs->state);
+}
+
+static void ifs_clear_range_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int first_blk = (off >> inode->i_blkbits);
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
-static struct iomap_page *
-iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
- struct iomap_page *iop = to_iomap_page(folio);
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_clear_range_dirty(folio, ifs, off, len);
+}
+
+static void ifs_set_range_dirty(struct folio *folio,
+ struct iomap_folio_state *ifs, size_t off, size_t len)
+{
+ struct inode *inode = folio->mapping->host;
+ unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+ unsigned int first_blk = (off >> inode->i_blkbits);
+ unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
+ unsigned int nr_blks = last_blk - first_blk + 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ifs->state_lock, flags);
+ bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
+ spin_unlock_irqrestore(&ifs->state_lock, flags);
+}
+
+static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
+{
+ struct iomap_folio_state *ifs = folio->private;
+
+ if (ifs)
+ ifs_set_range_dirty(folio, ifs, off, len);
+}
+
+static struct iomap_folio_state *ifs_alloc(struct inode *inode,
+ struct folio *folio, unsigned int flags)
+{
+ struct iomap_folio_state *ifs = folio->private;
unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
gfp_t gfp;
- if (iop || nr_blocks <= 1)
- return iop;
+ if (ifs || nr_blocks <= 1)
+ return ifs;
if (flags & IOMAP_NOWAIT)
gfp = GFP_NOWAIT;
else
gfp = GFP_NOFS | __GFP_NOFAIL;
- iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
- gfp);
- if (iop) {
- spin_lock_init(&iop->uptodate_lock);
- if (folio_test_uptodate(folio))
- bitmap_fill(iop->uptodate, nr_blocks);
- folio_attach_private(folio, iop);
- }
- return iop;
+ /*
+ * ifs->state tracks two sets of state flags when the
+ * filesystem block size is smaller than the folio size.
+ * The first state tracks per-block uptodate and the
+ * second tracks per-block dirty state.
+ */
+ ifs = kzalloc(struct_size(ifs, state,
+ BITS_TO_LONGS(2 * nr_blocks)), gfp);
+ if (!ifs)
+ return ifs;
+
+ spin_lock_init(&ifs->state_lock);
+ if (folio_test_uptodate(folio))
+ bitmap_set(ifs->state, 0, nr_blocks);
+ if (folio_test_dirty(folio))
+ bitmap_set(ifs->state, nr_blocks, nr_blocks);
+ folio_attach_private(folio, ifs);
+
+ return ifs;
}
-static void iomap_page_release(struct folio *folio)
+static void ifs_free(struct folio *folio)
{
- struct iomap_page *iop = folio_detach_private(folio);
- struct inode *inode = folio->mapping->host;
- unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
+ struct iomap_folio_state *ifs = folio_detach_private(folio);
- if (!iop)
+ if (!ifs)
return;
- WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
- WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
- WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
+ WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
+ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
+ WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
folio_test_uptodate(folio));
- kfree(iop);
+ kfree(ifs);
}
/*
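The state[] bitmap in struct iomap_folio_state above packs two per-block bitmaps back to back, so uptodate bits are indexed by block number and dirty bits by block + blocks_per_folio. A worked sketch of the indexing, assuming a 16k folio over 4k blocks and that inode, folio, ifs and block are already in scope:

	/*
	 * blocks_per_folio = 16k / 4k = 4
	 *   bits 0..3 -> per-block uptodate
	 *   bits 4..7 -> per-block dirty
	 */
	unsigned int blocks_per_folio = i_blocks_per_folio(inode, folio);
	bool uptodate = test_bit(block, ifs->state);
	bool dirty = test_bit(block + blocks_per_folio, ifs->state);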
@@ -90,7 +194,7 @@ static void iomap_page_release(struct folio *folio)
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
- struct iomap_page *iop = to_iomap_page(folio);
+ struct iomap_folio_state *ifs = folio->private;
loff_t orig_pos = *pos;
loff_t isize = i_size_read(inode);
unsigned block_bits = inode->i_blkbits;
@@ -105,12 +209,12 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
* per-block uptodate status and adjust the offset and length if needed
* to avoid reading in already uptodate ranges.
*/
- if (iop) {
+ if (ifs) {
unsigned int i;
/* move forward for each leading block marked uptodate */
for (i = first; i <= last; i++) {
- if (!test_bit(i, iop->uptodate))
+ if (!ifs_block_is_uptodate(ifs, i))
break;
*pos += block_size;
poff += block_size;
@@ -120,7 +224,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
/* truncate len if we find any trailing uptodate block(s) */
for ( ; i <= last; i++) {
- if (test_bit(i, iop->uptodate)) {
+ if (ifs_block_is_uptodate(ifs, i)) {
plen -= (last - i + 1) * block_size;
last = i - 1;
break;
@@ -144,43 +248,19 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
*lenp = plen;
}
-static void iomap_iop_set_range_uptodate(struct folio *folio,
- struct iomap_page *iop, size_t off, size_t len)
-{
- struct inode *inode = folio->mapping->host;
- unsigned first = off >> inode->i_blkbits;
- unsigned last = (off + len - 1) >> inode->i_blkbits;
- unsigned long flags;
-
- spin_lock_irqsave(&iop->uptodate_lock, flags);
- bitmap_set(iop->uptodate, first, last - first + 1);
- if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
- folio_mark_uptodate(folio);
- spin_unlock_irqrestore(&iop->uptodate_lock, flags);
-}
-
-static void iomap_set_range_uptodate(struct folio *folio,
- struct iomap_page *iop, size_t off, size_t len)
-{
- if (iop)
- iomap_iop_set_range_uptodate(folio, iop, off, len);
- else
- folio_mark_uptodate(folio);
-}
-
static void iomap_finish_folio_read(struct folio *folio, size_t offset,
size_t len, int error)
{
- struct iomap_page *iop = to_iomap_page(folio);
+ struct iomap_folio_state *ifs = folio->private;
if (unlikely(error)) {
folio_clear_uptodate(folio);
folio_set_error(folio);
} else {
- iomap_set_range_uptodate(folio, iop, offset, len);
+ iomap_set_range_uptodate(folio, offset, len);
}
- if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
+ if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
folio_unlock(folio);
}
@@ -213,7 +293,6 @@ struct iomap_readpage_ctx {
static int iomap_read_inline_data(const struct iomap_iter *iter,
struct folio *folio)
{
- struct iomap_page *iop;
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
size_t poff = offset_in_page(iomap->offset);
@@ -231,15 +310,13 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
if (offset > 0)
- iop = iomap_page_create(iter->inode, folio, iter->flags);
- else
- iop = to_iomap_page(folio);
+ ifs_alloc(iter->inode, folio, iter->flags);
addr = kmap_local_folio(folio, offset);
memcpy(addr, iomap->inline_data, size);
memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr);
- iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
+ iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
return 0;
}
@@ -260,7 +337,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
loff_t pos = iter->pos + offset;
loff_t length = iomap_length(iter) - offset;
struct folio *folio = ctx->cur_folio;
- struct iomap_page *iop;
+ struct iomap_folio_state *ifs;
loff_t orig_pos = pos;
size_t poff, plen;
sector_t sector;
@@ -269,20 +346,20 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
return iomap_read_inline_data(iter, folio);
/* zero post-eof blocks as the page may be mapped */
- iop = iomap_page_create(iter->inode, folio, iter->flags);
+ ifs = ifs_alloc(iter->inode, folio, iter->flags);
iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
if (iomap_block_needs_zeroing(iter, pos)) {
folio_zero_range(folio, poff, plen);
- iomap_set_range_uptodate(folio, iop, poff, plen);
+ iomap_set_range_uptodate(folio, poff, plen);
goto done;
}
ctx->cur_folio_in_bio = true;
- if (iop)
- atomic_add(plen, &iop->read_bytes_pending);
+ if (ifs)
+ atomic_add(plen, &ifs->read_bytes_pending);
sector = iomap_sector(iomap, pos);
if (!ctx->bio ||
@@ -436,11 +513,11 @@ EXPORT_SYMBOL_GPL(iomap_readahead);
*/
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
- struct iomap_page *iop = to_iomap_page(folio);
+ struct iomap_folio_state *ifs = folio->private;
struct inode *inode = folio->mapping->host;
unsigned first, last, i;
- if (!iop)
+ if (!ifs)
return false;
/* Caller's range may extend past the end of this folio */
@@ -451,7 +528,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
last = (from + count - 1) >> inode->i_blkbits;
for (i = first; i <= last; i++)
- if (!test_bit(i, iop->uptodate))
+ if (!ifs_block_is_uptodate(ifs, i))
return false;
return true;
}
@@ -461,16 +538,18 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
* iomap_get_folio - get a folio reference for writing
* @iter: iteration structure
* @pos: start offset of write
+ * @len: Suggested size of folio to create.
*
* Returns a locked reference to the folio at @pos, or an error pointer if the
* folio could not be obtained.
*/
-struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
- unsigned fgp = FGP_WRITEBEGIN | FGP_NOFS;
+ fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
if (iter->flags & IOMAP_NOWAIT)
fgp |= FGP_NOWAIT;
+ fgp |= fgf_set_order(len);
return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
fgp, mapping_gfp_mask(iter->inode->i_mapping));
@@ -483,14 +562,13 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
folio_size(folio));
/*
- * mm accommodates an old ext3 case where clean folios might
- * not have had the dirty bit cleared. Thus, it can send actual
- * dirty folios to ->release_folio() via shrink_active_list();
- * skip those here.
+ * If the folio is dirty, we refuse to release our metadata because
+ * it may be partially dirty. Once we track per-block dirty state,
+ * we can release the metadata if every block is dirty.
*/
- if (folio_test_dirty(folio) || folio_test_writeback(folio))
+ if (folio_test_dirty(folio))
return false;
- iomap_page_release(folio);
+ ifs_free(folio);
return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);
@@ -507,16 +585,22 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
if (offset == 0 && len == folio_size(folio)) {
WARN_ON_ONCE(folio_test_writeback(folio));
folio_cancel_dirty(folio);
- iomap_page_release(folio);
- } else if (folio_test_large(folio)) {
- /* Must release the iop so the page can be split */
- WARN_ON_ONCE(!folio_test_uptodate(folio) &&
- folio_test_dirty(folio));
- iomap_page_release(folio);
+ ifs_free(folio);
}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
+{
+ struct inode *inode = mapping->host;
+ size_t len = folio_size(folio);
+
+ ifs_alloc(inode, folio, 0);
+ iomap_set_range_dirty(folio, 0, len);
+ return filemap_dirty_folio(mapping, folio);
+}
+EXPORT_SYMBOL_GPL(iomap_dirty_folio);
+
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
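iomap_dirty_folio() is exported so filesystems on the iomap buffered-I/O path can allocate the per-folio state and record per-block dirty bits at the moment a folio is dirtied; the expected hookup is via address_space_operations, sketched here under the assumption that the filesystem otherwise uses the stock iomap helpers (the foo_* entries are placeholders for the usual per-fs wrappers):

static const struct address_space_operations foo_aops = {
	.read_folio		= foo_read_folio,	/* wrapper around iomap_read_folio() */
	.readahead		= foo_readahead,	/* wrapper around iomap_readahead() */
	.writepages		= foo_writepages,	/* fs-specific, hypothetical */
	.dirty_folio		= iomap_dirty_folio,	/* replaces filemap_dirty_folio here */
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};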
@@ -547,7 +631,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
size_t len, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct iomap_page *iop;
+ struct iomap_folio_state *ifs;
loff_t block_size = i_blocksize(iter->inode);
loff_t block_start = round_down(pos, block_size);
loff_t block_end = round_up(pos + len, block_size);
@@ -555,14 +639,23 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
size_t from = offset_in_folio(folio, pos), to = from + len;
size_t poff, plen;
- if (folio_test_uptodate(folio))
+ /*
+ * If the write completely overlaps the current folio, then
+ * the entire folio will be dirtied, so there is no need for
+ * per-block state tracking structures to be attached to this folio.
+ */
+ if (pos <= folio_pos(folio) &&
+ pos + len >= folio_pos(folio) + folio_size(folio))
return 0;
- folio_clear_error(folio);
- iop = iomap_page_create(iter->inode, folio, iter->flags);
- if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
+ ifs = ifs_alloc(iter->inode, folio, iter->flags);
+ if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
return -EAGAIN;
+ if (folio_test_uptodate(folio))
+ return 0;
+ folio_clear_error(folio);
+
do {
iomap_adjust_read_range(iter->inode, folio, &block_start,
block_end - block_start, &poff, &plen);
@@ -589,7 +682,7 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (status)
return status;
}
- iomap_set_range_uptodate(folio, iop, poff, plen);
+ iomap_set_range_uptodate(folio, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
@@ -603,7 +696,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
if (folio_ops && folio_ops->get_folio)
return folio_ops->get_folio(iter, pos, len);
else
- return iomap_get_folio(iter, pos);
+ return iomap_get_folio(iter, pos, len);
}
static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
@@ -696,7 +789,6 @@ out_unlock:
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
- struct iomap_page *iop = to_iomap_page(folio);
flush_dcache_folio(folio);
/*
@@ -712,7 +804,8 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
*/
if (unlikely(copied < len && !folio_test_uptodate(folio)))
return 0;
- iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
+ iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
+ iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
filemap_dirty_folio(inode->i_mapping, folio);
return copied;
}
@@ -773,6 +866,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
loff_t length = iomap_length(iter);
+ size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
loff_t pos = iter->pos;
ssize_t written = 0;
long status = 0;
@@ -781,15 +875,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
do {
struct folio *folio;
- struct page *page;
- unsigned long offset; /* Offset into pagecache page */
- unsigned long bytes; /* Bytes to write to page */
+ size_t offset; /* Offset into folio */
+ size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
- offset = offset_in_page(pos);
- bytes = min_t(unsigned long, PAGE_SIZE - offset,
- iov_iter_count(i));
-again:
+ offset = pos & (chunk - 1);
+ bytes = min(chunk - offset, iov_iter_count(i));
status = balance_dirty_pages_ratelimited_flags(mapping,
bdp_flags);
if (unlikely(status))
@@ -819,12 +910,14 @@ again:
if (iter->iomap.flags & IOMAP_F_STALE)
break;
- page = folio_file_page(folio, pos >> PAGE_SHIFT);
- if (mapping_writably_mapped(mapping))
- flush_dcache_page(page);
+ offset = offset_in_folio(folio, pos);
+ if (bytes > folio_size(folio) - offset)
+ bytes = folio_size(folio) - offset;
- copied = copy_page_from_iter_atomic(page, offset, bytes, i);
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_folio(folio);
+ copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
status = iomap_write_end(iter, pos, bytes, copied, folio);
if (unlikely(copied != status))
@@ -840,11 +933,13 @@ again:
*/
if (copied)
bytes = copied;
- goto again;
+ if (chunk > PAGE_SIZE)
+ chunk /= 2;
+ } else {
+ pos += status;
+ written += status;
+ length -= status;
}
- pos += status;
- written += status;
- length -= status;
} while (iov_iter_count(i) && length);
if (status == -EAGAIN) {
@@ -880,6 +975,76 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
+static int iomap_write_delalloc_ifs_punch(struct inode *inode,
+ struct folio *folio, loff_t start_byte, loff_t end_byte,
+ iomap_punch_t punch)
+{
+ unsigned int first_blk, last_blk, i;
+ loff_t last_byte;
+ u8 blkbits = inode->i_blkbits;
+ struct iomap_folio_state *ifs;
+ int ret = 0;
+
+ /*
+ * When we have per-block dirty tracking, there can be
+ * blocks within a folio which are marked uptodate
+ * but not dirty. In that case it is necessary to punch
+ * out such blocks to avoid leaking any delalloc blocks.
+ */
+ ifs = folio->private;
+ if (!ifs)
+ return ret;
+
+ last_byte = min_t(loff_t, end_byte - 1,
+ folio_pos(folio) + folio_size(folio) - 1);
+ first_blk = offset_in_folio(folio, start_byte) >> blkbits;
+ last_blk = offset_in_folio(folio, last_byte) >> blkbits;
+ for (i = first_blk; i <= last_blk; i++) {
+ if (!ifs_block_is_dirty(folio, ifs, i)) {
+ ret = punch(inode, folio_pos(folio) + (i << blkbits),
+ 1 << blkbits);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+
+static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+ loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
+ iomap_punch_t punch)
+{
+ int ret = 0;
+
+ if (!folio_test_dirty(folio))
+ return ret;
+
+ /* if dirty, punch up to offset */
+ if (start_byte > *punch_start_byte) {
+ ret = punch(inode, *punch_start_byte,
+ start_byte - *punch_start_byte);
+ if (ret)
+ return ret;
+ }
+
+ /* Punch non-dirty blocks within folio */
+ ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
+ end_byte, punch);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure the next punch start is correctly bound to
+ * the end of this data range, not the end of the folio.
+ */
+ *punch_start_byte = min_t(loff_t, end_byte,
+ folio_pos(folio) + folio_size(folio));
+
+ return ret;
+}
+
/*
* Scan the data range passed to us for dirty page cache folios. If we find a
* dirty folio, punch out the preceding range and update the offset from which
@@ -899,10 +1064,11 @@ EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
*/
static int iomap_write_delalloc_scan(struct inode *inode,
loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
- int (*punch)(struct inode *inode, loff_t offset, loff_t length))
+ iomap_punch_t punch)
{
while (start_byte < end_byte) {
struct folio *folio;
+ int ret;
/* grab locked page */
folio = filemap_lock_folio(inode->i_mapping,
@@ -913,26 +1079,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
continue;
}
- /* if dirty, punch up to offset */
- if (folio_test_dirty(folio)) {
- if (start_byte > *punch_start_byte) {
- int error;
-
- error = punch(inode, *punch_start_byte,
- start_byte - *punch_start_byte);
- if (error) {
- folio_unlock(folio);
- folio_put(folio);
- return error;
- }
- }
-
- /*
- * Make sure the next punch start is correctly bound to
- * the end of this data range, not the end of the folio.
- */
- *punch_start_byte = min_t(loff_t, end_byte,
- folio_next_index(folio) << PAGE_SHIFT);
+ ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+ start_byte, end_byte, punch);
+ if (ret) {
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
}
/* move offset to start of next folio in range */
@@ -977,8 +1129,7 @@ static int iomap_write_delalloc_scan(struct inode *inode,
* the code to subtle off-by-one bugs....
*/
static int iomap_write_delalloc_release(struct inode *inode,
- loff_t start_byte, loff_t end_byte,
- int (*punch)(struct inode *inode, loff_t pos, loff_t length))
+ loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
{
loff_t punch_start_byte = start_byte;
loff_t scan_end_byte = min(i_size_read(inode), end_byte);
@@ -1071,8 +1222,7 @@ out_unlock:
*/
int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
struct iomap *iomap, loff_t pos, loff_t length,
- ssize_t written,
- int (*punch)(struct inode *inode, loff_t pos, loff_t length))
+ ssize_t written, iomap_punch_t punch)
{
loff_t start_byte;
loff_t end_byte;
@@ -1293,17 +1443,17 @@ EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
size_t len, int error)
{
- struct iomap_page *iop = to_iomap_page(folio);
+ struct iomap_folio_state *ifs = folio->private;
if (error) {
folio_set_error(folio);
mapping_set_error(inode->i_mapping, error);
}
- WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
+ WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
- if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
+ if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
folio_end_writeback(folio);
}
@@ -1570,7 +1720,7 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
*/
static void
iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
- struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
+ struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct list_head *iolist)
{
sector_t sector = iomap_sector(&wpc->iomap, pos);
@@ -1588,8 +1738,8 @@ iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
}
- if (iop)
- atomic_add(len, &iop->write_bytes_pending);
+ if (ifs)
+ atomic_add(len, &ifs->write_bytes_pending);
wpc->ioend->io_size += len;
wbc_account_cgroup_owner(wbc, &folio->page, len);
}
@@ -1615,7 +1765,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
struct folio *folio, u64 end_pos)
{
- struct iomap_page *iop = iomap_page_create(inode, folio, 0);
+ struct iomap_folio_state *ifs = folio->private;
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
unsigned nblocks = i_blocks_per_folio(inode, folio);
@@ -1623,7 +1773,14 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
- WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
+ WARN_ON_ONCE(end_pos <= pos);
+
+ if (!ifs && nblocks > 1) {
+ ifs = ifs_alloc(inode, folio, 0);
+ iomap_set_range_dirty(folio, 0, end_pos - pos);
+ }
+
+ WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);
/*
* Walk through the folio to find areas to write back. If we
@@ -1631,7 +1788,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* invalid, grab a new one.
*/
for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
- if (iop && !test_bit(i, iop->uptodate))
+ if (ifs && !ifs_block_is_dirty(folio, ifs, i))
continue;
error = wpc->ops->map_blocks(wpc, inode, pos);
@@ -1642,7 +1799,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
continue;
if (wpc->iomap.type == IOMAP_HOLE)
continue;
- iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
+ iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
&submit_list);
count++;
}
@@ -1675,6 +1832,12 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
}
}
+ /*
+ * We can have dirty bits set past end of file in page_mkwrite path
+ * while mapping the last partial folio. Hence it's better to clear
+ * all the dirty bits in the folio here.
+ */
+ iomap_clear_range_dirty(folio, 0, folio_size(folio));
folio_start_writeback(folio);
folio_unlock(folio);
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index ea3b868c8355..bcd3f8cf5ea4 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -20,10 +20,12 @@
* Private flags for iomap_dio, must not overlap with the public ones in
* iomap.h:
*/
-#define IOMAP_DIO_WRITE_FUA (1 << 28)
-#define IOMAP_DIO_NEED_SYNC (1 << 29)
-#define IOMAP_DIO_WRITE (1 << 30)
-#define IOMAP_DIO_DIRTY (1 << 31)
+#define IOMAP_DIO_CALLER_COMP (1U << 26)
+#define IOMAP_DIO_INLINE_COMP (1U << 27)
+#define IOMAP_DIO_WRITE_THROUGH (1U << 28)
+#define IOMAP_DIO_NEED_SYNC (1U << 29)
+#define IOMAP_DIO_WRITE (1U << 30)
+#define IOMAP_DIO_DIRTY (1U << 31)
struct iomap_dio {
struct kiocb *iocb;
@@ -41,7 +43,6 @@ struct iomap_dio {
struct {
struct iov_iter *iter;
struct task_struct *waiter;
- struct bio *poll_bio;
} submit;
/* used for aio completion: */
@@ -63,12 +64,14 @@ static struct bio *iomap_dio_alloc_bio(const struct iomap_iter *iter,
static void iomap_dio_submit_bio(const struct iomap_iter *iter,
struct iomap_dio *dio, struct bio *bio, loff_t pos)
{
+ struct kiocb *iocb = dio->iocb;
+
atomic_inc(&dio->ref);
/* Sync dio can't be polled reliably */
- if ((dio->iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(dio->iocb)) {
- bio_set_polled(bio, dio->iocb);
- dio->submit.poll_bio = bio;
+ if ((iocb->ki_flags & IOCB_HIPRI) && !is_sync_kiocb(iocb)) {
+ bio_set_polled(bio, iocb);
+ WRITE_ONCE(iocb->private, bio);
}
if (dio->dops && dio->dops->submit_io)
@@ -130,6 +133,11 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);
+static ssize_t iomap_dio_deferred_complete(void *data)
+{
+ return iomap_dio_complete(data);
+}
+
static void iomap_dio_complete_work(struct work_struct *work)
{
struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
@@ -152,27 +160,69 @@ void iomap_dio_bio_end_io(struct bio *bio)
{
struct iomap_dio *dio = bio->bi_private;
bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
+ struct kiocb *iocb = dio->iocb;
if (bio->bi_status)
iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
+ if (!atomic_dec_and_test(&dio->ref))
+ goto release_bio;
- if (atomic_dec_and_test(&dio->ref)) {
- if (dio->wait_for_completion) {
- struct task_struct *waiter = dio->submit.waiter;
- WRITE_ONCE(dio->submit.waiter, NULL);
- blk_wake_io_task(waiter);
- } else if (dio->flags & IOMAP_DIO_WRITE) {
- struct inode *inode = file_inode(dio->iocb->ki_filp);
-
- WRITE_ONCE(dio->iocb->private, NULL);
- INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
- queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
- } else {
- WRITE_ONCE(dio->iocb->private, NULL);
- iomap_dio_complete_work(&dio->aio.work);
- }
+ /*
+ * Synchronous dio, the task itself will handle any completion work
+ * that is needed after IO. All we need to do is wake the task.
+ */
+ if (dio->wait_for_completion) {
+ struct task_struct *waiter = dio->submit.waiter;
+
+ WRITE_ONCE(dio->submit.waiter, NULL);
+ blk_wake_io_task(waiter);
+ goto release_bio;
+ }
+
+ /*
+ * If flagged with IOMAP_DIO_INLINE_COMP, we can complete it inline.
+ */
+ if (dio->flags & IOMAP_DIO_INLINE_COMP) {
+ WRITE_ONCE(iocb->private, NULL);
+ iomap_dio_complete_work(&dio->aio.work);
+ goto release_bio;
+ }
+
+ /*
+ * If this dio is flagged with IOMAP_DIO_CALLER_COMP, then schedule
+ * our completion that way to avoid an async punt to a workqueue.
+ */
+ if (dio->flags & IOMAP_DIO_CALLER_COMP) {
+ /* only polled IO cares about private being cleared */
+ iocb->private = dio;
+ iocb->dio_complete = iomap_dio_deferred_complete;
+
+ /*
+ * Invoke ->ki_complete() directly. We've assigned our
+ * dio_complete callback handler, and since the issuer set
+ * IOCB_DIO_CALLER_COMP, we know their ki_complete handler will
+ * notice ->dio_complete being set and will defer calling that
+ * handler until it can be done from a safe task context.
+ *
+ * Note that the 'res' being passed in here is not important
+ * for this case. The actual completion value of the request
+ * will be retrieved from dio_complete when that is run by the
+ * issuer.
+ */
+ iocb->ki_complete(iocb, 0);
+ goto release_bio;
}
+ /*
+ * Async DIO completion that requires filesystem level completion work
+ * gets punted to a work queue to complete as the operation may require
+ * more IO to be issued to finalise filesystem metadata changes or
+ * guarantee data integrity.
+ */
+ INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
+ queue_work(file_inode(iocb->ki_filp)->i_sb->s_dio_done_wq,
+ &dio->aio.work);
+release_bio:
if (should_dirty) {
bio_check_pages_dirty(bio);
} else {
@@ -203,7 +253,7 @@ static void iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
/*
* Figure out the bio's operation flags from the dio request, the
* mapping, and whether or not we want FUA. Note that we can end up
- * clearing the WRITE_FUA flag in the dio request.
+ * clearing the WRITE_THROUGH flag in the dio request.
*/
static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
const struct iomap *iomap, bool use_fua)
@@ -217,7 +267,7 @@ static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
if (use_fua)
opflags |= REQ_FUA;
else
- dio->flags &= ~IOMAP_DIO_WRITE_FUA;
+ dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
return opflags;
}
@@ -257,12 +307,19 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
* Use a FUA write if we need datasync semantics, this is a pure
* data IO that doesn't require any metadata updates (including
* after IO completion such as unwritten extent conversion) and
- * the underlying device supports FUA. This allows us to avoid
- * cache flushes on IO completion.
+ * the underlying device either supports FUA or doesn't have
+ * a volatile write cache. This allows us to avoid cache flushes
+ * on IO completion. If we can't use writethrough and need to
+ * sync, disable in-task completions as dio completion will
+ * need to call generic_write_sync() which will do a blocking
+ * fsync / cache flush call.
*/
if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
- (dio->flags & IOMAP_DIO_WRITE_FUA) && bdev_fua(iomap->bdev))
+ (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
+ (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
use_fua = true;
+ else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+ dio->flags &= ~IOMAP_DIO_CALLER_COMP;
}
/*
@@ -277,10 +334,23 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter,
goto out;
/*
- * We can only poll for single bio I/Os.
+ * We can only do deferred completion for pure overwrites that
+ * don't require additional IO at completion. This rules out
+ * writes that need zeroing or extent conversion, writes that extend
+ * the file size, and writes that issue journal IO or cache flushes
+ * during completion processing.
*/
if (need_zeroout ||
+ ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
+ dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+
+ /*
+ * The rules for polled IO completions follow the same guidelines as
+ * the ones we set for inline and deferred completions. If none of those
+ * are available for this IO, clear the polled flag.
+ */
+ if (!(dio->flags & (IOMAP_DIO_INLINE_COMP|IOMAP_DIO_CALLER_COMP)))
dio->iocb->ki_flags &= ~IOCB_HIPRI;
if (need_zeroout) {
@@ -505,12 +575,14 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->submit.iter = iter;
dio->submit.waiter = current;
- dio->submit.poll_bio = NULL;
if (iocb->ki_flags & IOCB_NOWAIT)
iomi.flags |= IOMAP_NOWAIT;
if (iov_iter_rw(iter) == READ) {
+ /* reads can always complete inline */
+ dio->flags |= IOMAP_DIO_INLINE_COMP;
+
if (iomi.pos >= dio->i_size)
goto out_free_dio;
@@ -524,6 +596,15 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomi.flags |= IOMAP_WRITE;
dio->flags |= IOMAP_DIO_WRITE;
+ /*
+ * Flag as supporting deferred completions, if the issuer
+ * groks it. This can avoid a workqueue punt for writes.
+ * We may later clear this flag if we need to do other IO
+ * as part of this IO completion.
+ */
+ if (iocb->ki_flags & IOCB_DIO_CALLER_COMP)
+ dio->flags |= IOMAP_DIO_CALLER_COMP;
+
if (dio_flags & IOMAP_DIO_OVERWRITE_ONLY) {
ret = -EAGAIN;
if (iomi.pos >= dio->i_size ||
@@ -537,13 +618,16 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->flags |= IOMAP_DIO_NEED_SYNC;
/*
- * For datasync only writes, we optimistically try
- * using FUA for this IO. Any non-FUA write that
- * occurs will clear this flag, hence we know before
- * completion whether a cache flush is necessary.
+ * For datasync only writes, we optimistically try using
+ * WRITE_THROUGH for this IO. This flag requires either
+ * FUA writes through the device's write cache, or a
+ * normal write to a device without a volatile write
+ * cache. For the former, any non-FUA write that occurs
+ * will clear this flag, hence we know before completion
+ * whether a cache flush is necessary.
*/
if (!(iocb->ki_flags & IOCB_SYNC))
- dio->flags |= IOMAP_DIO_WRITE_FUA;
+ dio->flags |= IOMAP_DIO_WRITE_THROUGH;
}
/*
@@ -605,14 +689,13 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
iomap_dio_set_error(dio, ret);
/*
- * If all the writes we issued were FUA, we don't need to flush the
- * cache on IO completion. Clear the sync flag for this case.
+ * If all the writes we issued were already written through to the
+ * media, we don't need to flush the cache on IO completion. Clear the
+ * sync flag for this case.
*/
- if (dio->flags & IOMAP_DIO_WRITE_FUA)
+ if (dio->flags & IOMAP_DIO_WRITE_THROUGH)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
- WRITE_ONCE(iocb->private, dio->submit.poll_bio);
-
/*
* We are about to drop our additional submission reference, which
* might be the last reference to the dio. There are three different
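
To make the new completion taxonomy easier to follow, here is a small userspace model of the order in which iomap_dio_bio_end_io() now classifies a finished bio: wake a synchronous waiter, complete inline (reads set IOMAP_DIO_INLINE_COMP up front), hand writes flagged IOMAP_DIO_CALLER_COMP back to the issuer via ->ki_complete(), and punt everything else to the s_dio_done_wq workqueue. The flag values are copied from the hunk; the helper below is a simplified stand-in, not kernel code.

#include <stdio.h>

#define IOMAP_DIO_CALLER_COMP	(1u << 26)
#define IOMAP_DIO_INLINE_COMP	(1u << 27)
#define IOMAP_DIO_WRITE		(1u << 30)

enum completion_path {
	WAKE_WAITER,		/* sync dio: just wake the submitting task */
	COMPLETE_INLINE,	/* safe to run iomap_dio_complete() here */
	CALLER_COMPLETES,	/* issuer runs dio_complete from task context */
	PUNT_TO_WORKQUEUE,	/* needs filesystem completion work */
};

static enum completion_path classify(unsigned int flags, int wait_for_completion)
{
	if (wait_for_completion)
		return WAKE_WAITER;
	if (flags & IOMAP_DIO_INLINE_COMP)
		return COMPLETE_INLINE;
	if (flags & IOMAP_DIO_CALLER_COMP)
		return CALLER_COMPLETES;
	return PUNT_TO_WORKQUEUE;
}

int main(void)
{
	printf("%d\n", classify(IOMAP_DIO_INLINE_COMP, 0));			/* 1 */
	printf("%d\n", classify(IOMAP_DIO_WRITE | IOMAP_DIO_CALLER_COMP, 0));	/* 2 */
	printf("%d\n", classify(IOMAP_DIO_WRITE, 0));				/* 3 */
	return 0;
}
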
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index df9d70588b60..2ee21286ac8f 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1422,13 +1422,8 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_ino, de->flags[-high_sierra]);
}
#endif
-
- inode->i_mtime.tv_sec =
- inode->i_atime.tv_sec =
- inode->i_ctime.tv_sec = iso_date(de->date, high_sierra);
- inode->i_mtime.tv_nsec =
- inode->i_atime.tv_nsec =
- inode->i_ctime.tv_nsec = 0;
+ inode->i_mtime = inode->i_atime =
+ inode_set_ctime(inode, iso_date(de->date, high_sierra), 0);
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 48f58c6c9e69..348783a70f57 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -421,10 +421,9 @@ repeat:
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
if (rr->u.TF.flags & TF_CREATE) {
- inode->i_ctime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode,
+ iso_date(rr->u.TF.times[cnt++].time, 0),
+ 0);
}
if (rr->u.TF.flags & TF_MODIFY) {
inode->i_mtime.tv_sec =
@@ -439,10 +438,9 @@ repeat:
inode->i_atime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
- inode->i_ctime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode,
+ iso_date(rr->u.TF.times[cnt++].time, 0),
+ 0);
}
break;
case SIG('S', 'L'):
@@ -534,7 +532,7 @@ repeat:
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
inode->i_atime = reloc->i_atime;
- inode->i_ctime = reloc->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(reloc));
inode->i_mtime = reloc->i_mtime;
iput(reloc);
break;
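
The isofs conversions above, like most of the jffs2/jfs/minix/nfs hunks that follow, replace direct stores to inode->i_ctime with the inode_get_ctime()/inode_set_ctime*() accessors. A minimal sketch of that accessor pattern, using simplified stand-in types (the real helpers operate on struct inode and timespec64):

#include <stdio.h>
#include <time.h>

/* simplified stand-in for struct inode: ctime is reached only via helpers */
struct inode {
	struct timespec __i_ctime;
};

static inline struct timespec inode_get_ctime(const struct inode *inode)
{
	return inode->__i_ctime;
}

static inline struct timespec inode_set_ctime_to_ts(struct inode *inode,
						    struct timespec ts)
{
	inode->__i_ctime = ts;
	return ts;
}

static inline struct timespec inode_set_ctime(struct inode *inode,
					      time_t sec, long nsec)
{
	struct timespec ts = { .tv_sec = sec, .tv_nsec = nsec };

	return inode_set_ctime_to_ts(inode, ts);
}

int main(void)
{
	struct inode inode;

	inode_set_ctime(&inode, 1700000000, 0);
	printf("ctime: %lld\n", (long long)inode_get_ctime(&inode).tv_sec);
	return 0;
}
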
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 5075a0a6d594..091ab0eaabbe 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -204,7 +204,8 @@ static int jffs2_create(struct mnt_idmap *idmap, struct inode *dir_i,
if (ret)
goto fail;
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
+ ITIME(je32_to_cpu(ri->ctime)));
jffs2_free_raw_inode(ri);
@@ -237,7 +238,7 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
if (dead_f->inocache)
set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
return ret;
}
/***********************************************************************/
@@ -271,7 +272,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
d_instantiate(dentry, d_inode(old_dentry));
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
ihold(d_inode(old_dentry));
}
return ret;
@@ -422,7 +423,8 @@ static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
+ ITIME(je32_to_cpu(rd->mctime)));
jffs2_free_raw_dirent(rd);
@@ -566,7 +568,8 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
+ ITIME(je32_to_cpu(rd->mctime)));
inc_nlink(dir_i);
jffs2_free_raw_dirent(rd);
@@ -607,7 +610,7 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
if (!ret) {
- dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
@@ -743,7 +746,8 @@ static int jffs2_mknod (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
+ ITIME(je32_to_cpu(rd->mctime)));
jffs2_free_raw_dirent(rd);
@@ -864,14 +868,16 @@ static int jffs2_rename (struct mnt_idmap *idmap,
* caller won't do it on its own since we are returning an error.
*/
d_invalidate(new_dentry);
- new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
+ new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i,
+ ITIME(now));
return ret;
}
if (d_is_dir(old_dentry))
drop_nlink(old_dir_i);
- new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
+ old_dir_i->i_mtime = inode_set_ctime_to_ts(old_dir_i, ITIME(now));
+ new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i, ITIME(now));
return 0;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 2345ca3f09ee..11c66793960e 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -317,7 +317,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
inode->i_size = pos + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
+ inode->i_mtime = inode_set_ctime_to_ts(inode,
+ ITIME(je32_to_cpu(ri->ctime)));
}
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 038516bee1ab..0403efab4089 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -115,7 +115,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
- ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
+ ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode)));
ri->offset = cpu_to_je32(0);
ri->csize = ri->dsize = cpu_to_je32(mdatalen);
@@ -148,7 +148,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
}
/* It worked. Update the inode */
inode->i_atime = ITIME(je32_to_cpu(ri->atime));
- inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime)));
inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
@@ -284,7 +284,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
inode->i_size = je32_to_cpu(latest_node.isize);
inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
- inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime)));
set_nlink(inode, f->inocache->pino_nlink);
@@ -388,7 +388,7 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
iattr.ia_gid = inode->i_gid;
iattr.ia_atime = inode->i_atime;
iattr.ia_mtime = inode->i_mtime;
- iattr.ia_ctime = inode->i_ctime;
+ iattr.ia_ctime = inode_get_ctime(inode);
jffs2_do_setattr(inode, &iattr);
}
@@ -475,7 +475,7 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
inode->i_mode = jemode_to_cpu(ri->mode);
i_gid_write(inode, je16_to_cpu(ri->gid));
i_uid_write(inode, je16_to_cpu(ri->uid));
- inode->i_atime = inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
inode->i_blocks = 0;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 8da19766c101..50727a1ff931 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -35,7 +35,7 @@ struct kvec;
#define ITIME(sec) ((struct timespec64){sec, 0})
#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
-#define JFFS2_F_I_CTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_ctime)
+#define JFFS2_F_I_CTIME(f) I_SEC(inode_get_ctime(OFNI_EDONI_2SFFJ(f)))
#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
#define sleep_on_spinunlock(wq, s) \
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index fb96f872d207..1de3602c98de 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -116,7 +116,7 @@ int jfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
if (!rc) {
if (update_mode) {
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
rc = txCommit(tid, 1, &inode, 0);
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 8ac10e396050..920d58a1566b 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -393,7 +393,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
break;
}
- ip->i_mtime = ip->i_ctime = current_time(ip);
+ ip->i_mtime = inode_set_ctime_current(ip);
mark_inode_dirty(ip);
txCommit(tid, 1, &ip, 0);
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index ed7989bc2db1..f7bd7e8f5be4 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -96,7 +96,7 @@ int jfs_fileattr_set(struct mnt_idmap *idmap,
jfs_inode->mode2 = flags;
jfs_set_inode_flags(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return 0;
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 390cbfce391f..a40383aa6c84 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -3064,8 +3064,8 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
- ip->i_ctime.tv_sec = le32_to_cpu(dip->di_ctime.tv_sec);
- ip->i_ctime.tv_nsec = le32_to_cpu(dip->di_ctime.tv_nsec);
+ inode_set_ctime(ip, le32_to_cpu(dip->di_ctime.tv_sec),
+ le32_to_cpu(dip->di_ctime.tv_nsec));
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
ip->i_generation = le32_to_cpu(dip->di_gen);
@@ -3139,8 +3139,8 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
- dip->di_ctime.tv_sec = cpu_to_le32(ip->i_ctime.tv_sec);
- dip->di_ctime.tv_nsec = cpu_to_le32(ip->i_ctime.tv_nsec);
+ dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime(ip).tv_sec);
+ dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime(ip).tv_nsec);
dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 9e1f02767201..87594efa7f7c 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -97,8 +97,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
jfs_inode->mode2 |= inode->i_mode;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
- jfs_inode->otime = inode->i_ctime.tv_sec;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ jfs_inode->otime = inode_get_ctime(inode).tv_sec;
inode->i_generation = JFS_SBI(sb)->gengen++;
jfs_inode->cflag = 0;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index e98ddb2b1cf2..029d47065600 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -149,7 +149,7 @@ static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ dip->i_mtime = inode_set_ctime_current(dip);
mark_inode_dirty(dip);
@@ -284,7 +284,7 @@ static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
/* update parent directory inode */
inc_nlink(dip); /* for '..' from child directory */
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ dip->i_mtime = inode_set_ctime_current(dip);
mark_inode_dirty(dip);
rc = txCommit(tid, 2, &iplist[0], 0);
@@ -390,7 +390,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
/* update parent directory's link count corresponding
* to ".." entry of the target directory deleted
*/
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ dip->i_mtime = inode_set_ctime_current(dip);
inode_dec_link_count(dip);
/*
@@ -512,7 +512,7 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
ASSERT(ip->i_nlink);
- ip->i_ctime = dip->i_ctime = dip->i_mtime = current_time(ip);
+ dip->i_mtime = inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip));
mark_inode_dirty(dip);
/* update target's inode */
@@ -827,8 +827,8 @@ static int jfs_link(struct dentry *old_dentry,
/* update object inode */
inc_nlink(ip); /* for new link */
- ip->i_ctime = current_time(ip);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_set_ctime_current(ip);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
ihold(ip);
@@ -1028,7 +1028,7 @@ static int jfs_symlink(struct mnt_idmap *idmap, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_ctime = dip->i_mtime = current_time(dip);
+ dip->i_mtime = inode_set_ctime_current(dip);
mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
@@ -1205,7 +1205,7 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
tblk->xflag |= COMMIT_DELETE;
tblk->u.ip = new_ip;
} else {
- new_ip->i_ctime = current_time(new_ip);
+ inode_set_ctime_current(new_ip);
mark_inode_dirty(new_ip);
}
} else {
@@ -1268,10 +1268,10 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
/*
* Update ctime on changed/moved inodes & mark dirty
*/
- old_ip->i_ctime = current_time(old_ip);
+ inode_set_ctime_current(old_ip);
mark_inode_dirty(old_ip);
- new_dir->i_ctime = new_dir->i_mtime = current_time(new_dir);
+ new_dir->i_mtime = inode_set_ctime_current(new_dir);
mark_inode_dirty(new_dir);
/* Build list of inodes modified by this transaction */
@@ -1283,7 +1283,7 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (old_dir != new_dir) {
iplist[ipcount++] = new_dir;
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ old_dir->i_mtime = inode_set_ctime_current(old_dir);
mark_inode_dirty(old_dir);
}
@@ -1416,7 +1416,7 @@ static int jfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
mark_inode_dirty(ip);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index d2f82cb7db1b..2e2f7f6d36a0 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -818,7 +818,7 @@ out:
}
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
inode_unlock(inode);
return len - towrite;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 931e50018f88..8577ad494e05 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -647,7 +647,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
if (old_blocks)
dquot_free_block(inode, old_blocks);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
return 0;
}
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index 5d826274570c..c429c42a6867 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -8,16 +8,16 @@
/**
* kernel_read_file() - read file contents into a kernel buffer
*
- * @file file to read from
- * @offset where to start reading from (see below).
- * @buf pointer to a "void *" buffer for reading into (if
+ * @file: file to read from
+ * @offset: where to start reading from (see below).
+ * @buf: pointer to a "void *" buffer for reading into (if
* *@buf is NULL, a buffer will be allocated, and
* @buf_size will be ignored)
- * @buf_size size of buf, if already allocated. If @buf not
+ * @buf_size: size of buf, if already allocated. If @buf not
* allocated, this is the largest size to allocate.
- * @file_size if non-NULL, the full size of @file will be
+ * @file_size: if non-NULL, the full size of @file will be
* written here.
- * @id the kernel_read_file_id identifying the type of
+ * @id: the kernel_read_file_id identifying the type of
* file contents being read (for LSMs to examine)
*
* @offset must be 0 unless both @buf and @file_size are non-NULL
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 5a1a4af9d3d2..660995856a04 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -556,7 +556,7 @@ void kernfs_put(struct kernfs_node *kn)
kfree_const(kn->name);
if (kn->iattr) {
- simple_xattrs_free(&kn->iattr->xattrs);
+ simple_xattrs_free(&kn->iattr->xattrs, NULL);
kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
}
spin_lock(&kernfs_idr_lock);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index b22b74d1a115..922719a343a7 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -151,8 +151,7 @@ ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime =
- inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
static inline void set_inode_attr(struct inode *inode,
@@ -162,7 +161,7 @@ static inline void set_inode_attr(struct inode *inode,
inode->i_gid = attrs->ia_gid;
inode->i_atime = attrs->ia_atime;
inode->i_mtime = attrs->ia_mtime;
- inode->i_ctime = attrs->ia_ctime;
+ inode_set_ctime_to_ts(inode, attrs->ia_ctime);
}
static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode)
@@ -191,7 +190,7 @@ int kernfs_iop_getattr(struct mnt_idmap *idmap,
down_read(&root->kernfs_iattr_rwsem);
kernfs_refresh_inode(kn, inode);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
up_read(&root->kernfs_iattr_rwsem);
return 0;
@@ -306,11 +305,17 @@ int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags)
{
+ struct simple_xattr *old_xattr;
struct kernfs_iattrs *attrs = kernfs_iattrs(kn);
if (!attrs)
return -ENOMEM;
- return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL);
+ old_xattr = simple_xattr_set(&attrs->xattrs, name, value, size, flags);
+ if (IS_ERR(old_xattr))
+ return PTR_ERR(old_xattr);
+
+ simple_xattr_free(old_xattr);
+ return 0;
}
static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
@@ -342,7 +347,7 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
{
atomic_t *sz = &kn->iattr->user_xattr_size;
atomic_t *nr = &kn->iattr->nr_user_xattrs;
- ssize_t removed_size;
+ struct simple_xattr *old_xattr;
int ret;
if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
@@ -355,13 +360,18 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
goto dec_size_out;
}
- ret = simple_xattr_set(xattrs, full_name, value, size, flags,
- &removed_size);
-
- if (!ret && removed_size >= 0)
- size = removed_size;
- else if (!ret)
+ old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
+ if (!old_xattr)
return 0;
+
+ if (IS_ERR(old_xattr)) {
+ ret = PTR_ERR(old_xattr);
+ goto dec_size_out;
+ }
+
+ ret = 0;
+ size = old_xattr->size;
+ simple_xattr_free(old_xattr);
dec_size_out:
atomic_sub(size, sz);
dec_count_out:
@@ -376,18 +386,19 @@ static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
{
atomic_t *sz = &kn->iattr->user_xattr_size;
atomic_t *nr = &kn->iattr->nr_user_xattrs;
- ssize_t removed_size;
- int ret;
+ struct simple_xattr *old_xattr;
- ret = simple_xattr_set(xattrs, full_name, value, size, flags,
- &removed_size);
+ old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
+ if (!old_xattr)
+ return 0;
- if (removed_size >= 0) {
- atomic_sub(removed_size, sz);
- atomic_dec(nr);
- }
+ if (IS_ERR(old_xattr))
+ return PTR_ERR(old_xattr);
- return ret;
+ atomic_sub(old_xattr->size, sz);
+ atomic_dec(nr);
+ simple_xattr_free(old_xattr);
+ return 0;
}
static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
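
The kernfs hunks above adapt to the reworked simple_xattr_set() contract: instead of reporting a removed size through an out parameter, it now returns the replaced xattr (NULL if nothing was replaced, or an error pointer), and the caller accounts for and frees it. A small userspace sketch of that calling convention, with invented toy types standing in for the real xattr store:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

struct xattr { size_t size; };

static struct xattr *store;	/* toy store: at most one xattr */

/* returns the replaced entry, NULL if nothing was replaced, or ERR_PTR() */
static struct xattr *xattr_set(size_t size)
{
	struct xattr *new = malloc(sizeof(*new)), *old = store;

	if (!new)
		return ERR_PTR(-ENOMEM);
	new->size = size;
	store = new;
	return old;
}

int main(void)
{
	struct xattr *old = xattr_set(16);

	if (IS_ERR(old))
		return (int)-PTR_ERR(old);
	if (old) {	/* caller adjusts accounting and frees the old entry */
		printf("replaced %zu bytes\n", old->size);
		free(old);
	}
	free(store);
	return 0;
}
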
diff --git a/fs/libfs.c b/fs/libfs.c
index 5b851315eeed..da78eb64831e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -33,7 +33,7 @@ int simple_getattr(struct mnt_idmap *idmap, const struct path *path,
unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9);
return 0;
}
@@ -239,6 +239,254 @@ const struct inode_operations simple_dir_inode_operations = {
};
EXPORT_SYMBOL(simple_dir_inode_operations);
+static void offset_set(struct dentry *dentry, u32 offset)
+{
+ dentry->d_fsdata = (void *)((uintptr_t)(offset));
+}
+
+static u32 dentry2offset(struct dentry *dentry)
+{
+ return (u32)((uintptr_t)(dentry->d_fsdata));
+}
+
+static struct lock_class_key simple_offset_xa_lock;
+
+/**
+ * simple_offset_init - initialize an offset_ctx
+ * @octx: directory offset map to be initialized
+ *
+ */
+void simple_offset_init(struct offset_ctx *octx)
+{
+ xa_init_flags(&octx->xa, XA_FLAGS_ALLOC1);
+ lockdep_set_class(&octx->xa.xa_lock, &simple_offset_xa_lock);
+
+ /* 0 is '.', 1 is '..', so always start with offset 2 */
+ octx->next_offset = 2;
+}
+
+/**
+ * simple_offset_add - Add an entry to a directory's offset map
+ * @octx: directory offset ctx to be updated
+ * @dentry: new dentry being added
+ *
+ * Returns zero on success. @octx and the dentry offset are updated.
+ * Otherwise, a negative errno value is returned.
+ */
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
+{
+ static const struct xa_limit limit = XA_LIMIT(2, U32_MAX);
+ u32 offset;
+ int ret;
+
+ if (dentry2offset(dentry) != 0)
+ return -EBUSY;
+
+ ret = xa_alloc_cyclic(&octx->xa, &offset, dentry, limit,
+ &octx->next_offset, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ offset_set(dentry, offset);
+ return 0;
+}
+
+/**
+ * simple_offset_remove - Remove an entry from a directory's offset map
+ * @octx: directory offset ctx to be updated
+ * @dentry: dentry being removed
+ *
+ */
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
+{
+ u32 offset;
+
+ offset = dentry2offset(dentry);
+ if (offset == 0)
+ return;
+
+ xa_erase(&octx->xa, offset);
+ offset_set(dentry, 0);
+}
+
+/**
+ * simple_offset_rename_exchange - exchange rename with directory offsets
+ * @old_dir: parent of dentry being moved
+ * @old_dentry: dentry being moved
+ * @new_dir: destination parent
+ * @new_dentry: destination dentry
+ *
+ * Returns zero on success. Otherwise a negative errno is returned and the
+ * rename is rolled back.
+ */
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry)
+{
+ struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
+ struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
+ u32 old_index = dentry2offset(old_dentry);
+ u32 new_index = dentry2offset(new_dentry);
+ int ret;
+
+ simple_offset_remove(old_ctx, old_dentry);
+ simple_offset_remove(new_ctx, new_dentry);
+
+ ret = simple_offset_add(new_ctx, old_dentry);
+ if (ret)
+ goto out_restore;
+
+ ret = simple_offset_add(old_ctx, new_dentry);
+ if (ret) {
+ simple_offset_remove(new_ctx, old_dentry);
+ goto out_restore;
+ }
+
+ ret = simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
+ if (ret) {
+ simple_offset_remove(new_ctx, old_dentry);
+ simple_offset_remove(old_ctx, new_dentry);
+ goto out_restore;
+ }
+ return 0;
+
+out_restore:
+ offset_set(old_dentry, old_index);
+ xa_store(&old_ctx->xa, old_index, old_dentry, GFP_KERNEL);
+ offset_set(new_dentry, new_index);
+ xa_store(&new_ctx->xa, new_index, new_dentry, GFP_KERNEL);
+ return ret;
+}
+
+/**
+ * simple_offset_destroy - Release offset map
+ * @octx: directory offset ctx that is about to be destroyed
+ *
+ * During fs teardown (e.g. umount), a directory's offset map might still
+ * contain entries. xa_destroy() cleans out anything that remains.
+ */
+void simple_offset_destroy(struct offset_ctx *octx)
+{
+ xa_destroy(&octx->xa);
+}
+
+/**
+ * offset_dir_llseek - Advance the read position of a directory descriptor
+ * @file: an open directory whose position is to be updated
+ * @offset: a byte offset
+ * @whence: enumerator describing the starting position for this update
+ *
+ * SEEK_END, SEEK_DATA, and SEEK_HOLE are not supported for directories.
+ *
+ * Returns the updated read position if successful; otherwise a
+ * negative errno is returned and the read position remains unchanged.
+ */
+static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ offset += file->f_pos;
+ fallthrough;
+ case SEEK_SET:
+ if (offset >= 0)
+ break;
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+
+ return vfs_setpos(file, offset, U32_MAX);
+}
+
+static struct dentry *offset_find_next(struct xa_state *xas)
+{
+ struct dentry *child, *found = NULL;
+
+ rcu_read_lock();
+ child = xas_next_entry(xas, U32_MAX);
+ if (!child)
+ goto out;
+ spin_lock(&child->d_lock);
+ if (simple_positive(child))
+ found = dget_dlock(child);
+ spin_unlock(&child->d_lock);
+out:
+ rcu_read_unlock();
+ return found;
+}
+
+static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
+{
+ u32 offset = dentry2offset(dentry);
+ struct inode *inode = d_inode(dentry);
+
+ return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset,
+ inode->i_ino, fs_umode_to_dtype(inode->i_mode));
+}
+
+static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+{
+ struct offset_ctx *so_ctx = inode->i_op->get_offset_ctx(inode);
+ XA_STATE(xas, &so_ctx->xa, ctx->pos);
+ struct dentry *dentry;
+
+ while (true) {
+ dentry = offset_find_next(&xas);
+ if (!dentry)
+ break;
+
+ if (!offset_dir_emit(ctx, dentry)) {
+ dput(dentry);
+ break;
+ }
+
+ dput(dentry);
+ ctx->pos = xas.xa_index + 1;
+ }
+}
+
+/**
+ * offset_readdir - Emit entries starting at offset @ctx->pos
+ * @file: an open directory to iterate over
+ * @ctx: directory iteration context
+ *
+ * Caller must hold @file's i_rwsem to prevent insertion or removal of
+ * entries during this call.
+ *
+ * On entry, @ctx->pos contains an offset that represents the first entry
+ * to be read from the directory.
+ *
+ * The operation continues until there are no more entries to read, or
+ * until the ctx->actor indicates there is no more space in the caller's
+ * output buffer.
+ *
+ * On return, @ctx->pos contains an offset that will read the next entry
+ * in this directory when offset_readdir() is called again with @ctx.
+ *
+ * Return values:
+ * %0 - Complete
+ */
+static int offset_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct dentry *dir = file->f_path.dentry;
+
+ lockdep_assert_held(&d_inode(dir)->i_rwsem);
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+ offset_iterate_dir(d_inode(dir), ctx);
+ return 0;
+}
+
+const struct file_operations simple_offset_dir_operations = {
+ .llseek = offset_dir_llseek,
+ .iterate_shared = offset_readdir,
+ .read = generic_read_dir,
+ .fsync = noop_fsync,
+};
+
static struct dentry *find_next_child(struct dentry *parent, struct dentry *prev)
{
struct dentry *child = NULL;
@@ -275,7 +523,7 @@ void simple_recursive_removal(struct dentry *dentry,
while ((child = find_next_child(this, victim)) == NULL) {
// kill and ascend
// update metadata while it's still locked
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
clear_nlink(inode);
inode_unlock(inode);
victim = this;
@@ -293,8 +541,7 @@ void simple_recursive_removal(struct dentry *dentry,
dput(victim); // unpin it
}
if (victim == dentry) {
- inode->i_ctime = inode->i_mtime =
- current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (d_is_dir(dentry))
drop_nlink(inode);
inode_unlock(inode);
@@ -335,7 +582,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
*/
root->i_ino = 1;
root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
- root->i_atime = root->i_mtime = root->i_ctime = current_time(root);
+ root->i_atime = root->i_mtime = inode_set_ctime_current(root);
s->s_root = d_make_root(root);
if (!s->s_root)
return -ENOMEM;
@@ -391,7 +638,8 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
{
struct inode *inode = d_inode(old_dentry);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ dir->i_mtime = inode_set_ctime_to_ts(dir,
+ inode_set_ctime_current(inode));
inc_nlink(inode);
ihold(inode);
dget(dentry);
@@ -425,7 +673,8 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ dir->i_mtime = inode_set_ctime_to_ts(dir,
+ inode_set_ctime_current(inode));
drop_nlink(inode);
dput(dentry);
return 0;
@@ -444,6 +693,31 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
}
EXPORT_SYMBOL(simple_rmdir);
+/**
+ * simple_rename_timestamp - update the various inode timestamps for rename
+ * @old_dir: old parent directory
+ * @old_dentry: dentry that is being renamed
+ * @new_dir: new parent directory
+ * @new_dentry: target for rename
+ *
+ * POSIX mandates that the old and new parent directories have their ctime and
+ * mtime updated, and that inodes of @old_dentry and @new_dentry (if any), have
+ * their ctime updated.
+ */
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct inode *newino = d_inode(new_dentry);
+
+ old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ if (new_dir != old_dir)
+ new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_ctime_current(d_inode(old_dentry));
+ if (newino)
+ inode_set_ctime_current(newino);
+}
+EXPORT_SYMBOL_GPL(simple_rename_timestamp);
+
int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
@@ -459,11 +733,7 @@ int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
inc_nlink(old_dir);
}
}
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- d_inode(old_dentry)->i_ctime =
- d_inode(new_dentry)->i_ctime = current_time(old_dir);
-
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
return 0;
}
EXPORT_SYMBOL_GPL(simple_rename_exchange);
@@ -472,7 +742,6 @@ int simple_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *old_dentry, struct inode *new_dir,
struct dentry *new_dentry, unsigned int flags)
{
- struct inode *inode = d_inode(old_dentry);
int they_are_dirs = d_is_dir(old_dentry);
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
@@ -495,9 +764,7 @@ int simple_rename(struct mnt_idmap *idmap, struct inode *old_dir,
inc_nlink(new_dir);
}
- old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
- new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
-
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
return 0;
}
EXPORT_SYMBOL(simple_rename);
@@ -548,21 +815,20 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len,
struct page **pagep, void **fsdata)
{
- struct page *page;
- pgoff_t index;
-
- index = pos >> PAGE_SHIFT;
+ struct folio *folio;
- page = grab_cache_page_write_begin(mapping, index);
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- *pagep = page;
+ *pagep = &folio->page;
- if (!PageUptodate(page) && (len != PAGE_SIZE)) {
- unsigned from = pos & (PAGE_SIZE - 1);
+ if (!folio_test_uptodate(folio) && (len != folio_size(folio))) {
+ size_t from = offset_in_folio(folio, pos);
- zero_user_segments(page, 0, from, from + len, PAGE_SIZE);
+ folio_zero_segments(folio, 0, from,
+ from + len, folio_size(folio));
}
return 0;
}
@@ -594,17 +860,18 @@ static int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(page);
+ struct inode *inode = folio->mapping->host;
loff_t last_pos = pos + copied;
- /* zero the stale part of the page if we did a short copy */
- if (!PageUptodate(page)) {
+ /* zero the stale part of the folio if we did a short copy */
+ if (!folio_test_uptodate(folio)) {
if (copied < len) {
- unsigned from = pos & (PAGE_SIZE - 1);
+ size_t from = offset_in_folio(folio, pos);
- zero_user(page, from + copied, len - copied);
+ folio_zero_range(folio, from + copied, len - copied);
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/*
* No need to use i_size_read() here, the i_size
@@ -613,9 +880,9 @@ static int simple_write_end(struct file *file, struct address_space *mapping,
if (last_pos > inode->i_size)
i_size_write(inode, last_pos);
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
@@ -659,7 +926,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
*/
inode->i_ino = 1;
inode->i_mode = S_IFDIR | 0755;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
set_nlink(inode, 2);
@@ -685,7 +952,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
goto out;
}
inode->i_mode = S_IFREG | files->mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_fop = files->ops;
inode->i_ino = i;
d_add(dentry, inode);
@@ -1253,7 +1520,7 @@ struct inode *alloc_anon_inode(struct super_block *s)
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_flags |= S_PRIVATE;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);
@@ -1269,7 +1536,7 @@ EXPORT_SYMBOL(alloc_anon_inode);
* All arguments are ignored and it just returns -EINVAL.
*/
int
-simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
+simple_nosetlease(struct file *filp, int arg, struct file_lock **flp,
void **priv)
{
return -EINVAL;
@@ -1315,7 +1582,7 @@ static int empty_dir_getattr(struct mnt_idmap *idmap,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
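
A rough userspace analogue of the stable directory offsets added to libfs above: each entry is assigned a monotonically increasing u32 offset starting at 2 (0 and 1 are reserved for '.' and '..'), removal leaves later offsets untouched, and readdir resumes from the saved position by skipping unused slots. The flat array stands in for the kernel's xarray and is purely illustrative.

#include <stdio.h>

#define MAX_OFF 64

static const char *map[MAX_OFF];	/* offset -> entry name */
static unsigned int next_offset = 2;	/* 0 is '.', 1 is '..' */

static int offset_add(const char *name)
{
	if (next_offset >= MAX_OFF)
		return -1;
	map[next_offset] = name;
	return next_offset++;
}

static void offset_remove(unsigned int off)
{
	if (off >= 2 && off < MAX_OFF)
		map[off] = NULL;	/* later entries keep their offsets */
}

/* emit entries at or after *pos, updating *pos like ctx->pos in readdir */
static void offset_readdir(unsigned int *pos)
{
	for (unsigned int off = *pos < 2 ? 2 : *pos; off < MAX_OFF; off++) {
		if (!map[off])
			continue;
		printf("%u: %s\n", off, map[off]);
		*pos = off + 1;
	}
}

int main(void)
{
	unsigned int pos = 0;
	int beta;

	offset_add("alpha");			/* offset 2 */
	beta = offset_add("beta");		/* offset 3 */
	offset_add("gamma");			/* offset 4 */
	offset_remove(beta);			/* unlink "beta" */
	offset_readdir(&pos);			/* prints alpha (2) and gamma (4) */
	return 0;
}
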
diff --git a/fs/locks.c b/fs/locks.c
index df8b26a42524..a45efc16945d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -438,7 +438,7 @@ static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
fl->fl_end = OFFSET_MAX;
}
-static int assign_type(struct file_lock *fl, long type)
+static int assign_type(struct file_lock *fl, int type)
{
switch (type) {
case F_RDLCK:
@@ -549,7 +549,7 @@ static const struct lock_manager_operations lease_manager_ops = {
/*
* Initialize a lease, use the default lock manager operations
*/
-static int lease_init(struct file *filp, long type, struct file_lock *fl)
+static int lease_init(struct file *filp, int type, struct file_lock *fl)
{
if (assign_type(fl, type) != 0)
return -EINVAL;
@@ -567,7 +567,7 @@ static int lease_init(struct file *filp, long type, struct file_lock *fl)
}
/* Allocate a file_lock initialised to this type of lease */
-static struct file_lock *lease_alloc(struct file *filp, long type)
+static struct file_lock *lease_alloc(struct file *filp, int type)
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
@@ -868,6 +868,21 @@ static bool posix_locks_conflict(struct file_lock *caller_fl,
return locks_conflict(caller_fl, sys_fl);
}
+/* Determine if lock sys_fl blocks lock caller_fl. Used on the xx_GETLK
+ * path, so it checks for additional GETLK-specific things like F_UNLCK.
+ */
+static bool posix_test_locks_conflict(struct file_lock *caller_fl,
+ struct file_lock *sys_fl)
+{
+ /* F_UNLCK checks any locks on the same fd. */
+ if (caller_fl->fl_type == F_UNLCK) {
+ if (!posix_same_owner(caller_fl, sys_fl))
+ return false;
+ return locks_overlap(caller_fl, sys_fl);
+ }
+ return posix_locks_conflict(caller_fl, sys_fl);
+}
+
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
* checking before calling the locks_conflict().
*/
@@ -901,7 +916,7 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
retry:
spin_lock(&ctx->flc_lock);
list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
- if (!posix_locks_conflict(fl, cfl))
+ if (!posix_test_locks_conflict(fl, cfl))
continue;
if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable
&& (*cfl->fl_lmops->lm_lock_expirable)(cfl)) {
@@ -1301,6 +1316,7 @@ retry:
out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
+ trace_posix_lock_inode(inode, request, error);
/*
* Free any unused locks.
*/
@@ -1309,7 +1325,6 @@ retry:
if (new_fl2)
locks_free_lock(new_fl2);
locks_dispose_list(&dispose);
- trace_posix_lock_inode(inode, request, error);
return error;
}
@@ -1666,7 +1681,7 @@ int fcntl_getlease(struct file *filp)
* conflict with the lease we're trying to set.
*/
static int
-check_conflicting_open(struct file *filp, const long arg, int flags)
+check_conflicting_open(struct file *filp, const int arg, int flags)
{
struct inode *inode = file_inode(filp);
int self_wcount = 0, self_rcount = 0;
@@ -1701,7 +1716,7 @@ check_conflicting_open(struct file *filp, const long arg, int flags)
}
static int
-generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
+generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **priv)
{
struct file_lock *fl, *my_fl = NULL, *lease;
struct inode *inode = file_inode(filp);
@@ -1859,7 +1874,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
* The (input) flp->fl_lmops->lm_break function is required
* by break_lease().
*/
-int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
+int generic_setlease(struct file *filp, int arg, struct file_lock **flp,
void **priv)
{
struct inode *inode = file_inode(filp);
@@ -1906,7 +1921,7 @@ lease_notifier_chain_init(void)
}
static inline void
-setlease_notifier(long arg, struct file_lock *lease)
+setlease_notifier(int arg, struct file_lock *lease)
{
if (arg != F_UNLCK)
srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
@@ -1942,7 +1957,7 @@ EXPORT_SYMBOL_GPL(lease_unregister_notifier);
* may be NULL if the lm_setup operation doesn't require it.
*/
int
-vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
+vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv)
{
if (lease)
setlease_notifier(arg, *lease);
@@ -1953,7 +1968,7 @@ vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
}
EXPORT_SYMBOL_GPL(vfs_setlease);
-static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
+static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg)
{
struct file_lock *fl;
struct fasync_struct *new;
@@ -1988,7 +2003,7 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
* Note that you also need to call %F_SETSIG to
* receive a signal when the lease is broken.
*/
-int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
{
if (arg == F_UNLCK)
return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp);
@@ -2136,7 +2151,7 @@ EXPORT_SYMBOL_GPL(vfs_test_lock);
* @fl: The file_lock who's fl_pid should be translated
* @ns: The namespace into which the pid should be translated
*
- * Used to tranlate a fl_pid into a namespace virtual pid number
+ * Used to translate a fl_pid into a namespace virtual pid number
*/
static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
{
@@ -2207,7 +2222,8 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
if (fl == NULL)
return -ENOMEM;
error = -EINVAL;
- if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
+ if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
+ && flock->l_type != F_WRLCK)
goto out;
error = flock_to_posix_lock(filp, fl, flock);
@@ -2414,7 +2430,8 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
return -ENOMEM;
error = -EINVAL;
- if (flock->l_type != F_RDLCK && flock->l_type != F_WRLCK)
+ if (cmd != F_OFD_GETLK && flock->l_type != F_RDLCK
+ && flock->l_type != F_WRLCK)
goto out;
error = flock64_to_posix_lock(filp, fl, flock);
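
The fcntl_getlk()/fcntl_getlk64() changes above let F_OFD_GETLK accept l_type == F_UNLCK, so userspace can ask whether the calling open file description already holds a lock over a range. A hedged example of that query; it assumes a kernel with this patch applied (older kernels return EINVAL for the second fcntl()):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/ofd-demo", O_RDWR | O_CREAT, 0600);
	struct flock fl;

	if (fd < 0)
		return 1;

	/* take an OFD write lock over the first 100 bytes */
	memset(&fl, 0, sizeof(fl));	/* l_pid must be 0 for OFD locks */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_len = 100;
	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
		perror("F_OFD_SETLK");

	/* new behaviour: F_UNLCK asks whether *this fd* holds a lock here */
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_UNLCK;
	fl.l_whence = SEEK_SET;
	fl.l_len = 100;
	if (fcntl(fd, F_OFD_GETLK, &fl) == -1)
		perror("F_OFD_GETLK");	/* EINVAL without this change */
	else
		printf("result: %s\n", fl.l_type == F_UNLCK ?
		       "no lock held by this fd" : "lock found on this fd");

	close(fd);
	return 0;
}
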
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 870207ba23f1..25c08fbfcb9d 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -251,7 +251,7 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode)
}
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = j;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_blocks = 0;
memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
insert_inode_hash(inode);
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index bf9858f76b6a..20f23e6e58ad 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -281,7 +281,7 @@ got_it:
de->inode = inode->i_ino;
}
dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
err = minix_handle_dirsync(dir);
out_put:
@@ -313,7 +313,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
else
de->inode = 0;
dir_commit_chunk(page, pos, len);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return minix_handle_dirsync(inode);
}
@@ -436,7 +436,7 @@ int minix_set_link(struct minix_dir_entry *de, struct page *page,
else
de->inode = inode->i_ino;
dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
return minix_handle_dirsync(dir);
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index e9fbb5303a22..df575473c1cc 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -501,10 +501,7 @@ static struct inode *V1_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = raw_inode->i_time;
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
+ inode->i_mtime = inode->i_atime = inode_set_ctime(inode, raw_inode->i_time, 0);
inode->i_blocks = 0;
for (i = 0; i < 9; i++)
minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
@@ -543,10 +540,9 @@ static struct inode *V2_minix_iget(struct inode *inode)
inode->i_size = raw_inode->i_size;
inode->i_mtime.tv_sec = raw_inode->i_mtime;
inode->i_atime.tv_sec = raw_inode->i_atime;
- inode->i_ctime.tv_sec = raw_inode->i_ctime;
+ inode_set_ctime(inode, raw_inode->i_ctime, 0);
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
inode->i_blocks = 0;
for (i = 0; i < 10; i++)
minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
@@ -622,7 +618,7 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
raw_inode->i_size = inode->i_size;
raw_inode->i_mtime = inode->i_mtime.tv_sec;
raw_inode->i_atime = inode->i_atime.tv_sec;
- raw_inode->i_ctime = inode->i_ctime.tv_sec;
+ raw_inode->i_ctime = inode_get_ctime(inode).tv_sec;
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 10; i++)
@@ -660,7 +656,7 @@ int minix_getattr(struct mnt_idmap *idmap, const struct path *path,
struct super_block *sb = path->dentry->d_sb;
struct inode *inode = d_inode(path->dentry);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (INODE_VERSION(inode) == MINIX_V1)
stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
else
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index 446148792f41..ce18ae37c29d 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -131,7 +131,7 @@ static inline int splice_branch(struct inode *inode,
/* We are done with atomic stuff, now do the rest of housekeeping */
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
/* had we spliced it onto indirect block? */
if (where->bh)
@@ -350,7 +350,7 @@ do_indirects:
}
first_whole++;
}
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 956d5183828d..114084d5636a 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -98,7 +98,7 @@ static int minix_link(struct dentry * old_dentry, struct inode * dir,
{
struct inode *inode = d_inode(old_dentry);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
return add_nondir(dentry, inode);
@@ -154,7 +154,7 @@ static int minix_unlink(struct inode * dir, struct dentry *dentry)
if (err)
return err;
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
return 0;
}
@@ -218,7 +218,7 @@ static int minix_rename(struct mnt_idmap *idmap,
put_page(new_page);
if (err)
goto out_dir;
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
diff --git a/fs/namei.c b/fs/namei.c
index e56ff39a79bc..567ee547492b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -643,6 +643,8 @@ static bool nd_alloc_stack(struct nameidata *nd)
/**
* path_connected - Verify that a dentry is below mnt.mnt_root
+ * @mnt: The mountpoint to check.
+ * @dentry: The dentry to check.
*
* Rename can sometimes move a file or directory outside of a bind
* mount, path_connected allows those cases to be detected.
@@ -1083,6 +1085,7 @@ fs_initcall(init_fs_namei_sysctls);
/**
* may_follow_link - Check symlink following for unsafe situations
* @nd: nameidata pathwalk data
+ * @inode: Used for idmapping.
*
* In the case of the sysctl_protected_symlinks sysctl being enabled,
* CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
@@ -2890,7 +2893,7 @@ int path_pts(struct path *path)
dput(path->dentry);
path->dentry = parent;
child = d_hash_and_lookup(parent, &this);
- if (!child)
+ if (IS_ERR_OR_NULL(child))
return -ENOENT;
path->dentry = child;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index c1eda73254e1..6bed1394d748 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -59,7 +59,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
res->change_attr = delegation->change_attr;
if (nfs_have_writebacks(inode))
res->change_attr++;
- res->ctime = inode->i_ctime;
+ res->ctime = inode_get_ctime(inode);
res->mtime = inode->i_mtime;
res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
args->bitmap[0];
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index e1706e736c64..2dc64454492b 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -116,8 +116,8 @@ static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *
memset(auxdata, 0, sizeof(*auxdata));
auxdata->mtime_sec = inode->i_mtime.tv_sec;
auxdata->mtime_nsec = inode->i_mtime.tv_nsec;
- auxdata->ctime_sec = inode->i_ctime.tv_sec;
- auxdata->ctime_nsec = inode->i_ctime.tv_nsec;
+ auxdata->ctime_sec = inode_get_ctime(inode).tv_sec;
+ auxdata->ctime_nsec = inode_get_ctime(inode).tv_nsec;
if (NFS_SERVER(inode)->nfs_client->rpc_ops->version == 4)
auxdata->change_attr = inode_peek_iversion_raw(inode);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8172dd4135a1..e21c073158e5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -514,7 +514,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
memset(&inode->i_atime, 0, sizeof(inode->i_atime));
memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
- memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
+ inode_set_ctime(inode, 0, 0);
inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
clear_nlink(inode);
@@ -535,7 +535,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
@@ -731,7 +731,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME);
@@ -749,7 +749,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME);
@@ -765,7 +765,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME);
@@ -912,7 +912,7 @@ out_no_revalidate:
/* Only return attributes that were revalidated. */
stat->result_mask = nfs_get_valid_attrmask(inode) | request_mask;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
stat->change_cookie = inode_peek_iversion_raw(inode);
stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
@@ -1444,11 +1444,11 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
}
/* If we have atomic WCC data, we may update some attributes */
- ts = inode->i_ctime;
+ ts = inode_get_ctime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
&& timespec64_equal(&ts, &fattr->pre_ctime)) {
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
}
ts = inode->i_mtime;
@@ -1510,7 +1510,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec64_equal(&ts, &fattr->mtime))
invalid |= NFS_INO_INVALID_MTIME;
- ts = inode->i_ctime;
+ ts = inode_get_ctime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec64_equal(&ts, &fattr->ctime))
invalid |= NFS_INO_INVALID_CTIME;
@@ -1997,7 +1997,7 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECTIME) == 0) {
- fattr->pre_ctime = inode->i_ctime;
+ fattr->pre_ctime = inode_get_ctime(inode);
fattr->valid |= NFS_ATTR_FATTR_PRECTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
@@ -2190,7 +2190,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
save_cache_validity & NFS_INO_INVALID_MTIME;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
- inode->i_ctime = fattr->ctime;
+ inode_set_ctime_to_ts(inode, fattr->ctime);
else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_CTIME;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 19d51ebf842c..e7494cdd957e 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -215,7 +215,8 @@ nfs_namespace_getattr(struct mnt_idmap *idmap,
if (NFS_FH(d_inode(path->dentry))->size != 0)
return nfs_getattr(idmap, path, stat, request_mask,
query_flags);
- generic_fillattr(&nop_mnt_idmap, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
+ stat);
return 0;
}
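generic_fillattr() gained a request_mask parameter this cycle, and the nfs_getattr()/nfs_namespace_getattr() conversions above are typical callers: the STATX mask requested by userspace is now forwarded so only the attributes actually asked for need to be filled in. A minimal ->getattr sketch under the new signature (foofs is hypothetical):

    #include <linux/fs.h>
    #include <linux/stat.h>

    static int foofs_getattr(struct mnt_idmap *idmap, const struct path *path,
                             struct kstat *stat, u32 request_mask,
                             unsigned int query_flags)
    {
            struct inode *inode = d_inode(path->dentry);

            /*
             * New signature: the STATX request mask is forwarded, so the VFS
             * only fills in (and the fs only needs to revalidate) what the
             * caller asked for.
             */
            generic_fillattr(idmap, request_mask, inode, stat);
            return 0;
    }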
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4c9f8bd866ab..47c5c1f86d66 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -328,7 +328,7 @@ extern int update_open_stateid(struct nfs4_state *state,
const nfs4_stateid *open_stateid,
const nfs4_stateid *deleg_stateid,
fmode_t fmode);
-extern int nfs4_proc_setlease(struct file *file, long arg,
+extern int nfs4_proc_setlease(struct file *file, int arg,
struct file_lock **lease, void **priv);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 4aeadd6e1a6d..02788c3c85e5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -438,7 +438,7 @@ void nfs42_ssc_unregister_ops(void)
}
#endif /* CONFIG_NFS_V4_2 */
-static int nfs4_setlease(struct file *file, long arg, struct file_lock **lease,
+static int nfs4_setlease(struct file *file, int arg, struct file_lock **lease,
void **priv)
{
return nfs4_proc_setlease(file, arg, lease, priv);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 832fa226b8f2..d57aaf0cc577 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7579,7 +7579,7 @@ static int nfs4_delete_lease(struct file *file, void **priv)
return generic_setlease(file, F_UNLCK, NULL, priv);
}
-static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
+static int nfs4_add_lease(struct file *file, int arg, struct file_lock **lease,
void **priv)
{
struct inode *inode = file_inode(file);
@@ -7597,7 +7597,7 @@ static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
return -EAGAIN;
}
-int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
+int nfs4_proc_setlease(struct file *file, int arg, struct file_lock **lease,
void **priv)
{
switch (arg) {
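The lease prototypes above change the command argument from long to int; the value is always one of F_RDLCK, F_WRLCK or F_UNLCK coming from fcntl(), so int is the natural width. A sketch of a ->setlease instance under the new signature (foofs is hypothetical; real implementations add their own delegation handling, as nfs4_proc_setlease does above):

    #include <linux/fs.h>
    #include <linux/filelock.h>

    /* Sketch: a ->setlease instance after the long -> int conversion. */
    static int foofs_setlease(struct file *file, int arg,
                              struct file_lock **lease, void **priv)
    {
            switch (arg) {
            case F_RDLCK:
            case F_WRLCK:
            case F_UNLCK:
                    return generic_setlease(file, arg, lease, priv);
            default:
                    return -EINVAL;
            }
    }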
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3aefbad4cc09..daf305daa751 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1354,9 +1354,9 @@ static void revoke_delegation(struct nfs4_delegation *dp)
trace_nfsd_stid_revoke(&dp->dl_stid);
if (clp->cl_minorversion) {
+ spin_lock(&clp->cl_lock);
dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
refcount_inc(&dp->dl_stid.sc_count);
- spin_lock(&clp->cl_lock);
list_add(&dp->dl_recall_lru, &clp->cl_revoked);
spin_unlock(&clp->cl_lock);
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1b8b1aab9a15..3709830f90a6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1105,6 +1105,7 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
if (!nn->nfsd_serv)
return -EBUSY;
trace_nfsd_end_grace(netns(file));
+ nfsd4_end_grace(nn);
break;
default:
return -EINVAL;
@@ -1131,7 +1132,7 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
/* Following advice from simple_fill_super documentation: */
inode->i_ino = iunique(sb, NFSD_MaxReserved);
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2c9074ab2315..9b7acba382fe 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -520,7 +520,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
nfsd_sanitize_attrs(inode, iap);
- if (check_guard && guardtime != inode->i_ctime.tv_sec)
+ if (check_guard && guardtime != inode_get_ctime(inode).tv_sec)
return nfserr_notsync;
/*
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index decd6471300b..bce734b68f08 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -429,7 +429,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
}
/*
@@ -519,7 +519,7 @@ got_it:
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, page->mapping, from, to);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
@@ -567,7 +567,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
nilfs_commit_chunk(page, mapping, from, to);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
out:
nilfs_put_page(page);
return err;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 35bc79305318..d588c719d743 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -366,7 +366,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
atomic64_inc(&root->inodes_count);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = ino;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
err = nilfs_bmap_read(ii->i_bmap, NULL);
@@ -450,10 +450,10 @@ int nilfs_read_inode_common(struct inode *inode,
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le64_to_cpu(raw_inode->i_size);
inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
- inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
+ inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
+ le32_to_cpu(raw_inode->i_ctime_nsec));
inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
- inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
return -EIO; /* this inode is for metadata and corrupted */
@@ -768,9 +768,9 @@ void nilfs_write_inode_common(struct inode *inode,
raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le64(inode->i_size);
- raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+ raw_inode->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
@@ -875,7 +875,7 @@ void nilfs_truncate(struct inode *inode)
nilfs_truncate_bmap(ii, blkoff);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 1dfbc0c34513..40ffade49f38 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -149,7 +149,7 @@ int nilfs_fileattr_set(struct mnt_idmap *idmap,
NILFS_I(inode)->i_flags = oldflags | (flags & FS_FL_USER_MODIFIABLE);
nilfs_set_inode_flags(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index c7024da8f1e2..2a4e7f4a8102 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -185,7 +185,7 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
if (err)
return err;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
@@ -283,7 +283,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
drop_nlink(inode);
err = 0;
out:
@@ -387,7 +387,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
goto out_dir;
nilfs_set_link(new_dir, new_de, new_page, old_inode);
nilfs_mark_inode_dirty(new_dir);
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
drop_nlink(new_inode);
@@ -406,7 +406,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
nilfs_delete_entry(old_de, old_page);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 581691e4be49..7ec16879756e 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -725,6 +725,11 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
struct folio *folio = fbatch.folios[i];
folio_lock(folio);
+ if (unlikely(folio->mapping != mapping)) {
+ /* Exclude folios removed from the address space */
+ folio_unlock(folio);
+ continue;
+ }
head = folio_buffers(folio);
if (!head) {
create_empty_buffers(&folio->page, i_blocksize(inode), 0);
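The nilfs2 hunk above closes a race: between looking a folio up in the page cache and locking it, truncation can detach it from its mapping, so the dirty-data scan must re-check folio->mapping under the folio lock and skip folios that no longer belong to the inode. A generic sketch of that pattern (the helper name is illustrative):

    #include <linux/pagemap.h>

    /*
     * Sketch: lock a folio pulled out of a batch lookup, then re-validate
     * that it still belongs to the mapping we are scanning.  A folio whose
     * ->mapping changed under us was truncated or reclaimed and is skipped.
     */
    static bool foofs_folio_still_ours(struct folio *folio,
                                       struct address_space *mapping)
    {
            folio_lock(folio);
            if (unlikely(folio->mapping != mapping)) {
                    folio_unlock(folio);
                    return false;
            }
            return true;    /* caller unlocks when it is done with the folio */
    }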
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 0ef8c71bde8e..a5d1fa4e7552 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -35,6 +35,7 @@
#include <linux/writeback.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "nilfs.h"
#include "export.h"
#include "mdt.h"
@@ -1216,7 +1217,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
}
struct nilfs_super_data {
- struct block_device *bdev;
__u64 cno;
int flags;
};
@@ -1283,64 +1283,49 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
static int nilfs_set_bdev_super(struct super_block *s, void *data)
{
- s->s_bdev = data;
- s->s_dev = s->s_bdev->bd_dev;
+ s->s_dev = *(dev_t *)data;
return 0;
}
static int nilfs_test_bdev_super(struct super_block *s, void *data)
{
- return (void *)s->s_bdev == data;
+ return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}
static struct dentry *
nilfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
- struct nilfs_super_data sd;
+ struct nilfs_super_data sd = { .flags = flags };
struct super_block *s;
- struct dentry *root_dentry;
- int err, s_new = false;
+ dev_t dev;
+ int err;
- sd.bdev = blkdev_get_by_path(dev_name, sb_open_mode(flags), fs_type,
- NULL);
- if (IS_ERR(sd.bdev))
- return ERR_CAST(sd.bdev);
+ if (nilfs_identify(data, &sd))
+ return ERR_PTR(-EINVAL);
- sd.cno = 0;
- sd.flags = flags;
- if (nilfs_identify((char *)data, &sd)) {
- err = -EINVAL;
- goto failed;
- }
+ err = lookup_bdev(dev_name, &dev);
+ if (err)
+ return ERR_PTR(err);
- /*
- * once the super is inserted into the list by sget, s_umount
- * will protect the lockfs code from trying to start a snapshot
- * while we are mounting
- */
- mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
- if (sd.bdev->bd_fsfreeze_count > 0) {
- mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
- err = -EBUSY;
- goto failed;
- }
s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
- sd.bdev);
- mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
- if (IS_ERR(s)) {
- err = PTR_ERR(s);
- goto failed;
- }
+ &dev);
+ if (IS_ERR(s))
+ return ERR_CAST(s);
if (!s->s_root) {
- s_new = true;
-
- /* New superblock instance created */
- snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
- sb_set_blocksize(s, block_size(sd.bdev));
-
- err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
+ /*
+ * We drop s_umount here because we need to open the bdev and
+ * bdev->open_mutex ranks above s_umount (blkdev_put() ->
+ * __invalidate_device()). It is safe because we have active sb
+ * reference and SB_BORN is not set yet.
+ */
+ up_write(&s->s_umount);
+ err = setup_bdev_super(s, flags, NULL);
+ down_write(&s->s_umount);
+ if (!err)
+ err = nilfs_fill_super(s, data,
+ flags & SB_SILENT ? 1 : 0);
if (err)
goto failed_super;
@@ -1366,24 +1351,18 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
}
if (sd.cno) {
+ struct dentry *root_dentry;
+
err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
if (err)
goto failed_super;
- } else {
- root_dentry = dget(s->s_root);
+ return root_dentry;
}
- if (!s_new)
- blkdev_put(sd.bdev, fs_type);
-
- return root_dentry;
+ return dget(s->s_root);
failed_super:
deactivate_locked_super(s);
-
- failed:
- if (!s_new)
- blkdev_put(sd.bdev, fs_type);
return ERR_PTR(err);
}
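The nilfs2 mount rework above follows the block-device holder cleanup: rather than opening the bdev up front and keying sget() on the struct block_device, the mount path resolves the path to a dev_t with lookup_bdev(), keys sget() on that, and opens the device via setup_bdev_super() only when a new superblock is being set up, with s_umount dropped around the open because bdev->open_mutex ranks above it. A condensed sketch of that shape, with snapshot handling and most error paths trimmed (foofs_fill_super and the SB_ACTIVE step are assumptions standing in for the filesystem-specific parts):

    #include <linux/blkdev.h>
    #include <linux/fs.h>

    static int foofs_fill_super(struct super_block *s, void *data, int silent);

    static int foofs_set_bdev_super(struct super_block *s, void *data)
    {
            s->s_dev = *(dev_t *)data;
            return 0;
    }

    static int foofs_test_bdev_super(struct super_block *s, void *data)
    {
            return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
    }

    static struct dentry *foofs_mount(struct file_system_type *fs_type, int flags,
                                      const char *dev_name, void *data)
    {
            struct super_block *s;
            dev_t dev;
            int err;

            err = lookup_bdev(dev_name, &dev);      /* path -> dev_t, no open yet */
            if (err)
                    return ERR_PTR(err);

            s = sget(fs_type, foofs_test_bdev_super, foofs_set_bdev_super,
                     flags, &dev);
            if (IS_ERR(s))
                    return ERR_CAST(s);

            if (!s->s_root) {
                    /*
                     * Open the bdev only for a brand-new superblock.  s_umount
                     * is dropped around the open because bdev->open_mutex
                     * ranks above it; safe since SB_BORN is not set yet.
                     */
                    up_write(&s->s_umount);
                    err = setup_bdev_super(s, flags, NULL);
                    down_write(&s->s_umount);
                    if (!err)
                            err = foofs_fill_super(s, data, !!(flags & SB_SILENT));
                    if (err) {
                            deactivate_locked_super(s);
                            return ERR_PTR(err);
                    }
                    s->s_flags |= SB_ACTIVE;        /* assumption: done on success */
            }

            return dget(s->s_root);
    }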
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 190aa717fa32..ebdcc25df0f7 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -199,7 +199,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
}
/* this conversion is done only at watch creation */
-static __u32 convert_arg(unsigned long arg)
+static __u32 convert_arg(unsigned int arg)
{
__u32 new_mask = FS_EVENT_ON_CHILD;
@@ -258,7 +258,7 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
* up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be
* attached to the fsnotify_mark.
*/
-int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
{
struct dnotify_mark *new_dn_mark, *dn_mark;
struct fsnotify_mark *new_fsn_mark, *fsn_mark;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index f602a96a1afe..647a22433bd8 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -84,7 +84,7 @@ slow:
return -ENOMEM;
}
inode->i_ino = ns->inum;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_flags |= S_IMMUTABLE;
inode->i_mode = S_IFREG | S_IRUGO;
inode->i_fop = &ns_file_operations;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 6c3f38d66579..99ac6ea277c4 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -654,7 +654,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
* always changes, when mtime is changed. ctime can be changed on its
* own, mtime is then not changed, e.g. when a file is renamed.
*/
- vi->i_ctime = ntfs2utc(si->last_mft_change_time);
+ inode_set_ctime_to_ts(vi, ntfs2utc(si->last_mft_change_time));
/*
* Last access to the data within the file. Not changed during a rename
* for example but changed whenever the file is written to.
@@ -1218,7 +1218,7 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
vi->i_mtime = base_vi->i_mtime;
- vi->i_ctime = base_vi->i_ctime;
+ inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
vi->i_atime = base_vi->i_atime;
vi->i_generation = ni->seq_no = base_ni->seq_no;
@@ -1484,7 +1484,7 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
vi->i_mtime = base_vi->i_mtime;
- vi->i_ctime = base_vi->i_ctime;
+ inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
vi->i_atime = base_vi->i_atime;
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
@@ -2804,13 +2804,14 @@ done:
*/
if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
struct timespec64 now = current_time(VFS_I(base_ni));
+ struct timespec64 ctime = inode_get_ctime(VFS_I(base_ni));
int sync_it = 0;
if (!timespec64_equal(&VFS_I(base_ni)->i_mtime, &now) ||
- !timespec64_equal(&VFS_I(base_ni)->i_ctime, &now))
+ !timespec64_equal(&ctime, &now))
sync_it = 1;
+ inode_set_ctime_to_ts(VFS_I(base_ni), now);
VFS_I(base_ni)->i_mtime = now;
- VFS_I(base_ni)->i_ctime = now;
if (sync_it)
mark_inode_dirty_sync(VFS_I(base_ni));
@@ -2928,7 +2929,7 @@ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (ia_valid & ATTR_MTIME)
vi->i_mtime = attr->ia_mtime;
if (ia_valid & ATTR_CTIME)
- vi->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(vi, attr->ia_ctime);
mark_inode_dirty(vi);
out:
return err;
@@ -3004,7 +3005,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si->last_data_change_time = nt;
modified = true;
}
- nt = utc2ntfs(vi->i_ctime);
+ nt = utc2ntfs(inode_get_ctime(vi));
if (si->last_mft_change_time != nt) {
ntfs_debug("Updating ctime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 0155f106ec34..ad1a8f72da22 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2682,8 +2682,7 @@ mft_rec_already_initialized:
vi->i_mode &= ~S_IWUGO;
/* Set the inode times to the current time. */
- vi->i_atime = vi->i_mtime = vi->i_ctime =
- current_time(vi);
+ vi->i_atime = vi->i_mtime = inode_set_ctime_current(vi);
/*
* Set the file size to 0, the ntfs inode sizes are set to 0 by
* the call to ntfs_init_big_inode() below.
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 1d6c824246c4..962f12ce6c0a 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -85,7 +85,7 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED;
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
stat->result_mask |= STATX_BTIME;
stat->btime = ni->i_crtime;
@@ -342,7 +342,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
err = 0;
}
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
@@ -400,7 +400,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
ni_unlock(ni);
ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (!IS_DIRSYNC(inode)) {
dirty = 1;
} else {
@@ -642,7 +642,7 @@ out:
filemap_invalidate_unlock(mapping);
if (!err) {
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 16bd9faa2d28..2b85cb10f0be 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -3265,6 +3265,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (is_rec_inuse(ni->mi.mrec) &&
!(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
bool modified = false;
+ struct timespec64 ctime = inode_get_ctime(inode);
/* Update times in standard attribute. */
std = ni_std(ni);
@@ -3280,7 +3281,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
modified = true;
}
- dup.c_time = kernel2nt(&inode->i_ctime);
+ dup.c_time = kernel2nt(&ctime);
if (std->c_time != dup.c_time) {
std->c_time = dup.c_time;
modified = true;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index dc7e7ab701c6..4123e126c4d0 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -44,6 +44,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
u64 t64;
struct MFT_REC *rec;
struct runs_tree *run;
+ struct timespec64 ctime;
inode->i_op = NULL;
/* Setup 'uid' and 'gid' */
@@ -169,7 +170,8 @@ next_attr:
nt2kernel(std5->cr_time, &ni->i_crtime);
#endif
nt2kernel(std5->a_time, &inode->i_atime);
- nt2kernel(std5->c_time, &inode->i_ctime);
+ ctime = inode_get_ctime(inode);
+ nt2kernel(std5->c_time, &ctime);
nt2kernel(std5->m_time, &inode->i_mtime);
ni->std_fa = std5->fa;
@@ -958,7 +960,7 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
if (err >= 0) {
if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
dirty = true;
}
@@ -1658,8 +1660,8 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
/* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */
- inode->i_atime = inode->i_mtime = inode->i_ctime = dir->i_mtime =
- dir->i_ctime = ni->i_crtime;
+ inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode, ni->i_crtime);
+ dir->i_mtime = inode_set_ctime_to_ts(dir, ni->i_crtime);
mark_inode_dirty(dir);
mark_inode_dirty(inode);
@@ -1765,9 +1767,9 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
if (!err) {
drop_nlink(inode);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
if (inode->i_nlink)
mark_inode_dirty(inode);
} else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index 70f8c859e0ad..ad430d50bd79 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -156,8 +156,8 @@ static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
err = ntfs_link_inode(inode, de);
if (!err) {
- dir->i_ctime = dir->i_mtime = inode->i_ctime =
- current_time(dir);
+ dir->i_mtime = inode_set_ctime_to_ts(inode,
+ inode_set_ctime_current(dir));
mark_inode_dirty(inode);
mark_inode_dirty(dir);
d_instantiate(de, inode);
@@ -324,14 +324,11 @@ static int ntfs_rename(struct mnt_idmap *idmap, struct inode *dir,
/* Restore after failed rename failed too. */
_ntfs_bad_inode(inode);
} else if (!err) {
- inode->i_ctime = dir->i_ctime = dir->i_mtime =
- current_time(dir);
+ simple_rename_timestamp(dir, dentry, new_dir, new_dentry);
mark_inode_dirty(inode);
mark_inode_dirty(dir);
- if (dir != new_dir) {
- new_dir->i_mtime = new_dir->i_ctime = dir->i_ctime;
+ if (dir != new_dir)
mark_inode_dirty(new_dir);
- }
if (IS_DIRSYNC(dir))
ntfs_sync_inode(dir);
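simple_rename_timestamp(), used above, is a new libfs helper that stamps ctime on the renamed (and, if present, replaced) inode and ctime/mtime on both parent directories in one call, replacing the hand-rolled assignments. A sketch of a rename op using it (foofs and foofs_do_rename are hypothetical):

    #include <linux/fs.h>

    static int foofs_do_rename(struct inode *old_dir, struct dentry *old_dentry,
                               struct inode *new_dir, struct dentry *new_dentry);

    static int foofs_rename(struct mnt_idmap *idmap,
                            struct inode *old_dir, struct dentry *old_dentry,
                            struct inode *new_dir, struct dentry *new_dentry,
                            unsigned int flags)
    {
            int err;

            if (flags & ~RENAME_NOREPLACE)
                    return -EINVAL;

            err = foofs_do_rename(old_dir, old_dentry, new_dir, new_dentry);
            if (err)
                    return err;

            /*
             * One helper now covers ctime on the inode(s) involved plus
             * ctime/mtime on both parent directories.
             */
            simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
            mark_inode_dirty(d_inode(old_dentry));
            return 0;
    }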
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 1a02072b6b0e..5fffddea554f 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -569,9 +569,9 @@ static void init_once(void *foo)
}
/*
- * put_ntfs - Noinline to reduce binary size.
+ * Noinline to reduce binary size.
*/
-static noinline void put_ntfs(struct ntfs_sb_info *sbi)
+static noinline void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
{
kfree(sbi->new_rec);
kvfree(ntfs_put_shared(sbi->upcase));
@@ -625,12 +625,6 @@ static void ntfs_put_super(struct super_block *sb)
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
-
- put_mount_options(sbi->options);
- put_ntfs(sbi);
- sb->s_fs_info = NULL;
-
- sync_blockdev(sb->s_bdev);
}
static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -1564,15 +1558,7 @@ load_root:
put_inode_out:
iput(inode);
out:
- /*
- * Free resources here.
- * ntfs_fs_free will be called with fc->s_fs_info = NULL
- */
- put_mount_options(sbi->options);
- put_ntfs(sbi);
- sb->s_fs_info = NULL;
kfree(boot2);
-
return err;
}
@@ -1659,7 +1645,7 @@ static void ntfs_fs_free(struct fs_context *fc)
struct ntfs_sb_info *sbi = fc->s_fs_info;
if (sbi)
- put_ntfs(sbi);
+ ntfs3_free_sbi(sbi);
if (opts)
put_mount_options(opts);
@@ -1728,13 +1714,24 @@ free_opts:
return -ENOMEM;
}
+static void ntfs3_kill_sb(struct super_block *sb)
+{
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ kill_block_super(sb);
+
+ if (sbi->options)
+ put_mount_options(sbi->options);
+ ntfs3_free_sbi(sbi);
+}
+
// clang-format off
static struct file_system_type ntfs_fs_type = {
.owner = THIS_MODULE,
.name = "ntfs3",
.init_fs_context = ntfs_init_fs_context,
.parameters = ntfs_fs_parameters,
- .kill_sb = kill_block_super,
+ .kill_sb = ntfs3_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
// clang-format on
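The ntfs3 change above moves all teardown of sb->s_fs_info out of ->put_super() and the fill_super error path into a ->kill_sb() callback that runs after kill_block_super(); the private info is then freed exactly once, whether or not fill_super ever succeeded. The generic shape of such a kill_sb (foofs and its sb_info are illustrative; mount/init_fs_context are omitted):

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    struct foofs_sb_info {
            void *private;          /* placeholder for fs-private state */
    };

    static void foofs_kill_sb(struct super_block *sb)
    {
            struct foofs_sb_info *sbi = sb->s_fs_info;

            /*
             * Tear the superblock down first (this syncs and releases the
             * block device), then free the private info; sbi may be NULL if
             * fill_super never attached one.
             */
            kill_block_super(sb);
            kfree(sbi);
    }

    static struct file_system_type foofs_fs_type = {
            .owner    = THIS_MODULE,
            .name     = "foofs",
            .kill_sb  = foofs_kill_sb,
            .fs_flags = FS_REQUIRES_DEV,
    };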
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index 023f314e8950..29fd391899e5 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -637,7 +637,7 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap,
if (!err) {
set_cached_acl(inode, type, acl);
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
@@ -924,7 +924,7 @@ set_new_fa:
NULL);
out:
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return err;
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 9fd03eaf15f8..e75137a8e7cb 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -191,10 +191,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
}
inode->i_mode = new_mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
di->i_mode = cpu_to_le16(inode->i_mode);
- di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 51c93929a146..aef58f1395c8 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7436,10 +7436,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
- di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 8dfc284e85f0..0fdba30740ab 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2048,7 +2048,7 @@ out_write_size:
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
if (handle)
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 694471fc46b8..8b123d543e6e 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1658,7 +1658,7 @@ int __ocfs2_add_entry(handle_t *handle,
offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
if (ocfs2_dirent_would_fit(de, rec_len)) {
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
if (retval < 0) {
mlog_errno(retval);
@@ -2962,11 +2962,11 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
ocfs2_dinode_new_extent_list(dir, di);
i_size_write(dir, sb->s_blocksize);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
di->i_size = cpu_to_le64(sb->s_blocksize);
- di->i_ctime = di->i_mtime = cpu_to_le64(dir->i_ctime.tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(dir->i_ctime.tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(dir).tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(dir).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, dir, 1);
/*
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index ba26c5567cff..81265123ce6c 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -337,7 +337,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inc_nlink(inode);
inode->i_fop = &simple_dir_operations;
@@ -360,7 +360,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
ip = DLMFS_I(inode);
ip->ip_conn = DLMFS_I(parent)->ip_conn;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c28bc983a7b1..c3e2961ee5db 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2162,6 +2162,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
+ struct timespec64 ctime = inode_get_ctime(inode);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2185,7 +2186,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_iatime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
lvb->lvb_ictime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
+ cpu_to_be64(ocfs2_pack_timespec(&ctime));
lvb->lvb_imtime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
@@ -2208,6 +2209,7 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
+ struct timespec64 ctime;
mlog_meta_lvb(0, lockres);
@@ -2238,8 +2240,9 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
be64_to_cpu(lvb->lvb_iatime_packed));
ocfs2_unpack_timespec(&inode->i_mtime,
be64_to_cpu(lvb->lvb_imtime_packed));
- ocfs2_unpack_timespec(&inode->i_ctime,
+ ocfs2_unpack_timespec(&ctime,
be64_to_cpu(lvb->lvb_ictime_packed));
+ inode_set_ctime_to_ts(inode, ctime);
spin_unlock(&oi->ip_lock);
return 0;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index bf2c17ea96a0..3b91b4cc7c6a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -232,8 +232,10 @@ int ocfs2_should_update_atime(struct inode *inode,
return 0;
if (vfsmnt->mnt_flags & MNT_RELATIME) {
+ struct timespec64 ctime = inode_get_ctime(inode);
+
if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
- (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
+ (timespec64_compare(&inode->i_atime, &ctime) <= 0))
return 1;
return 0;
@@ -294,7 +296,7 @@ int ocfs2_set_inode_size(handle_t *handle,
i_size_write(inode, new_i_size);
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
if (status < 0) {
@@ -415,12 +417,12 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
}
i_size_write(inode, new_i_size);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
di = (struct ocfs2_dinode *) fe_bh->b_data;
di->i_size = cpu_to_le64(new_i_size);
- di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
@@ -824,7 +826,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
i_size_write(inode, abs_to);
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
di->i_mtime_nsec = di->i_ctime_nsec;
@@ -1317,7 +1319,7 @@ int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path,
goto bail;
}
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
/*
* If there is inline data in the inode, the inode will normally not
* have data blocks allocated (it may have an external xattr block).
@@ -2043,7 +2045,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
mlog_errno(ret);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index bb116c39b581..e8771600b930 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -306,8 +306,8 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
- inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
- inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
+ inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
+ le32_to_cpu(fe->i_ctime_nsec));
if (OCFS2_I(inode)->ip_blkno != le64_to_cpu(fe->i_blkno))
mlog(ML_ERROR,
@@ -1314,8 +1314,8 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_mode = cpu_to_le16(inode->i_mode);
fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
@@ -1352,8 +1352,8 @@ void ocfs2_refresh_inode(struct inode *inode,
inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
- inode->i_ctime.tv_sec = le64_to_cpu(fe->i_ctime);
- inode->i_ctime.tv_nsec = le32_to_cpu(fe->i_ctime_nsec);
+ inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
+ le32_to_cpu(fe->i_ctime_nsec));
spin_unlock(&OCFS2_I(inode)->ip_lock);
}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 25d8072ccfce..c19c730c26e2 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -557,7 +557,7 @@ static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
(unsigned long)bh,
(unsigned long long)bh->b_blocknr);
- ocfs2_error(bh->b_bdev->bd_super,
+ ocfs2_error(bh->b_assoc_map->host->i_sb,
"JBD2 has aborted our journal, ocfs2 cannot continue\n");
}
@@ -780,14 +780,14 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
mlog_errno(status);
if (!is_handle_aborted(handle)) {
journal_t *journal = handle->h_transaction->t_journal;
- struct super_block *sb = bh->b_bdev->bd_super;
mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
"Aborting transaction and journal.\n");
handle->h_err = status;
jbd2_journal_abort_handle(handle);
jbd2_journal_abort(journal, status);
- ocfs2_abort(sb, "Journal already aborted.\n");
+ ocfs2_abort(bh->b_assoc_map->host->i_sb,
+ "Journal already aborted.\n");
}
}
}
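The ocfs2 journal hunks above are part of retiring block_device->bd_super: a buffer_head no longer leads straight to the superblock, so the code reaches it through the inode the buffer is associated with, bh->b_assoc_map->host->i_sb. A hedged helper to make that chain readable (the helper itself is hypothetical, not something this merge adds, and it assumes the buffer was attached to an inode mapping):

    #include <linux/buffer_head.h>

    /*
     * Illustrative helper (not added by this merge): resolve the superblock a
     * metadata buffer belongs to now that bh->b_bdev->bd_super is gone.
     * Assumes the buffer was attached to an inode's mapping, e.g. via
     * mark_buffer_dirty_inode(), so b_assoc_map is non-NULL.
     */
    static inline struct super_block *bh_owning_sb(struct buffer_head *bh)
    {
            return bh->b_assoc_map->host->i_sb;
    }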
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index b1e32ec4a9d4..05d67968a3a9 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -950,9 +950,9 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
}
di = (struct ocfs2_dinode *)di_bh->b_data;
- inode->i_ctime = current_time(inode);
- di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ inode_set_ctime_current(inode);
+ di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 17c52225b87d..e4a684d45308 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -793,10 +793,10 @@ static int ocfs2_link(struct dentry *old_dentry,
}
inc_nlink(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
- fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
@@ -995,7 +995,7 @@ static int ocfs2_unlink(struct inode *dir,
ocfs2_set_links_count(fe, inode->i_nlink);
ocfs2_journal_dirty(handle, fe_bh);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
if (S_ISDIR(inode->i_mode))
drop_nlink(dir);
@@ -1537,7 +1537,7 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
new_dir_bh, &target_insert);
}
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
status = ocfs2_journal_access_di(handle, INODE_CACHE(old_inode),
@@ -1546,8 +1546,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (status >= 0) {
old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
- old_di->i_ctime = cpu_to_le64(old_inode->i_ctime.tv_sec);
- old_di->i_ctime_nsec = cpu_to_le32(old_inode->i_ctime.tv_nsec);
+ old_di->i_ctime = cpu_to_le64(inode_get_ctime(old_inode).tv_sec);
+ old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(old_inode).tv_nsec);
ocfs2_journal_dirty(handle, old_inode_bh);
} else
mlog_errno(status);
@@ -1586,9 +1586,9 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (new_inode) {
drop_nlink(new_inode);
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
}
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
+ old_dir->i_mtime = inode_set_ctime_current(old_dir);
if (update_dot_dot) {
status = ocfs2_update_entry(old_inode, handle,
@@ -1610,7 +1610,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (old_dir != new_dir) {
/* Keep the same times on both directories.*/
- new_dir->i_ctime = new_dir->i_mtime = old_dir->i_ctime;
+ new_dir->i_mtime = inode_set_ctime_to_ts(new_dir,
+ inode_get_ctime(old_dir));
/*
* This will also pick up the i_nlink change from the
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 564ab48d03ef..25c8ec3c8c3a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3750,9 +3750,9 @@ static int ocfs2_change_ctime(struct inode *inode,
goto out_commit;
}
- inode->i_ctime = current_time(inode);
- di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ inode_set_ctime_current(inode);
+ di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_journal_dirty(handle, di_bh);
@@ -4073,10 +4073,10 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
* we want mtime to appear identical to the source and
* update ctime.
*/
- t_inode->i_ctime = current_time(t_inode);
+ inode_set_ctime_current(t_inode);
- di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime(t_inode).tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(t_inode).tv_nsec);
t_inode->i_mtime = s_inode->i_mtime;
di->i_mtime = s_di->i_mtime;
@@ -4456,7 +4456,7 @@ int ocfs2_reflink_update_dest(struct inode *dest,
if (newlen > i_size_read(dest))
i_size_write(dest, newlen);
spin_unlock(&OCFS2_I(dest)->ip_lock);
- dest->i_ctime = dest->i_mtime = current_time(dest);
+ dest->i_mtime = inode_set_ctime_current(dest);
ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
if (ret) {
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 4ac77ff6e676..6510ad783c91 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3421,9 +3421,9 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
goto out;
}
- inode->i_ctime = current_time(inode);
- di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ inode_set_ctime_current(inode);
+ di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
}
out:
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index 82cf7e9a665f..6bda275826d6 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -143,7 +143,7 @@ static int omfs_add_link(struct dentry *dentry, struct inode *inode)
mark_buffer_dirty(bh);
brelse(bh);
- dir->i_ctime = current_time(dir);
+ inode_set_ctime_current(dir);
/* mark affected inodes dirty to rebuild checksums */
mark_inode_dirty(dir);
@@ -399,7 +399,7 @@ static int omfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (err)
goto out;
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
out:
return err;
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index c4c79e07efc7..2f8c1882f45c 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -51,7 +51,7 @@ struct inode *omfs_new_inode(struct inode *dir, umode_t mode)
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_mapping->a_ops = &omfs_aops;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_op = &omfs_dir_inops;
@@ -134,8 +134,8 @@ static int __omfs_write_inode(struct inode *inode, int wait)
oi->i_head.h_magic = OMFS_IMAGIC;
oi->i_size = cpu_to_be64(inode->i_size);
- ctime = inode->i_ctime.tv_sec * 1000LL +
- ((inode->i_ctime.tv_nsec + 999)/1000);
+ ctime = inode_get_ctime(inode).tv_sec * 1000LL +
+ ((inode_get_ctime(inode).tv_nsec + 999)/1000);
oi->i_ctime = cpu_to_be64(ctime);
omfs_update_checksums(oi);
@@ -232,10 +232,9 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
inode->i_atime.tv_sec = ctime;
inode->i_mtime.tv_sec = ctime;
- inode->i_ctime.tv_sec = ctime;
+ inode_set_ctime(inode, ctime, nsecs);
inode->i_atime.tv_nsec = nsecs;
inode->i_mtime.tv_nsec = nsecs;
- inode->i_ctime.tv_nsec = nsecs;
inode->i_mapping->a_ops = &omfs_aops;
diff --git a/fs/open.c b/fs/open.c
index e6ead0f19964..98f6601fbac6 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -671,11 +671,20 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
return err;
}
-static int do_fchmodat(int dfd, const char __user *filename, umode_t mode)
+static int do_fchmodat(int dfd, const char __user *filename, umode_t mode,
+ unsigned int flags)
{
struct path path;
int error;
- unsigned int lookup_flags = LOOKUP_FOLLOW;
+ unsigned int lookup_flags;
+
+ if (unlikely(flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)))
+ return -EINVAL;
+
+ lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW;
+ if (flags & AT_EMPTY_PATH)
+ lookup_flags |= LOOKUP_EMPTY;
+
retry:
error = user_path_at(dfd, filename, lookup_flags, &path);
if (!error) {
@@ -689,15 +698,21 @@ retry:
return error;
}
+SYSCALL_DEFINE4(fchmodat2, int, dfd, const char __user *, filename,
+ umode_t, mode, unsigned int, flags)
+{
+ return do_fchmodat(dfd, filename, mode, flags);
+}
+
SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename,
umode_t, mode)
{
- return do_fchmodat(dfd, filename, mode);
+ return do_fchmodat(dfd, filename, mode, 0);
}
SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode)
{
- return do_fchmodat(AT_FDCWD, filename, mode);
+ return do_fchmodat(AT_FDCWD, filename, mode, 0);
}
/*
@@ -1150,7 +1165,7 @@ EXPORT_SYMBOL_GPL(kernel_file_open);
* backing_file_open - open a backing file for kernel internal use
* @path: path of the file to open
* @flags: open flags
- * @path: path of the backing file
+ * @real_path: path of the backing file
* @cred: credentials for open
*
* Open a backing file for a stackable filesystem (e.g., overlayfs).
@@ -1503,7 +1518,7 @@ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
* "id" is the POSIX thread ID. We use the
* files pointer for this..
*/
-int filp_close(struct file *filp, fl_owner_t id)
+static int filp_flush(struct file *filp, fl_owner_t id)
{
int retval = 0;
@@ -1520,10 +1535,18 @@ int filp_close(struct file *filp, fl_owner_t id)
dnotify_flush(filp, id);
locks_remove_posix(filp, id);
}
- fput(filp);
return retval;
}
+int filp_close(struct file *filp, fl_owner_t id)
+{
+ int retval;
+
+ retval = filp_flush(filp, id);
+ fput(filp);
+
+ return retval;
+}
EXPORT_SYMBOL(filp_close);
/*
@@ -1533,7 +1556,20 @@ EXPORT_SYMBOL(filp_close);
*/
SYSCALL_DEFINE1(close, unsigned int, fd)
{
- int retval = close_fd(fd);
+ int retval;
+ struct file *file;
+
+ file = close_fd_get_file(fd);
+ if (!file)
+ return -EBADF;
+
+ retval = filp_flush(file, current->files);
+
+ /*
+ * We're returning to user space. Don't bother
+ * with any delayed fput() cases.
+ */
+ __fput_sync(file);
/* can't restart close syscall because file table entry was cleared */
if (unlikely(retval == -ERESTARTSYS ||
@@ -1546,7 +1582,7 @@ SYSCALL_DEFINE1(close, unsigned int, fd)
}
/**
- * close_range() - Close all file descriptors in a given range.
+ * sys_close_range() - Close all file descriptors in a given range.
*
* @fd: starting file descriptor to close
* @max_fd: last file descriptor to close
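fs/open.c above wires up the new fchmodat2() syscall, which gives fchmodat() the flags argument it always lacked (AT_SYMLINK_NOFOLLOW and AT_EMPTY_PATH); the old three-argument fchmodat() and chmod() become wrappers passing flags=0. A userspace usage sketch, assuming a libc without a wrapper yet so the raw syscall is used; the __NR_fchmodat2 fallback value is an assumption (x86-64/asm-generic) and should come from your kernel headers when available:

    #define _GNU_SOURCE
    #include <fcntl.h>              /* AT_FDCWD, AT_EMPTY_PATH, O_PATH */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_fchmodat2
    #define __NR_fchmodat2 452      /* assumption: x86-64 / asm-generic number */
    #endif

    int main(void)
    {
            /* "some-file" is a placeholder path used only for illustration. */
            int fd = open("some-file", O_PATH | O_CLOEXEC);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /*
             * AT_EMPTY_PATH: operate on the object the descriptor refers to.
             * Plain fchmod() rejects O_PATH descriptors, and the old
             * three-argument fchmodat() had no way to pass this flag.
             */
            if (syscall(__NR_fchmodat2, fd, "", 0600, AT_EMPTY_PATH) == -1) {
                    perror("fchmodat2");
                    return 1;
            }
            return 0;
    }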
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index f0b7f4d51a17..b2457cb97fa0 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -237,7 +237,7 @@ found:
if (IS_ERR(inode))
return ERR_CAST(inode);
if (inode->i_state & I_NEW) {
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
ent_oi = OP_I(inode);
ent_oi->type = ent_type;
ent_oi->u = ent_data;
@@ -387,8 +387,7 @@ static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
goto out_no_root;
}
- root_inode->i_mtime = root_inode->i_atime =
- root_inode->i_ctime = current_time(root_inode);
+ root_inode->i_mtime = root_inode->i_atime = inode_set_ctime_current(root_inode);
root_inode->i_op = &openprom_inode_operations;
root_inode->i_fop = &openprom_operations;
root_inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 9014bbcc8031..085912268442 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -871,7 +871,7 @@ int orangefs_getattr(struct mnt_idmap *idmap, const struct path *path,
ret = orangefs_inode_getattr(inode,
request_mask & STATX_SIZE ? ORANGEFS_GETATTR_SIZE : 0);
if (ret == 0) {
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
/* override block size reported to stat */
if (!(request_mask & STATX_SIZE))
@@ -900,12 +900,13 @@ int orangefs_permission(struct mnt_idmap *idmap,
return generic_permission(&nop_mnt_idmap, inode, mask);
}
-int orangefs_update_time(struct inode *inode, struct timespec64 *time, int flags)
+int orangefs_update_time(struct inode *inode, int flags)
{
struct iattr iattr;
+
gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n",
get_khandle_from_ino(inode));
- generic_update_time(inode, time, flags);
+ flags = generic_update_time(inode, flags);
memset(&iattr, 0, sizeof iattr);
if (flags & S_ATIME)
iattr.ia_valid |= ATTR_ATIME;
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 77518e248cf7..c9dfd5c6a097 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -421,7 +421,7 @@ static int orangefs_rename(struct mnt_idmap *idmap,
ret);
if (new_dentry->d_inode)
- new_dentry->d_inode->i_ctime = current_time(new_dentry->d_inode);
+ inode_set_ctime_current(d_inode(new_dentry));
op_release(new_op);
return ret;
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index ce20d3443869..b711654ca18a 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -370,7 +370,7 @@ int orangefs_getattr(struct mnt_idmap *idmap, const struct path *path,
int orangefs_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
-int orangefs_update_time(struct inode *, struct timespec64 *, int);
+int orangefs_update_time(struct inode *, int);
/*
* defined in xattr.c
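The ->update_time() conversion above (orangefs here, overlayfs a little further down) drops the timespec64 argument: the VFS stamps the inode itself, and generic_update_time(inode, flags) now returns the subset of S_ATIME/S_MTIME/S_CTIME/S_VERSION flags it actually handled, which orangefs then maps onto its iattr. A minimal instance under the new signature (foofs and its writeback helper are invented for illustration):

    #include <linux/fs.h>

    static void foofs_schedule_metadata_writeback(struct inode *inode);

    static int foofs_update_time(struct inode *inode, int flags)
    {
            /*
             * generic_update_time() now stamps the inode itself (no
             * timespec64 is passed in any more) and returns the subset of
             * S_ATIME/S_MTIME/S_CTIME/S_VERSION it actually updated.
             */
            flags = generic_update_time(inode, flags);

            if (flags & (S_MTIME | S_CTIME))
                    foofs_schedule_metadata_writeback(inode);
            return 0;
    }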
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 46b7dcff18ac..0a9fcfdf552f 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -361,11 +361,11 @@ again2:
downcall.resp.getattr.attributes.atime;
inode->i_mtime.tv_sec = (time64_t)new_op->
downcall.resp.getattr.attributes.mtime;
- inode->i_ctime.tv_sec = (time64_t)new_op->
- downcall.resp.getattr.attributes.ctime;
+ inode_set_ctime(inode,
+ (time64_t)new_op->downcall.resp.getattr.attributes.ctime,
+ 0);
inode->i_atime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
/* special case: mark the root inode as sticky */
inode->i_mode = type | (is_root_handle(inode) ? S_ISVTX : 0) |
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 21245b00722a..eaa1e6b3e04a 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -239,6 +239,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
static void ovl_file_accessed(struct file *file)
{
struct inode *inode, *upperinode;
+ struct timespec64 ctime, uctime;
if (file->f_flags & O_NOATIME)
return;
@@ -249,10 +250,12 @@ static void ovl_file_accessed(struct file *file)
if (!upperinode)
return;
+ ctime = inode_get_ctime(inode);
+ uctime = inode_get_ctime(upperinode);
if ((!timespec64_equal(&inode->i_mtime, &upperinode->i_mtime) ||
- !timespec64_equal(&inode->i_ctime, &upperinode->i_ctime))) {
+ !timespec64_equal(&ctime, &uctime))) {
inode->i_mtime = upperinode->i_mtime;
- inode->i_ctime = upperinode->i_ctime;
+ inode_set_ctime_to_ts(inode, uctime);
}
touch_atime(&file->f_path);
@@ -290,10 +293,7 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
if (iocb->ki_flags & IOCB_WRITE) {
struct inode *inode = file_inode(orig_iocb->ki_filp);
- /* Actually acquired in ovl_write_iter() */
- __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
- SB_FREEZE_WRITE);
- file_end_write(iocb->ki_filp);
+ kiocb_end_write(iocb);
ovl_copyattr(inode);
}
@@ -409,10 +409,6 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- file_start_write(real.file);
- /* Pacify lockdep, same trick as done in aio_write() */
- __sb_writers_release(file_inode(real.file)->i_sb,
- SB_FREEZE_WRITE);
aio_req->fd = real;
real.flags = 0;
aio_req->orig_iocb = iocb;
@@ -420,6 +416,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
aio_req->iocb.ki_flags = ifl;
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
refcount_set(&aio_req->ref, 2);
+ kiocb_start_write(&aio_req->iocb);
ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
ovl_aio_put(aio_req);
if (ret != -EIOCBQUEUED)
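The overlayfs AIO hunks above replace the open-coded file_start_write() plus __sb_writers_release()/__sb_writers_acquired() lockdep juggling with the new kiocb_start_write()/kiocb_end_write() helpers, which carry superblock freeze protection across the async boundary on the kiocb itself. The general shape of that pattern (names other than the helpers are illustrative):

    #include <linux/fs.h>
    #include <linux/uio.h>

    /*
     * Completion side: runs when the underlying filesystem finishes the async
     * write; releases the freeze protection taken at submission time.
     */
    static void foofs_aio_write_done(struct kiocb *iocb)
    {
            kiocb_end_write(iocb);
            /* ...then propagate size/attributes and complete the original iocb. */
    }

    /*
     * Submission side: take freeze protection on the kiocb whose ki_filp is
     * the underlying (real) file, then hand it to the lower filesystem.
     */
    static ssize_t foofs_submit_aio_write(struct kiocb *real_iocb,
                                          struct iov_iter *iter)
    {
            kiocb_start_write(real_iocb);
            return vfs_iocb_iter_write(real_iocb->ki_filp, real_iocb, iter);
    }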
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a63e57447be9..f22e27b78025 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -693,7 +693,7 @@ int ovl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
}
#endif
-int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
+int ovl_update_time(struct inode *inode, int flags)
{
if (flags & S_ATIME) {
struct ovl_fs *ofs = inode->i_sb->s_fs_info;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 9402591f12aa..8bbe6173bef4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -665,7 +665,7 @@ static inline struct posix_acl *ovl_get_acl_path(const struct path *path,
}
#endif
-int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags);
+int ovl_update_time(struct inode *inode, int flags);
bool ovl_is_private_xattr(struct super_block *sb, const char *name);
struct ovl_inode_params {
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 7ef9e13c404a..c210b5d496a8 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -1202,6 +1202,6 @@ void ovl_copyattr(struct inode *inode)
inode->i_mode = realinode->i_mode;
inode->i_atime = realinode->i_atime;
inode->i_mtime = realinode->i_mtime;
- inode->i_ctime = realinode->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(realinode));
i_size_write(inode, i_size_read(realinode));
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 2d88f73f585a..6c1a9b1db907 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -489,7 +489,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
head = pipe->head;
if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
unsigned int mask = pipe->ring_size - 1;
- struct pipe_buffer *buf = &pipe->bufs[head & mask];
+ struct pipe_buffer *buf;
struct page *page = pipe->tmp_page;
int copied;
@@ -899,7 +899,7 @@ static struct inode * get_pipe_inode(void)
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
return inode;
@@ -1236,7 +1236,7 @@ const struct file_operations pipefifo_fops = {
* Currently we rely on the pipe array holding a power-of-2 number
* of pages. Returns 0 on error.
*/
-unsigned int round_pipe_size(unsigned long size)
+unsigned int round_pipe_size(unsigned int size)
{
if (size > (1U << 31))
return 0;
@@ -1319,7 +1319,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size if successful, or -ERROR on error.
*/
-static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
{
unsigned long user_bufs;
unsigned int nr_slots, size;
@@ -1387,7 +1387,7 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
return pipe;
}
-long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+long pipe_fcntl(struct file *file, unsigned int cmd, unsigned int arg)
{
struct pipe_inode_info *pipe;
long ret;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 7fa1b738bbab..a05fe94970ce 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -1027,7 +1027,7 @@ int simple_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
return error;
}
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
set_cached_acl(inode, type, acl);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9df3f4839662..7576effe8d52 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1902,7 +1902,7 @@ struct inode *proc_pid_make_inode(struct super_block *sb,
ei = PROC_I(inode);
inode->i_mode = mode;
inode->i_ino = get_next_ino();
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_op = &proc_def_inode_operations;
/*
@@ -1966,7 +1966,7 @@ int pid_getattr(struct mnt_idmap *idmap, const struct path *path,
struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->uid = GLOBAL_ROOT_UID;
stat->gid = GLOBAL_ROOT_GID;
@@ -3583,7 +3583,8 @@ static int proc_tid_comm_permission(struct mnt_idmap *idmap,
}
static const struct inode_operations proc_tid_comm_inode_operations = {
- .permission = proc_tid_comm_permission,
+ .setattr = proc_setattr,
+ .permission = proc_tid_comm_permission,
};
/*
@@ -3899,7 +3900,7 @@ static int proc_task_getattr(struct mnt_idmap *idmap,
{
struct inode *inode = d_inode(path->dentry);
struct task_struct *p = get_proc_task(inode);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (p) {
stat->nlink += get_nr_threads(p);
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index b3140deebbbf..6276b3938842 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -352,7 +352,7 @@ static int proc_fd_getattr(struct mnt_idmap *idmap,
struct inode *inode = d_inode(path->dentry);
int rv = 0;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
/* If it's a directory, put the number of open fds there */
if (S_ISDIR(inode->i_mode)) {
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 42ae38ff6e7e..775ce0bcf08c 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -146,7 +146,7 @@ static int proc_getattr(struct mnt_idmap *idmap,
}
}
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 67b09a1d9433..532dc9d240f7 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -660,7 +660,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
inode->i_private = de->data;
inode->i_ino = de->low_ino;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
PROC_I(inode)->pde = de;
if (is_empty_pde(de)) {
make_empty_dir_inode(inode);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index a0c0419872e3..2ba31b6d68c0 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -308,7 +308,7 @@ static int proc_tgid_net_getattr(struct mnt_idmap *idmap,
net = get_proc_task_net(inode);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (net != NULL) {
stat->nlink = net->proc_net->nlink;
@@ -321,6 +321,7 @@ static int proc_tgid_net_getattr(struct mnt_idmap *idmap,
const struct inode_operations proc_net_inode_operations = {
.lookup = proc_tgid_net_lookup,
.getattr = proc_tgid_net_getattr,
+ .setattr = proc_setattr,
};
static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 5ea42653126e..bf06344a42cc 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -463,7 +463,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
head->count++;
spin_unlock(&sysctl_lock);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = table->mode;
if (!S_ISDIR(table->mode)) {
inode->i_mode |= S_IFREG;
@@ -849,7 +849,7 @@ static int proc_sys_getattr(struct mnt_idmap *idmap,
if (IS_ERR(head))
return PTR_ERR(head);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (table)
stat->mode = (stat->mode & S_IFMT) | table->mode;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index a86e65a608da..9191248f2dac 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -314,7 +314,8 @@ static int proc_root_getattr(struct mnt_idmap *idmap,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
- generic_fillattr(&nop_mnt_idmap, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
+ stat);
stat->nlink = proc_root.nlink + nr_processes();
return 0;
}
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 72cd69bcaf4a..ecc4da8d265e 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -46,7 +46,7 @@ int proc_setup_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = self_inum;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 507cd4e59d07..fafff1bd34cd 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -587,8 +587,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
bool migration = false;
if (pmd_present(*pmd)) {
- /* FOLL_DUMP will return -EFAULT on huge zero page */
- page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+ page = vm_normal_page_pmd(vma, addr, *pmd);
} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
@@ -758,12 +757,14 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
static const struct mm_walk_ops smaps_walk_ops = {
.pmd_entry = smaps_pte_range,
.hugetlb_entry = smaps_hugetlb_range,
+ .walk_lock = PGWALK_RDLOCK,
};
static const struct mm_walk_ops smaps_shmem_walk_ops = {
.pmd_entry = smaps_pte_range,
.hugetlb_entry = smaps_hugetlb_range,
.pte_hole = smaps_pte_hole,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
@@ -1245,6 +1246,7 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
static const struct mm_walk_ops clear_refs_walk_ops = {
.pmd_entry = clear_refs_pte_range,
.test_walk = clear_refs_test_walk,
+ .walk_lock = PGWALK_WRLOCK,
};
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
@@ -1622,6 +1624,7 @@ static const struct mm_walk_ops pagemap_ops = {
.pmd_entry = pagemap_pmd_range,
.pte_hole = pagemap_pte_hole,
.hugetlb_entry = pagemap_hugetlb_range,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
@@ -1935,6 +1938,7 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
static const struct mm_walk_ops show_numa_ops = {
.hugetlb_entry = gather_hugetlb_stats,
.pmd_entry = gather_pte_stats,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index a553273fbd41..63ac1f93289f 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -46,7 +46,7 @@ int proc_setup_thread_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = thread_self_inum;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index c49d554cc9ae..3acc38600cd1 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config PSTORE
tristate "Persistent store support"
- select CRYPTO if PSTORE_COMPRESS
default n
help
This option enables generic access to platform level
@@ -22,99 +21,18 @@ config PSTORE_DEFAULT_KMSG_BYTES
Defines default size of pstore kernel log storage.
Can be enlarged if needed, not recommended to shrink it.
-config PSTORE_DEFLATE_COMPRESS
- tristate "DEFLATE (ZLIB) compression"
- default y
- depends on PSTORE
- select CRYPTO_DEFLATE
- help
- This option enables DEFLATE (also known as ZLIB) compression
- algorithm support.
-
-config PSTORE_LZO_COMPRESS
- tristate "LZO compression"
- depends on PSTORE
- select CRYPTO_LZO
- help
- This option enables LZO compression algorithm support.
-
-config PSTORE_LZ4_COMPRESS
- tristate "LZ4 compression"
- depends on PSTORE
- select CRYPTO_LZ4
- help
- This option enables LZ4 compression algorithm support.
-
-config PSTORE_LZ4HC_COMPRESS
- tristate "LZ4HC compression"
- depends on PSTORE
- select CRYPTO_LZ4HC
- help
- This option enables LZ4HC (high compression) mode algorithm.
-
-config PSTORE_842_COMPRESS
- bool "842 compression"
- depends on PSTORE
- select CRYPTO_842
- help
- This option enables 842 compression algorithm support.
-
-config PSTORE_ZSTD_COMPRESS
- bool "zstd compression"
- depends on PSTORE
- select CRYPTO_ZSTD
- help
- This option enables zstd compression algorithm support.
-
config PSTORE_COMPRESS
- def_bool y
+ bool "Pstore compression (deflate)"
depends on PSTORE
- depends on PSTORE_DEFLATE_COMPRESS || PSTORE_LZO_COMPRESS || \
- PSTORE_LZ4_COMPRESS || PSTORE_LZ4HC_COMPRESS || \
- PSTORE_842_COMPRESS || PSTORE_ZSTD_COMPRESS
-
-choice
- prompt "Default pstore compression algorithm"
- depends on PSTORE_COMPRESS
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ default y
help
- This option chooses the default active compression algorithm.
- This change be changed at boot with "pstore.compress=..." on
- the kernel command line.
-
- Currently, pstore has support for 6 compression algorithms:
- deflate, lzo, lz4, lz4hc, 842 and zstd.
-
- The default compression algorithm is deflate.
-
- config PSTORE_DEFLATE_COMPRESS_DEFAULT
- bool "deflate" if PSTORE_DEFLATE_COMPRESS
-
- config PSTORE_LZO_COMPRESS_DEFAULT
- bool "lzo" if PSTORE_LZO_COMPRESS
-
- config PSTORE_LZ4_COMPRESS_DEFAULT
- bool "lz4" if PSTORE_LZ4_COMPRESS
-
- config PSTORE_LZ4HC_COMPRESS_DEFAULT
- bool "lz4hc" if PSTORE_LZ4HC_COMPRESS
-
- config PSTORE_842_COMPRESS_DEFAULT
- bool "842" if PSTORE_842_COMPRESS
-
- config PSTORE_ZSTD_COMPRESS_DEFAULT
- bool "zstd" if PSTORE_ZSTD_COMPRESS
-
-endchoice
-
-config PSTORE_COMPRESS_DEFAULT
- string
- depends on PSTORE_COMPRESS
- default "deflate" if PSTORE_DEFLATE_COMPRESS_DEFAULT
- default "lzo" if PSTORE_LZO_COMPRESS_DEFAULT
- default "lz4" if PSTORE_LZ4_COMPRESS_DEFAULT
- default "lz4hc" if PSTORE_LZ4HC_COMPRESS_DEFAULT
- default "842" if PSTORE_842_COMPRESS_DEFAULT
- default "zstd" if PSTORE_ZSTD_COMPRESS_DEFAULT
+ Whether pstore records should be compressed before being written to
+ the backing store. This is implemented using the zlib 'deflate'
+ algorithm, using the library implementation instead of using the full
+ blown crypto API. This reduces the risk of secondary oopses or other
+ problems while pstore is recording panic metadata.
config PSTORE_CONSOLE
bool "Log kernel console messages"
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index ffbadb8b3032..585360706b33 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -54,7 +54,7 @@ static void free_pstore_private(struct pstore_private *private)
if (!private)
return;
if (private->record) {
- kfree(private->record->buf);
+ kvfree(private->record->buf);
kfree(private->record->priv);
kfree(private->record);
}
@@ -223,7 +223,7 @@ static struct inode *pstore_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
return inode;
}
@@ -390,7 +390,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
inode->i_private = private;
if (record->time.tv_sec)
- inode->i_mtime = inode->i_ctime = record->time;
+ inode->i_mtime = inode_set_ctime_to_ts(inode, record->time);
d_add(dentry, inode);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index cbc0b468c1ab..62356d542ef6 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -14,24 +14,17 @@
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pstore.h>
-#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
-#include <linux/lzo.h>
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
-#include <linux/lz4.h>
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
-#include <linux/zstd.h>
-#endif
-#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <linux/zlib.h>
#include "internal.h"
@@ -80,12 +73,21 @@ static char *backend;
module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "specific backend to use");
-static char *compress =
-#ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
- CONFIG_PSTORE_COMPRESS_DEFAULT;
-#else
- NULL;
-#endif
+/*
+ * pstore no longer implements compression via the crypto API, and only
+ * supports zlib deflate compression implemented using the zlib library
+ * interface. This removes additional complexity which is hard to justify for a
+ * diagnostic facility that has to operate in conditions where the system may
+ * have become unstable. Zlib deflate is comparatively small in terms of code
+ * size, and compresses ASCII text comparatively well. In terms of compression
+ * speed, deflate is not the best performer but for recording the log output on
+ * a kernel panic, this is not considered critical.
+ *
+ * The only remaining arguments supported by the compress= module parameter are
+ * 'deflate' and 'none'. To retain compatibility with existing installations,
+ * all other values are logged and replaced with 'deflate'.
+ */
+static char *compress = "deflate";
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "compression to use");
@@ -94,16 +96,9 @@ unsigned long kmsg_bytes = CONFIG_PSTORE_DEFAULT_KMSG_BYTES;
module_param(kmsg_bytes, ulong, 0444);
MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
-/* Compression parameters */
-static struct crypto_comp *tfm;
-
-struct pstore_zbackend {
- int (*zbufsize)(size_t size);
- const char *name;
-};
+static void *compress_workspace;
static char *big_oops_buf;
-static size_t big_oops_buf_sz;
void pstore_set_kmsg_bytes(int bytes)
{
@@ -168,206 +163,89 @@ static bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
}
}
-#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
-static int zbufsize_deflate(size_t size)
-{
- size_t cmpr;
-
- switch (size) {
- /* buffer range for efivars */
- case 1000 ... 2000:
- cmpr = 56;
- break;
- case 2001 ... 3000:
- cmpr = 54;
- break;
- case 3001 ... 3999:
- cmpr = 52;
- break;
- /* buffer range for nvram, erst */
- case 4000 ... 10000:
- cmpr = 45;
- break;
- default:
- cmpr = 60;
- break;
- }
-
- return (size * 100) / cmpr;
-}
-#endif
-
-#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
-static int zbufsize_lzo(size_t size)
-{
- return lzo1x_worst_compress(size);
-}
-#endif
-
-#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
-static int zbufsize_lz4(size_t size)
-{
- return LZ4_compressBound(size);
-}
-#endif
-
-#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
-static int zbufsize_842(size_t size)
-{
- return size;
-}
-#endif
-
-#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
-static int zbufsize_zstd(size_t size)
-{
- return zstd_compress_bound(size);
-}
-#endif
-
-static const struct pstore_zbackend *zbackend __ro_after_init;
-
-static const struct pstore_zbackend zbackends[] = {
-#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
- {
- .zbufsize = zbufsize_deflate,
- .name = "deflate",
- },
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
- {
- .zbufsize = zbufsize_lzo,
- .name = "lzo",
- },
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
- {
- .zbufsize = zbufsize_lz4,
- .name = "lz4",
- },
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
- {
- .zbufsize = zbufsize_lz4,
- .name = "lz4hc",
- },
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
- {
- .zbufsize = zbufsize_842,
- .name = "842",
- },
-#endif
-#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
- {
- .zbufsize = zbufsize_zstd,
- .name = "zstd",
- },
-#endif
- { }
-};
-
static int pstore_compress(const void *in, void *out,
unsigned int inlen, unsigned int outlen)
{
+ struct z_stream_s zstream = {
+ .next_in = in,
+ .avail_in = inlen,
+ .next_out = out,
+ .avail_out = outlen,
+ .workspace = compress_workspace,
+ };
int ret;
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
return -EINVAL;
- ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
- if (ret) {
- pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
- return ret;
- }
+ ret = zlib_deflateInit2(&zstream, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
+ -MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ return -EINVAL;
+
+ ret = zlib_deflate(&zstream, Z_FINISH);
+ if (ret != Z_STREAM_END)
+ return -EINVAL;
+
+ ret = zlib_deflateEnd(&zstream);
+ if (ret != Z_OK)
+ pr_warn_once("zlib_deflateEnd() failed: %d\n", ret);
- return outlen;
+ return zstream.total_out;
}
static void allocate_buf_for_compression(void)
{
- struct crypto_comp *ctx;
- int size;
char *buf;
- /* Skip if not built-in or compression backend not selected yet. */
- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
- return;
-
- /* Skip if no pstore backend yet or compression init already done. */
- if (!psinfo || tfm)
- return;
-
- if (!crypto_has_comp(zbackend->name, 0, 0)) {
- pr_err("Unknown compression: %s\n", zbackend->name);
+ /* Skip if not built-in or compression disabled. */
+ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !compress ||
+ !strcmp(compress, "none")) {
+ compress = NULL;
return;
}
- size = zbackend->zbufsize(psinfo->bufsize);
- if (size <= 0) {
- pr_err("Invalid compression size for %s: %d\n",
- zbackend->name, size);
- return;
+ if (strcmp(compress, "deflate")) {
+ pr_err("Unsupported compression '%s', falling back to deflate\n",
+ compress);
+ compress = "deflate";
}
- buf = kmalloc(size, GFP_KERNEL);
+ /*
+ * The compression buffer only needs to be as large as the maximum
+ * uncompressed record size, since any record that would be expanded by
+ * compression is just stored uncompressed.
+ */
+ buf = kvzalloc(psinfo->bufsize, GFP_KERNEL);
if (!buf) {
- pr_err("Failed %d byte compression buffer allocation for: %s\n",
- size, zbackend->name);
+ pr_err("Failed %zu byte compression buffer allocation for: %s\n",
+ psinfo->bufsize, compress);
return;
}
- ctx = crypto_alloc_comp(zbackend->name, 0, 0);
- if (IS_ERR_OR_NULL(ctx)) {
- kfree(buf);
- pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
- PTR_ERR(ctx));
+ compress_workspace =
+ vmalloc(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL));
+ if (!compress_workspace) {
+ pr_err("Failed to allocate zlib deflate workspace\n");
+ kvfree(buf);
return;
}
/* A non-NULL big_oops_buf indicates compression is available. */
- tfm = ctx;
- big_oops_buf_sz = size;
big_oops_buf = buf;
- pr_info("Using crash dump compression: %s\n", zbackend->name);
+ pr_info("Using crash dump compression: %s\n", compress);
}
static void free_buf_for_compression(void)
{
- if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
- crypto_free_comp(tfm);
- tfm = NULL;
+ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress_workspace) {
+ vfree(compress_workspace);
+ compress_workspace = NULL;
}
- kfree(big_oops_buf);
- big_oops_buf = NULL;
- big_oops_buf_sz = 0;
-}
-/*
- * Called when compression fails, since the printk buffer
- * would be fetched for compression calling it again when
- * compression fails would have moved the iterator of
- * printk buffer which results in fetching old contents.
- * Copy the recent messages from big_oops_buf to psinfo->buf
- */
-static size_t copy_kmsg_to_buffer(int hsize, size_t len)
-{
- size_t total_len;
- size_t diff;
-
- total_len = hsize + len;
-
- if (total_len > psinfo->bufsize) {
- diff = total_len - psinfo->bufsize + hsize;
- memcpy(psinfo->buf, big_oops_buf, hsize);
- memcpy(psinfo->buf + hsize, big_oops_buf + diff,
- psinfo->bufsize - hsize);
- total_len = psinfo->bufsize;
- } else
- memcpy(psinfo->buf, big_oops_buf, total_len);
-
- return total_len;
+ kvfree(big_oops_buf);
+ big_oops_buf = NULL;
}
void pstore_record_init(struct pstore_record *record,
@@ -426,13 +304,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
record.part = part;
record.buf = psinfo->buf;
- if (big_oops_buf) {
- dst = big_oops_buf;
- dst_size = big_oops_buf_sz;
- } else {
- dst = psinfo->buf;
- dst_size = psinfo->bufsize;
- }
+ dst = big_oops_buf ?: psinfo->buf;
+ dst_size = psinfo->bufsize;
/* Write dump header. */
header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
@@ -453,8 +326,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
record.compressed = true;
record.size = zipped_len;
} else {
- record.size = copy_kmsg_to_buffer(header_size,
- dump_size);
+ record.size = header_size + dump_size;
+ memcpy(psinfo->buf, dst, record.size);
}
} else {
record.size = header_size + dump_size;
@@ -549,7 +422,7 @@ static int pstore_write_user_compat(struct pstore_record *record,
if (record->buf)
return -EINVAL;
- record->buf = memdup_user(buf, record->size);
+ record->buf = vmemdup_user(buf, record->size);
if (IS_ERR(record->buf)) {
ret = PTR_ERR(record->buf);
goto out;
@@ -557,7 +430,7 @@ static int pstore_write_user_compat(struct pstore_record *record,
ret = record->psi->write(record);
- kfree(record->buf);
+ kvfree(record->buf);
out:
record->buf = NULL;
@@ -681,7 +554,8 @@ void pstore_unregister(struct pstore_info *psi)
}
EXPORT_SYMBOL_GPL(pstore_unregister);
-static void decompress_record(struct pstore_record *record)
+static void decompress_record(struct pstore_record *record,
+ struct z_stream_s *zstream)
{
int ret;
int unzipped_len;
@@ -697,40 +571,50 @@ static void decompress_record(struct pstore_record *record)
}
/* Missing compression buffer means compression was not initialized. */
- if (!big_oops_buf) {
+ if (!zstream->workspace) {
pr_warn("no decompression method initialized!\n");
return;
}
+ ret = zlib_inflateReset(zstream);
+ if (ret != Z_OK) {
+ pr_err("zlib_inflateReset() failed, ret = %d!\n", ret);
+ return;
+ }
+
/* Allocate enough space to hold max decompression and ECC. */
- unzipped_len = big_oops_buf_sz;
- workspace = kmalloc(unzipped_len + record->ecc_notice_size,
- GFP_KERNEL);
+ workspace = kvzalloc(psinfo->bufsize + record->ecc_notice_size,
+ GFP_KERNEL);
if (!workspace)
return;
- /* After decompression "unzipped_len" is almost certainly smaller. */
- ret = crypto_comp_decompress(tfm, record->buf, record->size,
- workspace, &unzipped_len);
- if (ret) {
- pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
- kfree(workspace);
+ zstream->next_in = record->buf;
+ zstream->avail_in = record->size;
+ zstream->next_out = workspace;
+ zstream->avail_out = psinfo->bufsize;
+
+ ret = zlib_inflate(zstream, Z_FINISH);
+ if (ret != Z_STREAM_END) {
+ pr_err("zlib_inflate() failed, ret = %d!\n", ret);
+ kvfree(workspace);
return;
}
+ unzipped_len = zstream->total_out;
+
/* Append ECC notice to decompressed buffer. */
memcpy(workspace + unzipped_len, record->buf + record->size,
record->ecc_notice_size);
/* Copy decompressed contents into a minimum-sized allocation. */
- unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
- GFP_KERNEL);
- kfree(workspace);
+ unzipped = kvmemdup(workspace, unzipped_len + record->ecc_notice_size,
+ GFP_KERNEL);
+ kvfree(workspace);
if (!unzipped)
return;
/* Swap out compressed contents with decompressed contents. */
- kfree(record->buf);
+ kvfree(record->buf);
record->buf = unzipped;
record->size = unzipped_len;
record->compressed = false;
@@ -747,10 +631,17 @@ void pstore_get_backend_records(struct pstore_info *psi,
{
int failed = 0;
unsigned int stop_loop = 65536;
+ struct z_stream_s zstream = {};
if (!psi || !root)
return;
+ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
+ zstream.workspace = kvmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL);
+ zlib_inflateInit2(&zstream, -DEF_WBITS);
+ }
+
mutex_lock(&psi->read_mutex);
if (psi->open && psi->open(psi))
goto out;
@@ -779,11 +670,11 @@ void pstore_get_backend_records(struct pstore_info *psi,
break;
}
- decompress_record(record);
+ decompress_record(record, &zstream);
rc = pstore_mkfile(root, record);
if (rc) {
/* pstore_mkfile() did not take record, so free it. */
- kfree(record->buf);
+ kvfree(record->buf);
kfree(record->priv);
kfree(record);
if (rc != -EEXIST || !quiet)
@@ -795,6 +686,12 @@ void pstore_get_backend_records(struct pstore_info *psi,
out:
mutex_unlock(&psi->read_mutex);
+ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && compress) {
+ if (zlib_inflateEnd(&zstream) != Z_OK)
+ pr_warn("zlib_inflateEnd() failed\n");
+ kvfree(zstream.workspace);
+ }
+
if (failed)
pr_warn("failed to create %d record(s) from '%s'\n",
failed, psi->name);
@@ -818,34 +715,10 @@ static void pstore_timefunc(struct timer_list *unused)
pstore_timer_kick();
}
-static void __init pstore_choose_compression(void)
-{
- const struct pstore_zbackend *step;
-
- if (!compress)
- return;
-
- for (step = zbackends; step->name; step++) {
- if (!strcmp(compress, step->name)) {
- zbackend = step;
- return;
- }
- }
-}
-
static int __init pstore_init(void)
{
int ret;
- pstore_choose_compression();
-
- /*
- * Check if any pstore backends registered earlier but did not
- * initialize compression because crypto was not ready. If so,
- * initialize compression now.
- */
- allocate_buf_for_compression();
-
ret = pstore_init_fs();
if (ret)
free_buf_for_compression();
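
A compact sketch of the raw-deflate setup the new pstore code relies on, using the same <linux/zlib.h> calls that appear in pstore_compress() and allocate_buf_for_compression() above; example_deflate() and its parameters are invented for illustration:

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

/*
 * Compress src[0..srclen) into dst[0..dstlen) as a raw deflate stream.
 * Returns the compressed length, or a negative errno. All zlib state
 * lives in the caller-allocated workspace, sized by
 * zlib_deflate_workspacesize(), just like compress_workspace above.
 */
static int example_deflate(const void *src, unsigned int srclen,
			   void *dst, unsigned int dstlen)
{
	struct z_stream_s zs = {
		.next_in = src,
		.avail_in = srclen,
		.next_out = dst,
		.avail_out = dstlen,
	};
	int ret = -EINVAL;

	zs.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
							  DEF_MEM_LEVEL));
	if (!zs.workspace)
		return -ENOMEM;

	if (zlib_deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      -MAX_WBITS, DEF_MEM_LEVEL,
			      Z_DEFAULT_STRATEGY) == Z_OK) {
		if (zlib_deflate(&zs, Z_FINISH) == Z_STREAM_END)
			ret = zs.total_out;
		zlib_deflateEnd(&zs);
	}

	vfree(zs.workspace);
	return ret;
}

The negative windowBits selects a raw deflate stream with no zlib header, matching the -DEF_WBITS used on the inflate side in pstore_get_backend_records().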
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 2f625e1fa8d8..d36702c7ab3c 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -20,6 +20,7 @@
#include <linux/compiler.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/mm.h>
#include "internal.h"
#include "ram_internal.h"
@@ -268,7 +269,7 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
/* ECC correction notice */
record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
- record->buf = kmalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
+ record->buf = kvzalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
if (record->buf == NULL) {
size = -ENOMEM;
goto out;
@@ -282,7 +283,7 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
out:
if (free_prz) {
- kfree(prz->old_log);
+ kvfree(prz->old_log);
kfree(prz);
}
@@ -833,7 +834,7 @@ static int ramoops_probe(struct platform_device *pdev)
*/
if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
- cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+ cxt->pstore.buf = kvzalloc(cxt->pstore.bufsize, GFP_KERNEL);
if (!cxt->pstore.buf) {
pr_err("cannot allocate pstore crash dump buffer\n");
err = -ENOMEM;
@@ -866,7 +867,7 @@ static int ramoops_probe(struct platform_device *pdev)
return 0;
fail_buf:
- kfree(cxt->pstore.buf);
+ kvfree(cxt->pstore.buf);
fail_clear:
cxt->pstore.bufsize = 0;
fail_init:
@@ -881,7 +882,7 @@ static void ramoops_remove(struct platform_device *pdev)
pstore_unregister(&cxt->pstore);
- kfree(cxt->pstore.buf);
+ kvfree(cxt->pstore.buf);
cxt->pstore.bufsize = 0;
ramoops_free_przs(cxt);
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 85aaf0fc6d7d..650e437b55e6 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <asm/page.h>
#include "ram_internal.h"
@@ -24,12 +25,10 @@
/**
* struct persistent_ram_buffer - persistent circular RAM buffer
*
- * @sig:
- * signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
- * @start:
- * offset into @data where the beginning of the stored bytes begin
- * @size:
- * number of valid bytes stored in @data
+ * @sig: Signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
+ * @start: First valid byte in the buffer.
+ * @size: Number of valid bytes in the buffer.
+ * @data: The contents of the buffer.
*/
struct persistent_ram_buffer {
uint32_t sig;
@@ -301,7 +300,7 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
if (!prz->old_log) {
persistent_ram_ecc_old(prz);
- prz->old_log = kmalloc(size, GFP_KERNEL);
+ prz->old_log = kvzalloc(size, GFP_KERNEL);
}
if (!prz->old_log) {
pr_err("failed to allocate buffer\n");
@@ -385,7 +384,7 @@ void *persistent_ram_old(struct persistent_ram_zone *prz)
void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
- kfree(prz->old_log);
+ kvfree(prz->old_log);
prz->old_log = NULL;
prz->old_log_size = 0;
}
@@ -519,7 +518,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
- if (buffer_size(prz) == 0) {
+ if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
pr_debug("found existing empty buffer\n");
return 0;
}
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 391ea402920d..a7171f5532a1 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -305,8 +305,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime);
inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime);
- inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode, le32_to_cpu(raw_inode->di_ctime), 0);
inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 85b2fa3b211c..21f90d519f1a 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -562,8 +562,7 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_atime);
inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_ctime);
- inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->di_ctime), 0);
/* calc blocks based on 512 byte blocksize */
inode->i_blocks = (inode->i_size + 511) >> 9;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index e3e4f4047657..4d826c369da2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2367,7 +2367,7 @@ int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
if (!fmt)
return -ESRCH;
- if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
+ if (!sb->dq_op || !sb->s_qcop ||
(type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
error = -EINVAL;
goto out_fmt;
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index fef477c78107..18e8387cab41 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -65,7 +65,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
inode->i_mapping->a_ops = &ram_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
@@ -105,7 +105,7 @@ ramfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
error = 0;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
}
return error;
}
@@ -138,7 +138,7 @@ static int ramfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
if (!error) {
d_instantiate(dentry, inode);
dget(dentry);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
} else
iput(inode);
}
diff --git a/fs/read_write.c b/fs/read_write.c
index b07de77ef126..4771701c896b 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(vfs_setpos);
* @file: file structure to seek on
* @offset: file offset to seek to
* @whence: type of seek
- * @size: max size of this file in file system
+ * @maxsize: max size of this file in file system
* @eof: offset used for SEEK_END position
*
* This is a variant of generic_file_llseek that allows passing in a custom
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 77bd3b27059f..86e55d4bb10d 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1259,9 +1259,8 @@ static void init_inode(struct inode *inode, struct treepath *path)
inode->i_size = sd_v1_size(sd);
inode->i_atime.tv_sec = sd_v1_atime(sd);
inode->i_mtime.tv_sec = sd_v1_mtime(sd);
- inode->i_ctime.tv_sec = sd_v1_ctime(sd);
+ inode_set_ctime(inode, sd_v1_ctime(sd), 0);
inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
inode->i_blocks = sd_v1_blocks(sd);
@@ -1314,8 +1313,7 @@ static void init_inode(struct inode *inode, struct treepath *path)
i_gid_write(inode, sd_v2_gid(sd));
inode->i_mtime.tv_sec = sd_v2_mtime(sd);
inode->i_atime.tv_sec = sd_v2_atime(sd);
- inode->i_ctime.tv_sec = sd_v2_ctime(sd);
- inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode, sd_v2_ctime(sd), 0);
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
inode->i_blocks = sd_v2_blocks(sd);
@@ -1374,7 +1372,7 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size)
set_sd_v2_gid(sd_v2, i_gid_read(inode));
set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
- set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
+ set_sd_v2_ctime(sd_v2, inode_get_ctime(inode).tv_sec);
set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
@@ -1394,7 +1392,7 @@ static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
set_sd_v1_nlink(sd_v1, inode->i_nlink);
set_sd_v1_size(sd_v1, size);
set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
- set_sd_v1_ctime(sd_v1, inode->i_ctime.tv_sec);
+ set_sd_v1_ctime(sd_v1, inode_get_ctime(inode).tv_sec);
set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
@@ -1986,7 +1984,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
/* uid and gid must already be set by the caller for quota init */
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_size = i_size;
inode->i_blocks = 0;
inode->i_bytes = 0;
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 6bf9b54e58ca..dd33f8cc6eda 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -55,7 +55,7 @@ int reiserfs_fileattr_set(struct mnt_idmap *idmap,
}
sd_attrs_to_i_attrs(flags, inode);
REISERFS_I(inode)->i_attrs = flags;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
err = 0;
unlock:
@@ -107,7 +107,7 @@ long reiserfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
err = -EFAULT;
goto setversion_out;
}
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
setversion_out:
mnt_drop_write_file(filp);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 479aa4a57602..015bfe4e4524 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2326,7 +2326,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
int i, j;
bh = __getblk(dev, block, bufsize);
- if (buffer_uptodate(bh))
+ if (!bh || buffer_uptodate(bh))
return (bh);
if (block + BUFNR > max_block) {
@@ -2336,6 +2336,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
j = 1;
for (i = 1; i < blocks; i++) {
bh = __getblk(dev, block + i, bufsize);
+ if (!bh)
+ break;
if (buffer_uptodate(bh)) {
brelse(bh);
break;
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 52240cc891cf..9c5704be2435 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -572,7 +572,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
}
dir->i_size += paste_size;
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
if (!S_ISDIR(inode->i_mode) && visible)
/* reiserfs_mkdir or reiserfs_rename will do that by itself */
reiserfs_update_sd(th, dir);
@@ -966,7 +966,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_nlink);
clear_nlink(inode);
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_to_ts(dir,
+ inode_set_ctime_current(inode));
reiserfs_update_sd(&th, inode);
DEC_DIR_INODE_NLINK(dir)
@@ -1070,11 +1071,11 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
inc_nlink(inode);
goto end_unlink;
}
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
reiserfs_update_sd(&th, inode);
dir->i_size -= (de.de_entrylen + DEH_SIZE);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
reiserfs_update_sd(&th, dir);
if (!savelink)
@@ -1250,7 +1251,7 @@ static int reiserfs_link(struct dentry *old_dentry, struct inode *dir,
return err ? err : retval;
}
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
reiserfs_update_sd(&th, inode);
ihold(inode);
@@ -1325,7 +1326,6 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
int jbegin_count;
umode_t old_inode_mode;
unsigned long savelink = 1;
- struct timespec64 ctime;
if (flags & ~RENAME_NOREPLACE)
return -EINVAL;
@@ -1576,14 +1576,11 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
mark_de_hidden(old_de.de_deh + old_de.de_entry_num);
journal_mark_dirty(&th, old_de.de_bh);
- ctime = current_time(old_dir);
- old_dir->i_ctime = old_dir->i_mtime = ctime;
- new_dir->i_ctime = new_dir->i_mtime = ctime;
/*
* thanks to Alex Adriaanse <alex_a@caltech.edu> for patch
* which adds ctime update of renamed object
*/
- old_inode->i_ctime = ctime;
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
if (new_dentry_inode) {
/* adjust link number of the victim */
@@ -1592,7 +1589,6 @@ static int reiserfs_rename(struct mnt_idmap *idmap,
} else {
drop_nlink(new_dentry_inode);
}
- new_dentry_inode->i_ctime = ctime;
savelink = new_dentry_inode->i_nlink;
}
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index ce5003986789..3676e02a0232 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -2004,7 +2004,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
if (update_timestamps) {
inode->i_mtime = current_time(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
}
reiserfs_update_sd(th, inode);
@@ -2029,7 +2029,7 @@ update_and_out:
if (update_timestamps) {
/* this is truncate, not file closing */
inode->i_mtime = current_time(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
}
reiserfs_update_sd(th, inode);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 929acce6e731..7eaf36b3de12 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2587,7 +2587,7 @@ out:
return err;
if (inode->i_size < off + len - towrite)
i_size_write(inode, off + len - towrite);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return len - towrite;
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 651027967159..6000964c2b80 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -466,12 +466,13 @@ int reiserfs_commit_write(struct file *f, struct page *page,
static void update_ctime(struct inode *inode)
{
struct timespec64 now = current_time(inode);
+ struct timespec64 ctime = inode_get_ctime(inode);
if (inode_unhashed(inode) || !inode->i_nlink ||
- timespec64_equal(&inode->i_ctime, &now))
+ timespec64_equal(&ctime, &now))
return;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_to_ts(inode, now);
mark_inode_dirty(inode);
}
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 138060452678..064264992b49 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -285,7 +285,7 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
if (error == -ENODATA) {
error = 0;
if (type == ACL_TYPE_ACCESS) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
}
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index c59b230d55b4..5c35f6c76037 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -322,8 +322,7 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
set_nlink(i, 1); /* Hard to decide.. */
i->i_size = be32_to_cpu(ri.size);
- i->i_mtime.tv_sec = i->i_atime.tv_sec = i->i_ctime.tv_sec = 0;
- i->i_mtime.tv_nsec = i->i_atime.tv_nsec = i->i_ctime.tv_nsec = 0;
+ i->i_mtime = i->i_atime = inode_set_ctime(i, 0, 0);
/* set up mode and ops */
mode = romfs_modemap[nextfh & ROMFH_TYPE];
@@ -583,16 +582,18 @@ static int romfs_init_fs_context(struct fs_context *fc)
*/
static void romfs_kill_sb(struct super_block *sb)
{
+ generic_shutdown_super(sb);
+
#ifdef CONFIG_ROMFS_ON_MTD
if (sb->s_mtd) {
- kill_mtd_super(sb);
- return;
+ put_mtd_device(sb->s_mtd);
+ sb->s_mtd = NULL;
}
#endif
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
- kill_block_super(sb);
- return;
+ sync_blockdev(sb->s_bdev);
+ blkdev_put(sb->s_bdev, sb);
}
#endif
}
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index a4d8b0ea1c8c..6fc8f43b1c9d 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -1077,7 +1077,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
}
static int
-cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
+cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
{
/*
* Note that this is called by vfs setlease with i_lock held to
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 6bc44f79d2e9..2108b3b40ce9 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -1085,7 +1085,7 @@ int cifs_close(struct inode *inode, struct file *file)
!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
}
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
@@ -2596,7 +2596,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
write_data, to - from, &offset);
cifsFileInfo_put(open_file);
/* Does mm or vfs already set times? */
- inode->i_atime = inode->i_mtime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
if ((bytes_written > 0) && (offset))
rc = 0;
else if (bytes_written < 0)
diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
index 173999610997..84f3b09367d2 100644
--- a/fs/smb/client/fscache.h
+++ b/fs/smb/client/fscache.h
@@ -50,12 +50,13 @@ void cifs_fscache_fill_coherency(struct inode *inode,
struct cifs_fscache_inode_coherency_data *cd)
{
struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ struct timespec64 ctime = inode_get_ctime(inode);
memset(cd, 0, sizeof(*cd));
cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
- cd->last_change_time_sec = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec);
- cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec);
+ cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec);
+ cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec);
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index c3eeae07e139..93fe43789d7a 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -172,7 +172,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
else
inode->i_atime = fattr->cf_atime;
inode->i_mtime = fattr->cf_mtime;
- inode->i_ctime = fattr->cf_ctime;
+ inode_set_ctime_to_ts(inode, fattr->cf_ctime);
inode->i_rdev = fattr->cf_rdev;
cifs_nlink_fattr_to_inode(inode, fattr);
inode->i_uid = fattr->cf_uid;
@@ -1744,9 +1744,9 @@ out_reval:
cifs_inode = CIFS_I(inode);
cifs_inode->time = 0; /* will force revalidate to get info
when needed */
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
}
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
cifs_inode = CIFS_I(dir);
CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
unlink_out:
@@ -2060,8 +2060,8 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
*/
cifsInode->time = 0;
- d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
- current_time(inode);
+ inode_set_ctime_current(d_inode(direntry));
+ inode->i_mtime = inode_set_ctime_current(inode);
rmdir_exit:
free_dentry_path(page);
@@ -2267,8 +2267,8 @@ unlink_target:
/* force revalidate to go get info when needed */
CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
- source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
- target_dir->i_mtime = current_time(source_dir);
+ source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir,
+ inode_set_ctime_current(target_dir));
cifs_rename_exit:
kfree(info_buf_source);
@@ -2540,7 +2540,7 @@ int cifs_getattr(struct mnt_idmap *idmap, const struct path *path,
return rc;
}
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->blksize = cifs_sb->ctx->bsize;
stat->ino = CIFS_I(inode)->uniqueid;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 0f62bc373ad0..182e2e879ecf 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -1396,7 +1396,8 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
if (file_inf.LastWriteTime)
inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
if (file_inf.ChangeTime)
- inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
+ inode_set_ctime_to_ts(inode,
+ cifs_NTtimeToUnix(file_inf.ChangeTime));
if (file_inf.LastAccessTime)
inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 7cc1b0c47d0a..a947c18915c2 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -4402,8 +4402,8 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
}
basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
- generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
- &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+ file_inode(fp->filp), &stat);
basic_info->CreationTime = cpu_to_le64(fp->create_time);
time = ksmbd_UnixTimeToNT(stat.atime);
basic_info->LastAccessTime = cpu_to_le64(time);
@@ -4428,7 +4428,7 @@ static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
struct kstat stat;
inode = file_inode(fp->filp);
- generic_fillattr(file_mnt_idmap(fp->filp), inode, &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
delete_pending = ksmbd_inode_pending_delete(fp);
@@ -4482,7 +4482,7 @@ static int get_file_all_info(struct ksmbd_work *work,
return PTR_ERR(filename);
inode = file_inode(fp->filp);
- generic_fillattr(file_mnt_idmap(fp->filp), inode, &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
ksmbd_debug(SMB, "filename = %s\n", filename);
delete_pending = ksmbd_inode_pending_delete(fp);
@@ -4559,8 +4559,8 @@ static void get_file_stream_info(struct ksmbd_work *work,
int buf_free_len;
struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
- generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
- &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+ file_inode(fp->filp), &stat);
file_info = (struct smb2_file_stream_info *)rsp->Buffer;
buf_free_len =
@@ -4650,8 +4650,8 @@ static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
struct smb2_file_internal_info *file_info;
struct kstat stat;
- generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
- &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+ file_inode(fp->filp), &stat);
file_info = (struct smb2_file_internal_info *)rsp->Buffer;
file_info->IndexNumber = cpu_to_le64(stat.ino);
rsp->OutputBufferLength =
@@ -4676,7 +4676,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
inode = file_inode(fp->filp);
- generic_fillattr(file_mnt_idmap(fp->filp), inode, &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
file_info->CreationTime = cpu_to_le64(fp->create_time);
time = ksmbd_UnixTimeToNT(stat.atime);
@@ -4737,8 +4737,8 @@ static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
struct smb2_file_comp_info *file_info;
struct kstat stat;
- generic_fillattr(file_mnt_idmap(fp->filp), file_inode(fp->filp),
- &stat);
+ generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
+ file_inode(fp->filp), &stat);
file_info = (struct smb2_file_comp_info *)rsp->Buffer;
file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
@@ -4790,7 +4790,7 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
file_info->LastAccessTime = cpu_to_le64(time);
time = ksmbd_UnixTimeToNT(inode->i_mtime);
file_info->LastWriteTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode->i_ctime);
+ time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
file_info->ChangeTime = cpu_to_le64(time);
file_info->DosAttributes = fp->f_ci->m_fattr;
file_info->Inode = cpu_to_le64(inode->i_ino);
@@ -5433,7 +5433,7 @@ int smb2_close(struct ksmbd_work *work)
rsp->LastAccessTime = cpu_to_le64(time);
time = ksmbd_UnixTimeToNT(inode->i_mtime);
rsp->LastWriteTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode->i_ctime);
+ time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
rsp->ChangeTime = cpu_to_le64(time);
ksmbd_fd_put(work, fp);
} else {
@@ -5656,7 +5656,7 @@ static int set_file_basic_info(struct ksmbd_file *fp,
if (file_info->ChangeTime)
attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
else
- attrs.ia_ctime = inode->i_ctime;
+ attrs.ia_ctime = inode_get_ctime(inode);
if (file_info->LastWriteTime) {
attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
@@ -5701,7 +5701,7 @@ static int set_file_basic_info(struct ksmbd_file *fp,
return -EACCES;
inode_lock(inode);
- inode->i_ctime = attrs.ia_ctime;
+ inode_set_ctime_to_ts(inode, attrs.ia_ctime);
attrs.ia_valid &= ~ATTR_CTIME;
rc = notify_change(idmap, dentry, &attrs, NULL);
inode_unlock(inode);
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 3d5d652153a5..d48756a339a5 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1659,7 +1659,8 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
u64 time;
int rc;
- generic_fillattr(idmap, d_inode(dentry), ksmbd_kstat->kstat);
+ generic_fillattr(idmap, STATX_BASIC_STATS, d_inode(dentry),
+ ksmbd_kstat->kstat);
time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
ksmbd_kstat->create_time = time;
diff --git a/fs/splice.c b/fs/splice.c
index 3e2a31e1ce6a..02631013b09f 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -120,17 +120,17 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- struct page *page = buf->page;
+ struct folio *folio = page_folio(buf->page);
int err;
- if (!PageUptodate(page)) {
- lock_page(page);
+ if (!folio_test_uptodate(folio)) {
+ folio_lock(folio);
/*
- * Page got truncated/unhashed. This will cause a 0-byte
+ * Folio got truncated/unhashed. This will cause a 0-byte
* splice, if this is the first page.
*/
- if (!page->mapping) {
+ if (!folio->mapping) {
err = -ENODATA;
goto error;
}
@@ -138,20 +138,18 @@ static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
/*
* Uh oh, read-error from disk.
*/
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
err = -EIO;
goto error;
}
- /*
- * Page is ok afterall, we are done.
- */
- unlock_page(page);
+ /* Folio is ok after all, we are done */
+ folio_unlock(folio);
}
return 0;
error:
- unlock_page(page);
+ folio_unlock(folio);
return err;
}
@@ -1269,10 +1267,8 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
if ((in->f_flags | out->f_flags) & O_NONBLOCK)
flags |= SPLICE_F_NONBLOCK;
- return splice_pipe_to_pipe(ipipe, opipe, len, flags);
- }
-
- if (ipipe) {
+ ret = splice_pipe_to_pipe(ipipe, opipe, len, flags);
+ } else if (ipipe) {
if (off_in)
return -ESPIPE;
if (off_out) {
@@ -1297,18 +1293,11 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
ret = do_splice_from(ipipe, out, &offset, len, flags);
file_end_write(out);
- if (ret > 0)
- fsnotify_modify(out);
-
if (!off_out)
out->f_pos = offset;
else
*off_out = offset;
-
- return ret;
- }
-
- if (opipe) {
+ } else if (opipe) {
if (off_out)
return -ESPIPE;
if (off_in) {
@@ -1324,18 +1313,25 @@ long do_splice(struct file *in, loff_t *off_in, struct file *out,
ret = splice_file_to_pipe(in, opipe, &offset, len, flags);
- if (ret > 0)
- fsnotify_access(in);
-
if (!off_in)
in->f_pos = offset;
else
*off_in = offset;
+ } else {
+ ret = -EINVAL;
+ }
- return ret;
+ if (ret > 0) {
+ /*
+ * Generate modify out before access in:
+ * do_splice_from() may've already sent modify out,
+ * and this ensures the events get merged.
+ */
+ fsnotify_modify(out);
+ fsnotify_access(in);
}
- return -EINVAL;
+ return ret;
}
static long __do_splice(struct file *in, loff_t __user *off_in,
@@ -1464,6 +1460,9 @@ static long vmsplice_to_user(struct file *file, struct iov_iter *iter,
pipe_unlock(pipe);
}
+ if (ret > 0)
+ fsnotify_access(file);
+
return ret;
}
@@ -1493,8 +1492,10 @@ static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,
if (!ret)
ret = iter_to_pipe(iter, pipe, buf_flag);
pipe_unlock(pipe);
- if (ret > 0)
+ if (ret > 0) {
wakeup_pipe_readers(pipe);
+ fsnotify_modify(file);
+ }
return ret;
}
@@ -1928,6 +1929,11 @@ long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags)
}
}
+ if (ret > 0) {
+ fsnotify_access(in);
+ fsnotify_modify(out);
+ }
+
return ret;
}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 24463145b351..c6e626b00546 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -61,7 +61,7 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
- inode->i_ctime.tv_sec = inode->i_mtime.tv_sec;
+ inode_set_ctime(inode, inode->i_mtime.tv_sec, 0);
inode->i_mode = le16_to_cpu(sqsh_ino->mode);
inode->i_size = 0;
diff --git a/fs/stack.c b/fs/stack.c
index c9830924eb12..b5e01bdb5f5f 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -68,7 +68,7 @@ void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
dest->i_rdev = src->i_rdev;
dest->i_atime = src->i_atime;
dest->i_mtime = src->i_mtime;
- dest->i_ctime = src->i_ctime;
+ inode_set_ctime_to_ts(dest, inode_get_ctime(src));
dest->i_blkbits = src->i_blkbits;
dest->i_flags = src->i_flags;
set_nlink(dest, src->i_nlink);
diff --git a/fs/stat.c b/fs/stat.c
index 7c238da22ef0..136711ae72fb 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -27,10 +27,42 @@
#include "mount.h"
/**
+ * fill_mg_cmtime - Fill in the mtime and ctime and flag ctime as QUERIED
+ * @stat: where to store the resulting values
+ * @request_mask: STATX_* values requested
+ * @inode: inode from which to grab the c/mtime
+ *
+ * Given @inode, grab the ctime and mtime out of it and store the result
+ * in @stat. When fetching the value, flag it as queried so the next write
+ * will use a fine-grained timestamp.
+ */
+void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode)
+{
+ atomic_long_t *pnsec = (atomic_long_t *)&inode->__i_ctime.tv_nsec;
+
+ /* If neither time was requested, then don't report them */
+ if (!(request_mask & (STATX_CTIME|STATX_MTIME))) {
+ stat->result_mask &= ~(STATX_CTIME|STATX_MTIME);
+ return;
+ }
+
+ stat->mtime = inode->i_mtime;
+ stat->ctime.tv_sec = inode->__i_ctime.tv_sec;
+ /*
+ * Atomically set the QUERIED flag and fetch the new value with
+ * the flag masked off.
+ */
+ stat->ctime.tv_nsec = atomic_long_fetch_or(I_CTIME_QUERIED, pnsec) &
+ ~I_CTIME_QUERIED;
+}
+EXPORT_SYMBOL(fill_mg_cmtime);
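
The QUERIED trick works because valid tv_nsec values never use the high bits, so one of them can double as a flag. A standalone illustration of the same fetch-or pattern with C11 atomics (the flag value below is a stand-in, not the kernel's I_CTIME_QUERIED definition):

#include <stdatomic.h>
#include <stdio.h>

#define CTIME_QUERIED (1UL << 31)	/* stand-in flag bit, > 999999999 nsec */

static atomic_ulong ctime_nsec = 123456789;

/* return the stored nanoseconds and atomically mark them as queried */
static unsigned long query_nsec(void)
{
	return atomic_fetch_or(&ctime_nsec, CTIME_QUERIED) & ~CTIME_QUERIED;
}

int main(void)
{
	printf("nsec=%lu\n", query_nsec());
	printf("queried=%d\n", !!(atomic_load(&ctime_nsec) & CTIME_QUERIED));
	return 0;
}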
+
+/**
* generic_fillattr - Fill in the basic attributes from the inode struct
- * @idmap: idmap of the mount the inode was found from
- * @inode: Inode to use as the source
- * @stat: Where to fill in the attributes
+ * @idmap: idmap of the mount the inode was found from
+ * @request_mask: statx request_mask
+ * @inode: Inode to use as the source
+ * @stat: Where to fill in the attributes
*
* Fill in the basic attributes in the kstat structure from data that's to be
* found on the VFS inode structure. This is the default if no getattr inode
@@ -42,8 +74,8 @@
 * uid and gid fields. On non-idmapped mounts or if permission checking is to be
 * performed on the raw inode, simply pass @nop_mnt_idmap.
*/
-void generic_fillattr(struct mnt_idmap *idmap, struct inode *inode,
- struct kstat *stat)
+void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
+ struct inode *inode, struct kstat *stat)
{
vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
@@ -57,10 +89,22 @@ void generic_fillattr(struct mnt_idmap *idmap, struct inode *inode,
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
- stat->ctime = inode->i_ctime;
+
+ if (is_mgtime(inode)) {
+ fill_mg_cmtime(stat, request_mask, inode);
+ } else {
+ stat->mtime = inode->i_mtime;
+ stat->ctime = inode_get_ctime(inode);
+ }
+
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
+
+ if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
+ stat->result_mask |= STATX_CHANGE_COOKIE;
+ stat->change_cookie = inode_query_iversion(inode);
+ }
+
}
EXPORT_SYMBOL(generic_fillattr);
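
Because the queried flag is only set when ctime or mtime are actually requested, callers that don't ask for them avoid the side effect. A small userspace sketch using glibc's statx() wrapper (path is a placeholder) that does request them:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	/* asking for STATX_CTIME | STATX_MTIME exercises the c/mtime path */
	if (statx(AT_FDCWD, "somefile", 0, STATX_CTIME | STATX_MTIME, &stx))
		return 1;
	printf("ctime %lld.%09u\n",
	       (long long)stx.stx_ctime.tv_sec, stx.stx_ctime.tv_nsec);
	return 0;
}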
@@ -123,17 +167,12 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
STATX_ATTR_DAX);
- if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
- stat->result_mask |= STATX_CHANGE_COOKIE;
- stat->change_cookie = inode_query_iversion(inode);
- }
-
idmap = mnt_idmap(path->mnt);
if (inode->i_op->getattr)
return inode->i_op->getattr(idmap, path, stat,
request_mask, query_flags);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
diff --git a/fs/super.c b/fs/super.c
index e781226e2880..692654c2af36 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -39,7 +39,7 @@
#include <uapi/linux/mount.h>
#include "internal.h"
-static int thaw_super_locked(struct super_block *sb);
+static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);
static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);
@@ -50,6 +50,130 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
"sb_internal",
};
+static inline void __super_lock(struct super_block *sb, bool excl)
+{
+ if (excl)
+ down_write(&sb->s_umount);
+ else
+ down_read(&sb->s_umount);
+}
+
+static inline void super_unlock(struct super_block *sb, bool excl)
+{
+ if (excl)
+ up_write(&sb->s_umount);
+ else
+ up_read(&sb->s_umount);
+}
+
+static inline void __super_lock_excl(struct super_block *sb)
+{
+ __super_lock(sb, true);
+}
+
+static inline void super_unlock_excl(struct super_block *sb)
+{
+ super_unlock(sb, true);
+}
+
+static inline void super_unlock_shared(struct super_block *sb)
+{
+ super_unlock(sb, false);
+}
+
+static inline bool wait_born(struct super_block *sb)
+{
+ unsigned int flags;
+
+ /*
+ * Pairs with smp_store_release() in super_wake() and ensures
+ * that we see SB_BORN or SB_DYING after we're woken.
+ */
+ flags = smp_load_acquire(&sb->s_flags);
+ return flags & (SB_BORN | SB_DYING);
+}
+
+/**
+ * super_lock - wait for superblock to become ready and lock it
+ * @sb: superblock to wait for
+ * @excl: whether exclusive access is required
+ *
+ * If the superblock has passed through neither vfs_get_tree() nor
+ * generic_shutdown_super() yet, wait for that to happen. Either superblock
+ * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
+ * woken and we'll see SB_DYING.
+ *
+ * The caller must have acquired a temporary reference on @sb->s_count.
+ *
+ * Return: This returns true if SB_BORN was set, false if SB_DYING was
+ * set. The function acquires s_umount and returns with it held.
+ */
+static __must_check bool super_lock(struct super_block *sb, bool excl)
+{
+
+ lockdep_assert_not_held(&sb->s_umount);
+
+relock:
+ __super_lock(sb, excl);
+
+ /*
+ * Has gone through generic_shutdown_super() in the meantime.
+ * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
+ * grab a reference to this. Tell them so.
+ */
+ if (sb->s_flags & SB_DYING)
+ return false;
+
+ /* Has called ->get_tree() successfully. */
+ if (sb->s_flags & SB_BORN)
+ return true;
+
+ super_unlock(sb, excl);
+
+ /* wait until the superblock is ready or dying */
+ wait_var_event(&sb->s_flags, wait_born(sb));
+
+ /*
+ * Neither SB_BORN nor SB_DYING are ever unset so we never loop.
+ * Just reacquire @sb->s_umount for the caller.
+ */
+ goto relock;
+}
+
+/* wait and acquire read-side of @sb->s_umount */
+static inline bool super_lock_shared(struct super_block *sb)
+{
+ return super_lock(sb, false);
+}
+
+/* wait and acquire write-side of @sb->s_umount */
+static inline bool super_lock_excl(struct super_block *sb)
+{
+ return super_lock(sb, true);
+}
+
+/* wake waiters */
+#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
+static void super_wake(struct super_block *sb, unsigned int flag)
+{
+ WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
+ WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);
+
+ /*
+ * Pairs with smp_load_acquire() in super_lock() to make sure
+	 * all initializations in the superblock are seen by anyone who
+	 * observes SB_BORN being set.
+ */
+ smp_store_release(&sb->s_flags, sb->s_flags | flag);
+ /*
+ * Pairs with the barrier in prepare_to_wait_event() to make sure
+ * ___wait_var_event() either sees SB_BORN set or
+ * waitqueue_active() check in wake_up_var() sees the waiter.
+ */
+ smp_mb();
+ wake_up_var(&sb->s_flags);
+}
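
super_wake() publishes SB_BORN/SB_DYING with a release store that super_lock()'s acquire load pairs with, so waiters observe a fully initialised superblock. A self-contained userspace analogue of that pairing (flag values are illustrative; a spin-wait stands in for wait_var_event()/wake_up_var()), built with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SB_BORN  0x1
#define SB_DYING 0x2

static atomic_uint s_flags;
static int payload;	/* stands in for the superblock contents */

static void *mounter(void *arg)
{
	(void)arg;
	payload = 42;	/* "set up" the superblock */
	/* publish: everything written above is visible to acquirers */
	atomic_fetch_or_explicit(&s_flags, SB_BORN, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, mounter, NULL);
	/* wait until SB_BORN or SB_DYING becomes visible */
	while (!(atomic_load_explicit(&s_flags, memory_order_acquire) &
		 (SB_BORN | SB_DYING)))
		;
	printf("payload=%d\n", payload);	/* guaranteed to print 42 */
	pthread_join(t, NULL);
	return 0;
}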
+
/*
* One thing we have to be careful of with a per-sb shrinker is that we don't
* drop the last active reference to the superblock from within the shrinker.
@@ -76,7 +200,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
if (!(sc->gfp_mask & __GFP_FS))
return SHRINK_STOP;
- if (!trylock_super(sb))
+ if (!super_trylock_shared(sb))
return SHRINK_STOP;
if (sb->s_op->nr_cached_objects)
@@ -110,7 +234,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
freed += sb->s_op->free_cached_objects(sb, sc);
}
- up_read(&sb->s_umount);
+ super_unlock_shared(sb);
return freed;
}
@@ -123,17 +247,17 @@ static unsigned long super_cache_count(struct shrinker *shrink,
sb = container_of(shrink, struct super_block, s_shrink);
/*
- * We don't call trylock_super() here as it is a scalability bottleneck,
- * so we're exposed to partial setup state. The shrinker rwsem does not
- * protect filesystem operations backing list_lru_shrink_count() or
- * s_op->nr_cached_objects(). Counts can change between
- * super_cache_count and super_cache_scan, so we really don't need locks
- * here.
+ * We don't call super_trylock_shared() here as it is a scalability
+ * bottleneck, so we're exposed to partial setup state. The shrinker
+ * rwsem does not protect filesystem operations backing
+ * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
+ * change between super_cache_count and super_cache_scan, so we really
+ * don't need locks here.
*
* However, if we are currently mounting the superblock, the underlying
* filesystem might be in a state of partial construction and hence it
- * is dangerous to access it. trylock_super() uses a SB_BORN check to
- * avoid this situation, so do the same here. The memory barrier is
+ * is dangerous to access it. super_trylock_shared() uses a SB_BORN check
+ * to avoid this situation, so do the same here. The memory barrier is
* matched with the one in mount_fs() as we don't hold locks here.
*/
if (!(sb->s_flags & SB_BORN))
@@ -176,7 +300,7 @@ static void destroy_unused_super(struct super_block *s)
{
if (!s)
return;
- up_write(&s->s_umount);
+ super_unlock_excl(s);
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
security_sb_free(s);
@@ -337,10 +461,29 @@ void deactivate_locked_super(struct super_block *s)
list_lru_destroy(&s->s_dentry_lru);
list_lru_destroy(&s->s_inode_lru);
+ /*
+ * Remove it from @fs_supers so it isn't found by new
+ * sget{_fc}() walkers anymore. Any concurrent mounter still
+ * managing to grab a temporary reference is guaranteed to
+ * already see SB_DYING and will wait until we notify them about
+ * SB_DEAD.
+ */
+ spin_lock(&sb_lock);
+ hlist_del_init(&s->s_instances);
+ spin_unlock(&sb_lock);
+
+ /*
+ * Let concurrent mounts know that this thing is really dead.
+ * We don't need @sb->s_umount here as every concurrent caller
+ * will see SB_DYING and either discard the superblock or wait
+ * for SB_DEAD.
+ */
+ super_wake(s, SB_DEAD);
+
put_filesystem(fs);
put_super(s);
} else {
- up_write(&s->s_umount);
+ super_unlock_excl(s);
}
}
@@ -357,7 +500,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
void deactivate_super(struct super_block *s)
{
if (!atomic_add_unless(&s->s_active, -1, 1)) {
- down_write(&s->s_umount);
+ __super_lock_excl(s);
deactivate_locked_super(s);
}
}
@@ -379,20 +522,61 @@ EXPORT_SYMBOL(deactivate_super);
*/
static int grab_super(struct super_block *s) __releases(sb_lock)
{
+ bool born;
+
s->s_count++;
spin_unlock(&sb_lock);
- down_write(&s->s_umount);
- if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
+ born = super_lock_excl(s);
+ if (born && atomic_inc_not_zero(&s->s_active)) {
put_super(s);
return 1;
}
- up_write(&s->s_umount);
+ super_unlock_excl(s);
put_super(s);
return 0;
}
+static inline bool wait_dead(struct super_block *sb)
+{
+ unsigned int flags;
+
+ /*
+ * Pairs with memory barrier in super_wake() and ensures
+ * that we see SB_DEAD after we're woken.
+ */
+ flags = smp_load_acquire(&sb->s_flags);
+ return flags & SB_DEAD;
+}
+
+/**
+ * grab_super_dead - acquire an active reference to a superblock
+ * @sb: superblock to acquire
+ *
+ * Acquire a temporary reference on a superblock and try to trade it for
+ * an active reference. This is used in sget{_fc}() to wait for a
+ * superblock to either become SB_BORN or to pass through
+ * ->kill_sb() and be marked as SB_DEAD.
+ *
+ * Return: This returns true if an active reference could be acquired,
+ * false if not.
+ */
+static bool grab_super_dead(struct super_block *sb)
+{
+
+ sb->s_count++;
+ if (grab_super(sb)) {
+ put_super(sb);
+ lockdep_assert_held(&sb->s_umount);
+ return true;
+ }
+ wait_var_event(&sb->s_flags, wait_dead(sb));
+ put_super(sb);
+ lockdep_assert_not_held(&sb->s_umount);
+ return false;
+}
+
/*
- * trylock_super - try to grab ->s_umount shared
+ * super_trylock_shared - try to grab ->s_umount shared
* @sb: reference we are trying to grab
*
* Try to prevent fs shutdown. This is used in places where we
@@ -408,13 +592,13 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
* of down_read(). There's a couple of places that are OK with that, but
* it's very much not a general-purpose interface.
*/
-bool trylock_super(struct super_block *sb)
+bool super_trylock_shared(struct super_block *sb)
{
if (down_read_trylock(&sb->s_umount)) {
- if (!hlist_unhashed(&sb->s_instances) &&
- sb->s_root && (sb->s_flags & SB_BORN))
+ if (!(sb->s_flags & SB_DYING) && sb->s_root &&
+ (sb->s_flags & SB_BORN))
return true;
- up_read(&sb->s_umount);
+ super_unlock_shared(sb);
}
return false;
@@ -439,13 +623,13 @@ bool trylock_super(struct super_block *sb)
void retire_super(struct super_block *sb)
{
WARN_ON(!sb->s_bdev);
- down_write(&sb->s_umount);
+ __super_lock_excl(sb);
if (sb->s_iflags & SB_I_PERSB_BDI) {
bdi_unregister(sb->s_bdi);
sb->s_iflags &= ~SB_I_PERSB_BDI;
}
sb->s_iflags |= SB_I_RETIRED;
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);
@@ -517,11 +701,17 @@ void generic_shutdown_super(struct super_block *sb)
spin_unlock(&sb->s_inode_list_lock);
}
}
- spin_lock(&sb_lock);
- /* should be initialized for __put_super_and_need_restart() */
- hlist_del_init(&sb->s_instances);
- spin_unlock(&sb_lock);
- up_write(&sb->s_umount);
+ /*
+ * Broadcast to everyone that grabbed a temporary reference to this
+ * superblock before we removed it from @fs_supers that the superblock
+ * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
+ * discard this superblock and treat it as dead.
+ *
+ * We leave the superblock on @fs_supers so it can be found by
+	 * sget{_fc}() until we have passed sb->kill_sb().
+ */
+ super_wake(sb, SB_DYING);
+ super_unlock_excl(sb);
if (sb->s_bdi != &noop_backing_dev_info) {
if (sb->s_iflags & SB_I_PERSB_BDI)
bdi_unregister(sb->s_bdi);
@@ -546,17 +736,31 @@ bool mount_capable(struct fs_context *fc)
* @test: Comparison callback
* @set: Setup callback
*
- * Find or create a superblock using the parameters stored in the filesystem
- * context and the two callback functions.
+ * Create a new superblock or find an existing one.
+ *
+ * The @test callback is used to find a matching existing superblock.
+ * Whether or not the requested parameters in @fc are taken into account
+ * is specific to the @test callback that is used. They may even be
+ * completely ignored.
+ *
+ * If an extant superblock is matched, it will be returned unless:
*
- * If an extant superblock is matched, then that will be returned with an
- * elevated reference count that the caller must transfer or discard.
+ * (1) the namespace of the filesystem context @fc and the extant
+ *     superblock's namespace differ
+ *
+ * (2) the filesystem context @fc has requested that reusing an extant
+ * superblock is not allowed
+ *
+ * In both cases EBUSY will be returned.
*
* If no match is made, a new superblock will be allocated and basic
- * initialisation will be performed (s_type, s_fs_info and s_id will be set and
- * the set() callback will be invoked), the superblock will be published and it
- * will be returned in a partially constructed state with SB_BORN and SB_ACTIVE
- * as yet unset.
+ * initialisation will be performed (s_type, s_fs_info and s_id will be
+ * set and the @set callback will be invoked), the superblock will be
+ * published and it will be returned in a partially constructed state
+ * with SB_BORN and SB_ACTIVE as yet unset.
+ *
+ * Return: On success, an extant or newly created superblock is
+ * returned. On failure an error pointer is returned.
*/
struct super_block *sget_fc(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
@@ -595,6 +799,11 @@ retry:
s->s_type = fc->fs_type;
s->s_iflags |= fc->s_iflags;
strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
+ /*
+ * Make the superblock visible on @super_blocks and @fs_supers.
+ * It's in a nascent state and users should wait on SB_BORN or
+ * SB_DYING to be set.
+ */
list_add_tail(&s->s_list, &super_blocks);
hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
spin_unlock(&sb_lock);
@@ -603,12 +812,16 @@ retry:
return s;
share_extant_sb:
- if (user_ns != old->s_user_ns) {
+ if (user_ns != old->s_user_ns || fc->exclusive) {
spin_unlock(&sb_lock);
destroy_unused_super(s);
+ if (fc->exclusive)
+ warnfc(fc, "reusing existing filesystem not allowed");
+ else
+ warnfc(fc, "reusing existing filesystem in another namespace not allowed");
return ERR_PTR(-EBUSY);
}
- if (!grab_super(old))
+ if (!grab_super_dead(old))
goto retry;
destroy_unused_super(s);
return old;
@@ -652,7 +865,7 @@ retry:
destroy_unused_super(s);
return ERR_PTR(-EBUSY);
}
- if (!grab_super(old))
+ if (!grab_super_dead(old))
goto retry;
destroy_unused_super(s);
return old;
@@ -685,7 +898,7 @@ EXPORT_SYMBOL(sget);
void drop_super(struct super_block *sb)
{
- up_read(&sb->s_umount);
+ super_unlock_shared(sb);
put_super(sb);
}
@@ -693,7 +906,7 @@ EXPORT_SYMBOL(drop_super);
void drop_super_exclusive(struct super_block *sb)
{
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);
@@ -704,7 +917,8 @@ static void __iterate_supers(void (*f)(struct super_block *))
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
+		/* Pairs with memory barrier in super_wake(). */
+ if (smp_load_acquire(&sb->s_flags) & SB_DYING)
continue;
sb->s_count++;
spin_unlock(&sb_lock);
@@ -734,15 +948,15 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
+ bool born;
+
sb->s_count++;
spin_unlock(&sb_lock);
- down_read(&sb->s_umount);
- if (sb->s_root && (sb->s_flags & SB_BORN))
+ born = super_lock_shared(sb);
+ if (born && sb->s_root)
f(sb, arg);
- up_read(&sb->s_umount);
+ super_unlock_shared(sb);
spin_lock(&sb_lock);
if (p)
@@ -770,13 +984,15 @@ void iterate_supers_type(struct file_system_type *type,
spin_lock(&sb_lock);
hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
+ bool born;
+
sb->s_count++;
spin_unlock(&sb_lock);
- down_read(&sb->s_umount);
- if (sb->s_root && (sb->s_flags & SB_BORN))
+ born = super_lock_shared(sb);
+ if (born && sb->s_root)
f(sb, arg);
- up_read(&sb->s_umount);
+ super_unlock_shared(sb);
spin_lock(&sb_lock);
if (p)
@@ -791,43 +1007,6 @@ void iterate_supers_type(struct file_system_type *type,
EXPORT_SYMBOL(iterate_supers_type);
/**
- * get_super - get the superblock of a device
- * @bdev: device to get the superblock for
- *
- * Scans the superblock list and finds the superblock of the file system
- * mounted on the device given. %NULL is returned if no match is found.
- */
-struct super_block *get_super(struct block_device *bdev)
-{
- struct super_block *sb;
-
- if (!bdev)
- return NULL;
-
- spin_lock(&sb_lock);
-rescan:
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
- if (sb->s_bdev == bdev) {
- sb->s_count++;
- spin_unlock(&sb_lock);
- down_read(&sb->s_umount);
- /* still alive? */
- if (sb->s_root && (sb->s_flags & SB_BORN))
- return sb;
- up_read(&sb->s_umount);
- /* nope, got unmounted */
- spin_lock(&sb_lock);
- __put_super(sb);
- goto rescan;
- }
- }
- spin_unlock(&sb_lock);
- return NULL;
-}
-
-/**
* get_active_super - get an active reference to the superblock of a device
* @bdev: device to get the superblock for
*
@@ -842,15 +1021,12 @@ struct super_block *get_active_super(struct block_device *bdev)
if (!bdev)
return NULL;
-restart:
spin_lock(&sb_lock);
list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
if (sb->s_bdev == bdev) {
if (!grab_super(sb))
- goto restart;
- up_write(&sb->s_umount);
+ return NULL;
+ super_unlock_excl(sb);
return sb;
}
}
@@ -863,28 +1039,21 @@ struct super_block *user_get_super(dev_t dev, bool excl)
struct super_block *sb;
spin_lock(&sb_lock);
-rescan:
list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
if (sb->s_dev == dev) {
+ bool born;
+
sb->s_count++;
spin_unlock(&sb_lock);
- if (excl)
- down_write(&sb->s_umount);
- else
- down_read(&sb->s_umount);
/* still alive? */
- if (sb->s_root && (sb->s_flags & SB_BORN))
+ born = super_lock(sb, excl);
+ if (born && sb->s_root)
return sb;
- if (excl)
- up_write(&sb->s_umount);
- else
- up_read(&sb->s_umount);
+ super_unlock(sb, excl);
/* nope, got unmounted */
spin_lock(&sb_lock);
__put_super(sb);
- goto rescan;
+ break;
}
}
spin_unlock(&sb_lock);
@@ -926,9 +1095,9 @@ int reconfigure_super(struct fs_context *fc)
if (remount_ro) {
if (!hlist_empty(&sb->s_pins)) {
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
group_pin_kill(&sb->s_pins);
- down_write(&sb->s_umount);
+ __super_lock_excl(sb);
if (!sb->s_root)
return 0;
if (sb->s_writers.frozen != SB_UNFROZEN)
@@ -991,9 +1160,9 @@ cancel_readonly:
static void do_emergency_remount_callback(struct super_block *sb)
{
- down_write(&sb->s_umount);
- if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
- !sb_rdonly(sb)) {
+ bool born = super_lock_excl(sb);
+
+ if (born && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
struct fs_context *fc;
fc = fs_context_for_reconfigure(sb->s_root,
@@ -1004,7 +1173,7 @@ static void do_emergency_remount_callback(struct super_block *sb)
put_fs_context(fc);
}
}
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
}
static void do_emergency_remount(struct work_struct *work)
@@ -1027,12 +1196,13 @@ void emergency_remount(void)
static void do_thaw_all_callback(struct super_block *sb)
{
- down_write(&sb->s_umount);
- if (sb->s_root && sb->s_flags & SB_BORN) {
+ bool born = super_lock_excl(sb);
+
+ if (born && sb->s_root) {
emergency_thaw_bdev(sb);
- thaw_super_locked(sb);
+ thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
} else {
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
}
}
@@ -1136,7 +1306,7 @@ static int test_single_super(struct super_block *s, struct fs_context *fc)
return 1;
}
-static int vfs_get_super(struct fs_context *fc, bool reconf,
+static int vfs_get_super(struct fs_context *fc,
int (*test)(struct super_block *, struct fs_context *),
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
@@ -1154,19 +1324,9 @@ static int vfs_get_super(struct fs_context *fc, bool reconf,
goto error;
sb->s_flags |= SB_ACTIVE;
- fc->root = dget(sb->s_root);
- } else {
- fc->root = dget(sb->s_root);
- if (reconf) {
- err = reconfigure_super(fc);
- if (err < 0) {
- dput(fc->root);
- fc->root = NULL;
- goto error;
- }
- }
}
+ fc->root = dget(sb->s_root);
return 0;
error:
@@ -1178,7 +1338,7 @@ int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
{
- return vfs_get_super(fc, false, NULL, fill_super);
+ return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);
@@ -1186,54 +1346,81 @@ int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc))
{
- return vfs_get_super(fc, false, test_single_super, fill_super);
+ return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);
-int get_tree_single_reconf(struct fs_context *fc,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc))
-{
- return vfs_get_super(fc, true, test_single_super, fill_super);
-}
-EXPORT_SYMBOL(get_tree_single_reconf);
-
int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
void *key)
{
fc->s_fs_info = key;
- return vfs_get_super(fc, false, test_keyed_super, fill_super);
+ return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);
#ifdef CONFIG_BLOCK
-static void fs_mark_dead(struct block_device *bdev)
+/*
+ * Lock a superblock that the caller holds a reference to.
+ *
+ * The caller needs to ensure that the super_block isn't being freed while
+ * calling this function, e.g. by holding a lock over the call to this function
+ * and the place that clears the pointer to the superblock used by this function
+ * before freeing the superblock.
+ */
+static bool super_lock_shared_active(struct super_block *sb)
{
- struct super_block *sb;
+ bool born = super_lock_shared(sb);
+
+ if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
+ super_unlock_shared(sb);
+ return false;
+ }
+ return true;
+}
+
+static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
+{
+ struct super_block *sb = bdev->bd_holder;
+
+ /* bd_holder_lock ensures that the sb isn't freed */
+ lockdep_assert_held(&bdev->bd_holder_lock);
- sb = get_super(bdev);
- if (!sb)
+ if (!super_lock_shared_active(sb))
return;
+ if (!surprise)
+ sync_filesystem(sb);
+ shrink_dcache_sb(sb);
+ invalidate_inodes(sb);
if (sb->s_op->shutdown)
sb->s_op->shutdown(sb);
- drop_super(sb);
+
+ super_unlock_shared(sb);
+}
+
+static void fs_bdev_sync(struct block_device *bdev)
+{
+ struct super_block *sb = bdev->bd_holder;
+
+ lockdep_assert_held(&bdev->bd_holder_lock);
+
+ if (!super_lock_shared_active(sb))
+ return;
+ sync_filesystem(sb);
+ super_unlock_shared(sb);
}
-static const struct blk_holder_ops fs_holder_ops = {
- .mark_dead = fs_mark_dead,
+const struct blk_holder_ops fs_holder_ops = {
+ .mark_dead = fs_bdev_mark_dead,
+ .sync = fs_bdev_sync,
};
+EXPORT_SYMBOL_GPL(fs_holder_ops);
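
Exporting fs_holder_ops lets filesystems that open additional block devices register the same holder callbacks. A kernel-context sketch (hypothetical helper; assumes the extra device is opened with the superblock as holder):

static struct block_device *examplefs_open_extra_bdev(struct super_block *sb,
						      dev_t dev)
{
	/* surprise removal of this device now reaches fs_bdev_mark_dead()
	 * with @sb as the holder */
	return blkdev_get_by_dev(dev, sb_open_mode(sb->s_flags), sb,
				 &fs_holder_ops);
}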
static int set_bdev_super(struct super_block *s, void *data)
{
- s->s_bdev = data;
- s->s_dev = s->s_bdev->bd_dev;
- s->s_bdi = bdi_get(s->s_bdev->bd_disk->bdi);
-
- if (bdev_stable_writes(s->s_bdev))
- s->s_iflags |= SB_I_STABLE_WRITES;
+ s->s_dev = *(dev_t *)data;
return 0;
}
@@ -1244,8 +1431,63 @@ static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
{
- return !(s->s_iflags & SB_I_RETIRED) && s->s_bdev == fc->sget_key;
+ return !(s->s_iflags & SB_I_RETIRED) &&
+ s->s_dev == *(dev_t *)fc->sget_key;
+}
+
+int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc)
+{
+ blk_mode_t mode = sb_open_mode(sb_flags);
+ struct block_device *bdev;
+
+ bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
+ if (IS_ERR(bdev)) {
+ if (fc)
+ errorf(fc, "%s: Can't open blockdev", fc->source);
+ return PTR_ERR(bdev);
+ }
+
+ /*
+	 * This really should be in blkdev_get_by_dev(), but right now it can't be due
+ * to legacy issues that require us to allow opening a block device node
+ * writable from userspace even for a read-only block device.
+ */
+ if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
+ blkdev_put(bdev, sb);
+ return -EACCES;
+ }
+
+ /*
+ * Until SB_BORN flag is set, there can be no active superblock
+ * references and thus no filesystem freezing. get_active_super() will
+ * just loop waiting for SB_BORN so even freeze_bdev() cannot proceed.
+ *
+ * It is enough to check bdev was not frozen before we set s_bdev.
+ */
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ if (fc)
+ warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
+ blkdev_put(bdev, sb);
+ return -EBUSY;
+ }
+ spin_lock(&sb_lock);
+ sb->s_bdev = bdev;
+ sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
+ if (bdev_stable_writes(bdev))
+ sb->s_iflags |= SB_I_STABLE_WRITES;
+ spin_unlock(&sb_lock);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
+ snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
+ shrinker_debugfs_rename(&sb->s_shrink, "sb-%s:%s", sb->s_type->name,
+ sb->s_id);
+ sb_set_blocksize(sb, block_size(bdev));
+ return 0;
}
+EXPORT_SYMBOL_GPL(setup_bdev_super);
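
With device lookup and opening folded into setup_bdev_super(), a block-based filesystem's fs_context hook stays minimal. A kernel-context sketch (hypothetical examplefs, not buildable on its own):

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	/* sb->s_bdev, sb->s_bdi and the block size were already set up
	 * by setup_bdev_super() before this callback runs */
	return -EINVAL;		/* placeholder: read the on-disk super here */
}

static int examplefs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, examplefs_fill_super);
}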
/**
* get_tree_bdev - Get a superblock based on a single block device
@@ -1256,73 +1498,49 @@ int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *,
struct fs_context *))
{
- struct block_device *bdev;
struct super_block *s;
int error = 0;
+ dev_t dev;
if (!fc->source)
return invalf(fc, "No source specified");
- bdev = blkdev_get_by_path(fc->source, sb_open_mode(fc->sb_flags),
- fc->fs_type, &fs_holder_ops);
- if (IS_ERR(bdev)) {
- errorf(fc, "%s: Can't open blockdev", fc->source);
- return PTR_ERR(bdev);
- }
-
- /* Once the superblock is inserted into the list by sget_fc(), s_umount
- * will protect the lockfs code from trying to start a snapshot while
- * we are mounting
- */
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
- blkdev_put(bdev, fc->fs_type);
- return -EBUSY;
+ error = lookup_bdev(fc->source, &dev);
+ if (error) {
+ errorf(fc, "%s: Can't lookup blockdev", fc->source);
+ return error;
}
fc->sb_flags |= SB_NOSEC;
- fc->sget_key = bdev;
+ fc->sget_key = &dev;
s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- if (IS_ERR(s)) {
- blkdev_put(bdev, fc->fs_type);
+ if (IS_ERR(s))
return PTR_ERR(s);
- }
if (s->s_root) {
/* Don't summarily change the RO/RW state. */
if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
- warnf(fc, "%pg: Can't mount, would change RO state", bdev);
+ warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
deactivate_locked_super(s);
- blkdev_put(bdev, fc->fs_type);
return -EBUSY;
}
-
+ } else {
/*
- * s_umount nests inside open_mutex during
- * __invalidate_device(). blkdev_put() acquires
- * open_mutex and can't be called under s_umount. Drop
- * s_umount temporarily. This is safe as we're
- * holding an active reference.
+ * We drop s_umount here because we need to open the bdev and
+ * bdev->open_mutex ranks above s_umount (blkdev_put() ->
+		 * bdev_mark_dead()). It is safe because we have an active sb
+ * reference and SB_BORN is not set yet.
*/
- up_write(&s->s_umount);
- blkdev_put(bdev, fc->fs_type);
- down_write(&s->s_umount);
- } else {
- snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
- shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
- fc->fs_type->name, s->s_id);
- sb_set_blocksize(s, block_size(bdev));
- error = fill_super(s, fc);
+ super_unlock_excl(s);
+ error = setup_bdev_super(s, fc->sb_flags, fc);
+ __super_lock_excl(s);
+ if (!error)
+ error = fill_super(s, fc);
if (error) {
deactivate_locked_super(s);
return error;
}
-
s->s_flags |= SB_ACTIVE;
- bdev->bd_super = s;
}
BUG_ON(fc->root);
@@ -1333,79 +1551,52 @@ EXPORT_SYMBOL(get_tree_bdev);
static int test_bdev_super(struct super_block *s, void *data)
{
- return !(s->s_iflags & SB_I_RETIRED) && (void *)s->s_bdev == data;
+ return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int))
{
- struct block_device *bdev;
struct super_block *s;
- int error = 0;
+ int error;
+ dev_t dev;
- bdev = blkdev_get_by_path(dev_name, sb_open_mode(flags), fs_type,
- &fs_holder_ops);
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+ error = lookup_bdev(dev_name, &dev);
+ if (error)
+ return ERR_PTR(error);
- /*
- * once the super is inserted into the list by sget, s_umount
- * will protect the lockfs code from trying to start a snapshot
- * while we are mounting
- */
- mutex_lock(&bdev->bd_fsfreeze_mutex);
- if (bdev->bd_fsfreeze_count > 0) {
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
- error = -EBUSY;
- goto error_bdev;
- }
- s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
- bdev);
- mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ flags |= SB_NOSEC;
+ s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
if (IS_ERR(s))
- goto error_s;
+ return ERR_CAST(s);
if (s->s_root) {
if ((flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
- error = -EBUSY;
- goto error_bdev;
+ return ERR_PTR(-EBUSY);
}
-
+ } else {
/*
- * s_umount nests inside open_mutex during
- * __invalidate_device(). blkdev_put() acquires
- * open_mutex and can't be called under s_umount. Drop
- * s_umount temporarily. This is safe as we're
- * holding an active reference.
+ * We drop s_umount here because we need to open the bdev and
+ * bdev->open_mutex ranks above s_umount (blkdev_put() ->
+		 * bdev_mark_dead()). It is safe because we have an active sb
+ * reference and SB_BORN is not set yet.
*/
- up_write(&s->s_umount);
- blkdev_put(bdev, fs_type);
- down_write(&s->s_umount);
- } else {
- snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
- shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s",
- fs_type->name, s->s_id);
- sb_set_blocksize(s, block_size(bdev));
- error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
+ super_unlock_excl(s);
+ error = setup_bdev_super(s, flags, NULL);
+ __super_lock_excl(s);
+ if (!error)
+ error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
- goto error;
+ return ERR_PTR(error);
}
s->s_flags |= SB_ACTIVE;
- bdev->bd_super = s;
}
return dget(s->s_root);
-
-error_s:
- error = PTR_ERR(s);
-error_bdev:
- blkdev_put(bdev, fs_type);
-error:
- return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
@@ -1413,10 +1604,11 @@ void kill_block_super(struct super_block *sb)
{
struct block_device *bdev = sb->s_bdev;
- bdev->bd_super = NULL;
generic_shutdown_super(sb);
- sync_blockdev(bdev);
- blkdev_put(bdev, sb->s_type);
+ if (bdev) {
+ sync_blockdev(bdev);
+ blkdev_put(bdev, sb);
+ }
}
EXPORT_SYMBOL(kill_block_super);
@@ -1533,13 +1725,13 @@ int vfs_get_tree(struct fs_context *fc)
WARN_ON(!sb->s_bdi);
/*
- * Write barrier is for super_cache_count(). We place it before setting
- * SB_BORN as the data dependency between the two functions is the
- * superblock structure contents that we just set up, not the SB_BORN
- * flag.
+	 * super_wake() contains a memory barrier which also takes care of
+ * ordering for super_cache_count(). We place it before setting
+ * SB_BORN as the data dependency between the two functions is
+ * the superblock structure contents that we just set up, not
+ * the SB_BORN flag.
*/
- smp_wmb();
- sb->s_flags |= SB_BORN;
+ super_wake(sb, SB_BORN);
error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
if (unlikely(error)) {
@@ -1644,14 +1836,43 @@ static void sb_freeze_unlock(struct super_block *sb, int level)
percpu_up_write(sb->s_writers.rw_sem + level);
}
+static int wait_for_partially_frozen(struct super_block *sb)
+{
+ int ret = 0;
+
+ do {
+ unsigned short old = sb->s_writers.frozen;
+
+ up_write(&sb->s_umount);
+ ret = wait_var_event_killable(&sb->s_writers.frozen,
+ sb->s_writers.frozen != old);
+ down_write(&sb->s_umount);
+ } while (ret == 0 &&
+ sb->s_writers.frozen != SB_UNFROZEN &&
+ sb->s_writers.frozen != SB_FREEZE_COMPLETE);
+
+ return ret;
+}
+
/**
* freeze_super - lock the filesystem and force it into a consistent state
* @sb: the super to lock
+ * @who: context that wants to freeze
*
* Syncs the super to make sure the filesystem is consistent and calls the fs's
- * freeze_fs. Subsequent calls to this without first thawing the fs will return
+ * freeze_fs. Subsequent calls to this without first thawing the fs may return
* -EBUSY.
*
+ * @who should be:
+ * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
+ * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
+ *
+ * The @who argument distinguishes between the kernel and userspace trying to
+ * freeze the filesystem. Although there cannot be multiple kernel freezes or
+ * multiple userspace freezes in effect at any given time, the kernel and
+ * userspace can both hold a filesystem frozen. The filesystem remains frozen
+ * until there are no kernel or userspace freezes in effect.
+ *
* During this function, sb->s_writers.frozen goes through these values:
*
* SB_UNFROZEN: File system is normal, all writes progress as usual.
@@ -1677,34 +1898,62 @@ static void sb_freeze_unlock(struct super_block *sb, int level)
*
* sb->s_writers.frozen is protected by sb->s_umount.
*/
-int freeze_super(struct super_block *sb)
+int freeze_super(struct super_block *sb, enum freeze_holder who)
{
int ret;
atomic_inc(&sb->s_active);
- down_write(&sb->s_umount);
+ if (!super_lock_excl(sb))
+ WARN(1, "Dying superblock while freezing!");
+
+retry:
+ if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
+ if (sb->s_writers.freeze_holders & who) {
+ deactivate_locked_super(sb);
+ return -EBUSY;
+ }
+
+ WARN_ON(sb->s_writers.freeze_holders == 0);
+
+ /*
+ * Someone else already holds this type of freeze; share the
+ * freeze and assign the active ref to the freeze.
+ */
+ sb->s_writers.freeze_holders |= who;
+ super_unlock_excl(sb);
+ return 0;
+ }
+
if (sb->s_writers.frozen != SB_UNFROZEN) {
- deactivate_locked_super(sb);
- return -EBUSY;
+ ret = wait_for_partially_frozen(sb);
+ if (ret) {
+ deactivate_locked_super(sb);
+ return ret;
+ }
+
+ goto retry;
}
if (!(sb->s_flags & SB_BORN)) {
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
return 0; /* sic - it's "nothing to do" */
}
if (sb_rdonly(sb)) {
/* Nothing to do really... */
+ sb->s_writers.freeze_holders |= who;
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
- up_write(&sb->s_umount);
+ wake_up_var(&sb->s_writers.frozen);
+ super_unlock_excl(sb);
return 0;
}
sb->s_writers.frozen = SB_FREEZE_WRITE;
/* Release s_umount to preserve sb_start_write -> s_umount ordering */
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
sb_wait_write(sb, SB_FREEZE_WRITE);
- down_write(&sb->s_umount);
+ if (!super_lock_excl(sb))
+ WARN(1, "Dying superblock while freezing!");
/* Now we go and block page faults... */
sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
@@ -1715,6 +1964,7 @@ int freeze_super(struct super_block *sb)
if (ret) {
sb->s_writers.frozen = SB_UNFROZEN;
sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
+ wake_up_var(&sb->s_writers.frozen);
deactivate_locked_super(sb);
return ret;
}
@@ -1730,6 +1980,7 @@ int freeze_super(struct super_block *sb)
"VFS:Filesystem freeze failed\n");
sb->s_writers.frozen = SB_UNFROZEN;
sb_freeze_unlock(sb, SB_FREEZE_FS);
+ wake_up_var(&sb->s_writers.frozen);
deactivate_locked_super(sb);
return ret;
}
@@ -1738,24 +1989,50 @@ int freeze_super(struct super_block *sb)
* For debugging purposes so that fs can warn if it sees write activity
* when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
*/
+ sb->s_writers.freeze_holders |= who;
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
+ wake_up_var(&sb->s_writers.frozen);
lockdep_sb_freeze_release(sb);
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
return 0;
}
EXPORT_SYMBOL(freeze_super);
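
From userspace, FIFREEZE/FITHAW correspond to the FREEZE_HOLDER_USERSPACE holder, so a kernel-initiated freeze can coexist with the one taken below (mount point is a placeholder; needs CAP_SYS_ADMIN):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/test", O_RDONLY | O_DIRECTORY);

	if (fd < 0 || ioctl(fd, FIFREEZE, 0) != 0) {
		perror("freeze");
		return 1;
	}
	/* a second userspace FIFREEZE fails with EBUSY, but a kernel
	 * holder can still stack its own freeze on top */
	if (ioctl(fd, FITHAW, 0) != 0)
		perror("thaw");
	close(fd);
	return 0;
}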
-static int thaw_super_locked(struct super_block *sb)
+/*
+ * Undoes the effect of a freeze_super() call. If the filesystem is
+ * frozen both by userspace and the kernel, a thaw call from either source
+ * removes that state without releasing the other state or unlocking the
+ * filesystem.
+ */
+static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
{
int error;
- if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
- up_write(&sb->s_umount);
+ if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
+ if (!(sb->s_writers.freeze_holders & who)) {
+ super_unlock_excl(sb);
+ return -EINVAL;
+ }
+
+ /*
+ * Freeze is shared with someone else. Release our hold and
+ * drop the active ref that freeze_super assigned to the
+ * freezer.
+ */
+ if (sb->s_writers.freeze_holders & ~who) {
+ sb->s_writers.freeze_holders &= ~who;
+ deactivate_locked_super(sb);
+ return 0;
+ }
+ } else {
+ super_unlock_excl(sb);
return -EINVAL;
}
if (sb_rdonly(sb)) {
+ sb->s_writers.freeze_holders &= ~who;
sb->s_writers.frozen = SB_UNFROZEN;
+ wake_up_var(&sb->s_writers.frozen);
goto out;
}
@@ -1764,15 +2041,16 @@ static int thaw_super_locked(struct super_block *sb)
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
if (error) {
- printk(KERN_ERR
- "VFS:Filesystem thaw failed\n");
+ printk(KERN_ERR "VFS:Filesystem thaw failed\n");
lockdep_sb_freeze_release(sb);
- up_write(&sb->s_umount);
+ super_unlock_excl(sb);
return error;
}
}
+ sb->s_writers.freeze_holders &= ~who;
sb->s_writers.frozen = SB_UNFROZEN;
+ wake_up_var(&sb->s_writers.frozen);
sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
deactivate_locked_super(sb);
@@ -1782,13 +2060,20 @@ out:
/**
* thaw_super -- unlock filesystem
* @sb: the super to thaw
+ * @who: context that wants to thaw
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_super()
+ * if there are no remaining freezes on the filesystem.
*
- * Unlocks the filesystem and marks it writeable again after freeze_super().
+ * @who should be:
+ * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
+ * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
*/
-int thaw_super(struct super_block *sb)
+int thaw_super(struct super_block *sb, enum freeze_holder who)
{
- down_write(&sb->s_umount);
- return thaw_super_locked(sb);
+ if (!super_lock_excl(sb))
+ WARN(1, "Dying superblock while thawing!");
+ return thaw_super_locked(sb, who);
}
EXPORT_SYMBOL(thaw_super);
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 0140010aa0c3..2f5ead88d00b 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -224,7 +224,7 @@ got_it:
memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
err = sysv_handle_dirsync(dir);
out_page:
@@ -249,7 +249,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
}
de->inode = 0;
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
return sysv_handle_dirsync(inode);
}
@@ -346,7 +346,7 @@ int sysv_set_link(struct sysv_dir_entry *de, struct page *page,
}
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
return sysv_handle_dirsync(inode);
}
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index e732879036ab..6719da5889d9 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -165,7 +165,7 @@ struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
dirty_sb(sb);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = fs16_to_cpu(sbi, ino);
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_blocks = 0;
memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
SYSV_I(inode)->i_dir_start_lookup = 0;
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 9e8d4a6fb2f3..0aa3827d8178 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -202,8 +202,7 @@ struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
- inode->i_ctime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_ctime);
- inode->i_ctime.tv_nsec = 0;
+ inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->i_ctime), 0);
inode->i_atime.tv_nsec = 0;
inode->i_mtime.tv_nsec = 0;
inode->i_blocks = 0;
@@ -256,7 +255,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
- raw_inode->i_ctime = cpu_to_fs32(sbi, inode->i_ctime.tv_sec);
+ raw_inode->i_ctime = cpu_to_fs32(sbi, inode_get_ctime(inode).tv_sec);
si = SYSV_I(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 58d7f43a1371..edb94e55de8e 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -183,7 +183,7 @@ static inline int splice_branch(struct inode *inode,
*where->p = where->key;
write_unlock(&pointers_lock);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
/* had we spliced it onto indirect block? */
if (where->bh)
@@ -423,7 +423,7 @@ do_indirects:
}
n++;
}
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (IS_SYNC(inode))
sysv_sync_inode (inode);
else
@@ -449,7 +449,8 @@ int sysv_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, unsigned int flags)
{
struct super_block *s = path->dentry->d_sb;
- generic_fillattr(&nop_mnt_idmap, d_inode(path->dentry), stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry),
+ stat);
stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size);
stat->blksize = s->s_blocksize;
return 0;
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index fcf163fea3ad..d6b73798071b 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -103,7 +103,7 @@ static int sysv_link(struct dentry * old_dentry, struct inode * dir,
{
struct inode *inode = d_inode(old_dentry);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
@@ -161,7 +161,7 @@ static int sysv_unlink(struct inode * dir, struct dentry * dentry)
err = sysv_delete_entry(de, page);
if (!err) {
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
}
unmap_and_put_page(page, de);
@@ -230,7 +230,7 @@ static int sysv_rename(struct mnt_idmap *idmap, struct inode *old_dir,
unmap_and_put_page(new_page, new_de);
if (err)
goto out_dir;
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 57ac8aa4a724..2feb6c58648c 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -132,7 +132,7 @@ static struct inode *tracefs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
return inode;
}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 9c9d3f0e36a4..eef9e527d9ff 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -243,8 +243,8 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
(unsigned int)inode->i_mtime.tv_sec,
(unsigned int)inode->i_mtime.tv_nsec);
pr_err("\tctime %u.%u\n",
- (unsigned int)inode->i_ctime.tv_sec,
- (unsigned int)inode->i_ctime.tv_nsec);
+ (unsigned int) inode_get_ctime(inode).tv_sec,
+ (unsigned int) inode_get_ctime(inode).tv_nsec);
pr_err("\tcreat_sqnum %llu\n", ui->creat_sqnum);
pr_err("\txattr_size %u\n", ui->xattr_size);
pr_err("\txattr_cnt %u\n", ui->xattr_cnt);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ef0499edc248..2f48c58d47cd 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -96,8 +96,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
inode->i_flags |= S_NOCMTIME;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode->i_ctime =
- current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_mapping->nrpages = 0;
if (!is_xattr) {
@@ -325,7 +324,7 @@ static int ubifs_create(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -765,10 +764,10 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
inc_nlink(inode);
ihold(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -838,11 +837,11 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
}
lock_2_inodes(dir, inode);
- inode->i_ctime = current_time(dir);
+ inode_set_ctime_current(inode);
drop_nlink(inode);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
if (err)
goto out_cancel;
@@ -940,12 +939,12 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
}
lock_2_inodes(dir, inode);
- inode->i_ctime = current_time(dir);
+ inode_set_ctime_current(inode);
clear_nlink(inode);
drop_nlink(dir);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
if (err)
goto out_cancel;
@@ -1019,7 +1018,7 @@ static int ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inc_nlink(dir);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err) {
ubifs_err(c, "cannot create directory, error %d", err);
@@ -1110,7 +1109,7 @@ static int ubifs_mknod(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -1210,7 +1209,7 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = dir->i_ctime = inode->i_ctime;
+ dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -1298,7 +1297,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
struct ubifs_budget_req wht_req;
- struct timespec64 time;
unsigned int saved_nlink;
struct fscrypt_name old_nm, new_nm;
@@ -1414,8 +1412,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
* Like most other Unix systems, set the @i_ctime for inodes on a
* rename.
*/
- time = current_time(old_dir);
- old_inode->i_ctime = time;
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
/* We must adjust parent link count when renaming directories */
if (is_dir) {
@@ -1444,13 +1441,11 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dir->i_size -= old_sz;
ubifs_inode(old_dir)->ui_size = old_dir->i_size;
- old_dir->i_mtime = old_dir->i_ctime = time;
- new_dir->i_mtime = new_dir->i_ctime = time;
/*
* And finally, if we unlinked a direntry which happened to have the
* same name as the moved direntry, we have to decrement @i_nlink of
- * the unlinked inode and change its ctime.
+ * the unlinked inode.
*/
if (unlink) {
/*
@@ -1462,7 +1457,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
clear_nlink(new_inode);
else
drop_nlink(new_inode);
- new_inode->i_ctime = time;
} else {
new_dir->i_size += new_sz;
ubifs_inode(new_dir)->ui_size = new_dir->i_size;
@@ -1557,7 +1551,6 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
int sync = IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir);
struct inode *fst_inode = d_inode(old_dentry);
struct inode *snd_inode = d_inode(new_dentry);
- struct timespec64 time;
int err;
struct fscrypt_name fst_nm, snd_nm;
@@ -1588,11 +1581,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
lock_4_inodes(old_dir, new_dir, NULL, NULL);
- time = current_time(old_dir);
- fst_inode->i_ctime = time;
- snd_inode->i_ctime = time;
- old_dir->i_mtime = old_dir->i_ctime = time;
- new_dir->i_mtime = new_dir->i_ctime = time;
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
if (old_dir != new_dir) {
if (S_ISDIR(fst_inode->i_mode) && !S_ISDIR(snd_inode->i_mode)) {
@@ -1665,7 +1654,7 @@ int ubifs_getattr(struct mnt_idmap *idmap, const struct path *path,
STATX_ATTR_ENCRYPTED |
STATX_ATTR_IMMUTABLE);
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->blksize = UBIFS_BLOCK_SIZE;
stat->size = ui->ui_size;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 6738fe43040b..e5382f0b2587 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1092,7 +1092,7 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
if (attr->ia_valid & ATTR_MTIME)
inode->i_mtime = attr->ia_mtime;
if (attr->ia_valid & ATTR_CTIME)
- inode->i_ctime = attr->ia_ctime;
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (attr->ia_valid & ATTR_MODE) {
umode_t mode = attr->ia_mode;
@@ -1192,7 +1192,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
ui->ui_size = inode->i_size;
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
/* Other attributes may be changed at the same time as well */
do_attr_changes(inode, attr);
err = ubifs_jnl_truncate(c, inode, old_size, new_size);
@@ -1239,7 +1239,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
if (attr->ia_valid & ATTR_SIZE) {
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
/* 'truncate_setsize()' changed @i_size, update @ui_size */
ui->ui_size = inode->i_size;
}
@@ -1364,8 +1364,10 @@ out:
static inline int mctime_update_needed(const struct inode *inode,
const struct timespec64 *now)
{
+ struct timespec64 ctime = inode_get_ctime(inode);
+
if (!timespec64_equal(&inode->i_mtime, now) ||
- !timespec64_equal(&inode->i_ctime, now))
+ !timespec64_equal(&ctime, now))
return 1;
return 0;
}
@@ -1376,8 +1378,7 @@ static inline int mctime_update_needed(const struct inode *inode,
*
* This function updates time of the inode.
*/
-int ubifs_update_time(struct inode *inode, struct timespec64 *time,
- int flags)
+int ubifs_update_time(struct inode *inode, int flags)
{
struct ubifs_inode *ui = ubifs_inode(inode);
struct ubifs_info *c = inode->i_sb->s_fs_info;
@@ -1385,21 +1386,17 @@ int ubifs_update_time(struct inode *inode, struct timespec64 *time,
.dirtied_ino_d = ALIGN(ui->data_len, 8) };
int err, release;
- if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
- return generic_update_time(inode, time, flags);
+ if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
+ generic_update_time(inode, flags);
+ return 0;
+ }
err = ubifs_budget_space(c, &req);
if (err)
return err;
mutex_lock(&ui->ui_mutex);
- if (flags & S_ATIME)
- inode->i_atime = *time;
- if (flags & S_CTIME)
- inode->i_ctime = *time;
- if (flags & S_MTIME)
- inode->i_mtime = *time;
-
+ inode_update_timestamps(inode, flags);
release = ui->dirty;
__mark_inode_dirty(inode, I_DIRTY_SYNC);
mutex_unlock(&ui->ui_mutex);
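
The ->update_time() signature change drops the caller-supplied timestamp; implementations that only need default behaviour reduce to the helper shown above. A kernel-context sketch of that minimal shape (hypothetical filesystem, not buildable stand-alone):

static int examplefs_update_time(struct inode *inode, int flags)
{
	/* pick the current time for whichever of S_ATIME/S_MTIME/S_CTIME
	 * the VFS asked us to update */
	inode_update_timestamps(inode, flags);
	mark_inode_dirty_sync(inode);
	return 0;
}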
@@ -1432,7 +1429,7 @@ static int update_mctime(struct inode *inode)
return err;
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
@@ -1570,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
struct ubifs_inode *ui = ubifs_inode(inode);
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 67c5108abd89..d79cabe193c3 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -118,7 +118,7 @@ static int setflags(struct inode *inode, int flags)
ui->flags &= ~ioctl2ubifs(UBIFS_SETTABLE_IOCTL_FLAGS);
ui->flags |= ioctl2ubifs(flags);
ubifs_set_inode_flags(inode);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index dc52ac0f4a34..ffc9beee7be6 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -454,8 +454,8 @@ static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ino->ctime_sec = cpu_to_le64(inode->i_ctime.tv_sec);
- ino->ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+ ino->ctime_sec = cpu_to_le64(inode_get_ctime(inode).tv_sec);
+ ino->ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
ino->uid = cpu_to_le32(i_uid_read(inode));
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 32cb14759796..b08fb28d16b5 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -146,8 +146,8 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec);
- inode->i_ctime.tv_sec = (int64_t)le64_to_cpu(ino->ctime_sec);
- inode->i_ctime.tv_nsec = le32_to_cpu(ino->ctime_nsec);
+ inode_set_ctime(inode, (int64_t)le64_to_cpu(ino->ctime_sec),
+ le32_to_cpu(ino->ctime_nsec));
inode->i_mode = le32_to_cpu(ino->mode);
inode->i_size = le64_to_cpu(ino->size);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 4c36044140e7..ebb3ad6b5e7e 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -2027,7 +2027,7 @@ int ubifs_calc_dark(const struct ubifs_info *c, int spc);
int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync);
int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
-int ubifs_update_time(struct inode *inode, struct timespec64 *time, int flags);
+int ubifs_update_time(struct inode *inode, int flags);
/* dir.c */
struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 349228dd1191..406c82eab513 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -134,7 +134,7 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = current_time(host);
+ inode_set_ctime_current(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -215,7 +215,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = current_time(host);
+ inode_set_ctime_current(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -474,7 +474,7 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
return err;
mutex_lock(&host_ui->ui_mutex);
- host->i_ctime = current_time(host);
+ inode_set_ctime_current(host);
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size -= CALC_XATTR_BYTES(ui->data_len);
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 5f7ac8c84798..6b558cbbeb6b 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -100,7 +100,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
iinfo->i_crtime = inode->i_mtime;
if (unlikely(insert_inode_locked(inode) < 0)) {
make_bad_inode(inode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 28cdfc57d946..d089795074e8 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -910,7 +910,7 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED;
iinfo->i_next_alloc_block = map->lblk + 1;
iinfo->i_next_alloc_goal = newblocknum + 1;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (IS_SYNC(inode))
udf_sync_inode(inode);
@@ -1298,7 +1298,7 @@ set_size:
goto out_unlock;
}
update_time:
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
@@ -1329,6 +1329,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
uint32_t uid, gid;
+ struct timespec64 ctime;
reread:
if (iloc->partitionReferenceNum >= sbi->s_partitions) {
@@ -1507,7 +1508,8 @@ reread:
udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
- udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
+ udf_disk_stamp_to_time(&ctime, fe->attrTime);
+ inode_set_ctime_to_ts(inode, ctime);
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1522,7 +1524,8 @@ reread:
udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
- udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
+ udf_disk_stamp_to_time(&ctime, efe->attrTime);
+ inode_set_ctime_to_ts(inode, ctime);
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1799,7 +1802,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
- udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
+ udf_time_to_disk_stamp(&fe->attrTime, inode_get_ctime(inode));
memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1830,12 +1833,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
udf_adjust_time(iinfo, inode->i_atime);
udf_adjust_time(iinfo, inode->i_mtime);
- udf_adjust_time(iinfo, inode->i_ctime);
+ udf_adjust_time(iinfo, inode_get_ctime(inode));
udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
- udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
+ udf_time_to_disk_stamp(&efe->attrTime, inode_get_ctime(inode));
memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index a95579b043ab..ae55ab8859b6 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -365,7 +365,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
udf_fiiter_write_fi(&iter, NULL);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, false, 1);
@@ -471,7 +471,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, true, 1);
inc_nlink(dir);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
d_instantiate_new(dentry, inode);
@@ -523,8 +523,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_size = 0;
inode_dec_link_count(dir);
udf_add_fid_counter(dir->i_sb, true, -1);
- inode->i_ctime = dir->i_ctime = dir->i_mtime =
- current_time(inode);
+ dir->i_mtime = inode_set_ctime_to_ts(dir,
+ inode_set_ctime_current(inode));
mark_inode_dirty(dir);
ret = 0;
end_rmdir:
@@ -555,11 +555,11 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
set_nlink(inode, 1);
}
udf_fiiter_delete_entry(&iter);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
inode_dec_link_count(inode);
udf_add_fid_counter(dir->i_sb, false, -1);
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
ret = 0;
end_unlink:
udf_fiiter_release(&iter);
@@ -746,9 +746,9 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
inc_nlink(inode);
udf_add_fid_counter(dir->i_sb, false, 1);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
ihold(inode);
d_instantiate(dentry, inode);
@@ -833,7 +833,7 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
mark_inode_dirty(old_inode);
/*
@@ -861,13 +861,13 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
}
if (new_inode) {
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
inode_dec_link_count(new_inode);
udf_add_fid_counter(old_dir->i_sb, S_ISDIR(new_inode->i_mode),
-1);
}
- old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
- new_dir->i_ctime = new_dir->i_mtime = current_time(new_dir);
+ old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ new_dir->i_mtime = inode_set_ctime_current(new_dir);
mark_inode_dirty(old_dir);
mark_inode_dirty(new_dir);
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 779b5c2c75f6..f7eaf7b14594 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -149,7 +149,7 @@ static int udf_symlink_getattr(struct mnt_idmap *idmap,
struct inode *inode = d_backing_inode(dentry);
struct page *page;
- generic_fillattr(&nop_mnt_idmap, inode, stat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
page = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 379d75796a5c..fd57f03b6c93 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -107,7 +107,7 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
ufs_commit_chunk(page, pos, len);
ufs_put_page(page);
if (update_times)
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
ufs_handle_dirsync(dir);
}
@@ -397,7 +397,7 @@ got_it:
ufs_set_de_type(sb, de, inode->i_mode);
ufs_commit_chunk(page, pos, rec_len);
- dir->i_mtime = dir->i_ctime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
mark_inode_dirty(dir);
err = ufs_handle_dirsync(dir);
@@ -539,7 +539,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0;
ufs_commit_chunk(page, pos, to - from);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
err = ufs_handle_dirsync(inode);
out:
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 06bd84d555bd..a1e7bd9d1f98 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -292,7 +292,7 @@ cg_found:
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_blocks = 0;
inode->i_generation = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
ufsi->i_flags = UFS_I(dir)->i_flags;
ufsi->i_lastfrag = 0;
ufsi->i_shadow = 0;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index a4246c83a8cd..21a4779a2de5 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -296,7 +296,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
if (new)
*new = 1;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (IS_SYNC(inode))
ufs_sync_inode (inode);
mark_inode_dirty(inode);
@@ -378,7 +378,7 @@ ufs_inode_getblock(struct inode *inode, u64 ind_block,
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
mark_inode_dirty(inode);
out:
brelse (bh);
@@ -580,11 +580,12 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
- inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
+ inode_set_ctime(inode,
+ (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
+ 0);
inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
inode->i_mtime.tv_nsec = 0;
inode->i_atime.tv_nsec = 0;
- inode->i_ctime.tv_nsec = 0;
inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
@@ -626,10 +627,10 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
- inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
+ inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
+ fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
- inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
@@ -726,7 +727,8 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
ufs_inode->ui_atime.tv_usec = 0;
- ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
+ ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
+ inode_get_ctime(inode).tv_sec);
ufs_inode->ui_ctime.tv_usec = 0;
ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
ufs_inode->ui_mtime.tv_usec = 0;
@@ -770,8 +772,9 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
- ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
- ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
+ ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime(inode).tv_sec);
+ ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
+ inode_get_ctime(inode).tv_nsec);
ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
@@ -1205,7 +1208,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
truncate_setsize(inode, size);
ufs_truncate_blocks(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
out:
UFSD("EXIT: err %d\n", err);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 36154b5aca6d..9cad29463791 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -153,7 +153,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
struct inode *inode = d_inode(old_dentry);
int error;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_link_count(inode);
ihold(inode);
@@ -220,7 +220,7 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
- inode->i_ctime = dir->i_ctime;
+ inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
err = 0;
out:
@@ -282,7 +282,7 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (!new_de)
goto out_dir;
ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
- new_inode->i_ctime = current_time(new_inode);
+ inode_set_ctime_current(new_inode);
if (dir_de)
drop_nlink(new_inode);
inode_dec_link_count(new_inode);
@@ -298,7 +298,7 @@ static int ufs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
* Like most other Unix systems, set the ctime for inodes on a
* rename.
*/
- old_inode->i_ctime = current_time(old_inode);
+ inode_set_ctime_current(old_inode);
ufs_delete_entry(old_dir, old_de, old_page);
mark_inode_dirty(old_inode);
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
index dd0ae1188e87..83f20dd15522 100644
--- a/fs/vboxsf/utils.c
+++ b/fs/vboxsf/utils.c
@@ -128,8 +128,8 @@ int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
inode->i_atime = ns_to_timespec64(
info->access_time.ns_relative_to_unix_epoch);
- inode->i_ctime = ns_to_timespec64(
- info->change_time.ns_relative_to_unix_epoch);
+ inode_set_ctime_to_ts(inode,
+ ns_to_timespec64(info->change_time.ns_relative_to_unix_epoch));
inode->i_mtime = ns_to_timespec64(
info->modification_time.ns_relative_to_unix_epoch);
return 0;
@@ -252,7 +252,7 @@ int vboxsf_getattr(struct mnt_idmap *idmap, const struct path *path,
if (err)
return err;
- generic_fillattr(&nop_mnt_idmap, d_inode(dentry), kstat);
+ generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), kstat);
return 0;
}
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
index 49bf3a1eb2a0..d071a6e32581 100644
--- a/fs/verity/fsverity_private.h
+++ b/fs/verity/fsverity_private.h
@@ -118,16 +118,16 @@ void fsverity_free_info(struct fsverity_info *vi);
int fsverity_get_descriptor(struct inode *inode,
struct fsverity_descriptor **desc_ret);
-int __init fsverity_init_info_cache(void);
-void __init fsverity_exit_info_cache(void);
+void __init fsverity_init_info_cache(void);
/* signature.c */
#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+extern int fsverity_require_signatures;
int fsverity_verify_signature(const struct fsverity_info *vi,
const u8 *signature, size_t sig_size);
-int __init fsverity_init_signature(void);
+void __init fsverity_init_signature(void);
#else /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
static inline int
fsverity_verify_signature(const struct fsverity_info *vi,
@@ -136,15 +136,13 @@ fsverity_verify_signature(const struct fsverity_info *vi,
return 0;
}
-static inline int fsverity_init_signature(void)
+static inline void fsverity_init_signature(void)
{
- return 0;
}
#endif /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
/* verify.c */
-int __init fsverity_init_workqueue(void);
-void __init fsverity_exit_workqueue(void);
+void __init fsverity_init_workqueue(void);
#endif /* _FSVERITY_PRIVATE_H */
diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c
index c598d2035476..6b08b1d9a7d7 100644
--- a/fs/verity/hash_algs.c
+++ b/fs/verity/hash_algs.c
@@ -226,6 +226,14 @@ void __init fsverity_check_hash_algs(void)
if (!alg->name)
continue;
+ /*
+ * 0 must never be allocated as an FS_VERITY_HASH_ALG_* value,
+ * as it is reserved for users that use 0 to mean unspecified or
+ * a default value. fs/verity/ itself doesn't care and doesn't
+ * have a default algorithm, but some users make use of this.
+ */
+ BUG_ON(i == 0);
+
BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE);
/*
diff --git a/fs/verity/init.c b/fs/verity/init.c
index 023905151035..a29f062f6047 100644
--- a/fs/verity/init.c
+++ b/fs/verity/init.c
@@ -9,6 +9,37 @@
#include <linux/ratelimit.h>
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *fsverity_sysctl_header;
+
+static struct ctl_table fsverity_sysctl_table[] = {
+#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+ {
+ .procname = "require_signatures",
+ .data = &fsverity_require_signatures,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif
+ { }
+};
+
+static void __init fsverity_init_sysctl(void)
+{
+ fsverity_sysctl_header = register_sysctl("fs/verity",
+ fsverity_sysctl_table);
+ if (!fsverity_sysctl_header)
+ panic("fsverity sysctl registration failed");
+}
+#else /* CONFIG_SYSCTL */
+static inline void fsverity_init_sysctl(void)
+{
+}
+#endif /* !CONFIG_SYSCTL */
+
void fsverity_msg(const struct inode *inode, const char *level,
const char *fmt, ...)
{
@@ -33,28 +64,11 @@ void fsverity_msg(const struct inode *inode, const char *level,
static int __init fsverity_init(void)
{
- int err;
-
fsverity_check_hash_algs();
-
- err = fsverity_init_info_cache();
- if (err)
- return err;
-
- err = fsverity_init_workqueue();
- if (err)
- goto err_exit_info_cache;
-
- err = fsverity_init_signature();
- if (err)
- goto err_exit_workqueue;
-
+ fsverity_init_info_cache();
+ fsverity_init_workqueue();
+ fsverity_init_sysctl();
+ fsverity_init_signature();
return 0;
-
-err_exit_workqueue:
- fsverity_exit_workqueue();
-err_exit_info_cache:
- fsverity_exit_info_cache();
- return err;
}
late_initcall(fsverity_init)
diff --git a/fs/verity/open.c b/fs/verity/open.c
index 1db5106a9c38..6c31a871b84b 100644
--- a/fs/verity/open.c
+++ b/fs/verity/open.c
@@ -408,18 +408,10 @@ void __fsverity_cleanup_inode(struct inode *inode)
}
EXPORT_SYMBOL_GPL(__fsverity_cleanup_inode);
-int __init fsverity_init_info_cache(void)
+void __init fsverity_init_info_cache(void)
{
- fsverity_info_cachep = KMEM_CACHE_USERCOPY(fsverity_info,
- SLAB_RECLAIM_ACCOUNT,
- file_digest);
- if (!fsverity_info_cachep)
- return -ENOMEM;
- return 0;
-}
-
-void __init fsverity_exit_info_cache(void)
-{
- kmem_cache_destroy(fsverity_info_cachep);
- fsverity_info_cachep = NULL;
+ fsverity_info_cachep = KMEM_CACHE_USERCOPY(
+ fsverity_info,
+ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC,
+ file_digest);
}
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
index 72034bc71c9d..90c07573dd77 100644
--- a/fs/verity/signature.c
+++ b/fs/verity/signature.c
@@ -24,7 +24,7 @@
* /proc/sys/fs/verity/require_signatures
* If 1, all verity files must have a valid builtin signature.
*/
-static int fsverity_require_signatures;
+int fsverity_require_signatures;
/*
* Keyring that contains the trusted X.509 certificates.
@@ -62,6 +62,22 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
return 0;
}
+ if (fsverity_keyring->keys.nr_leaves_on_tree == 0) {
+ /*
+ * The ".fs-verity" keyring is empty, due to builtin signatures
+ * being supported by the kernel but not actually being used.
+ * In this case, verify_pkcs7_signature() would always return an
+ * error, usually ENOKEY. It could also be EBADMSG if the
+ * PKCS#7 is malformed, but that isn't very important to
+ * distinguish. So, just skip to ENOKEY to avoid the attack
+ * surface of the PKCS#7 parser, which would otherwise be
+ * reachable by any task able to execute FS_IOC_ENABLE_VERITY.
+ */
+ fsverity_err(inode,
+ "fs-verity keyring is empty, rejecting signed file!");
+ return -ENOKEY;
+ }
+
d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
if (!d)
return -ENOMEM;
@@ -93,59 +109,14 @@ int fsverity_verify_signature(const struct fsverity_info *vi,
return 0;
}
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *fsverity_sysctl_header;
-
-static struct ctl_table fsverity_sysctl_table[] = {
- {
- .procname = "require_signatures",
- .data = &fsverity_require_signatures,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- { }
-};
-
-static int __init fsverity_sysctl_init(void)
-{
- fsverity_sysctl_header = register_sysctl("fs/verity", fsverity_sysctl_table);
- if (!fsverity_sysctl_header) {
- pr_err("sysctl registration failed!\n");
- return -ENOMEM;
- }
- return 0;
-}
-#else /* !CONFIG_SYSCTL */
-static inline int __init fsverity_sysctl_init(void)
+void __init fsverity_init_signature(void)
{
- return 0;
-}
-#endif /* !CONFIG_SYSCTL */
-
-int __init fsverity_init_signature(void)
-{
- struct key *ring;
- int err;
-
- ring = keyring_alloc(".fs-verity", KUIDT_INIT(0), KGIDT_INIT(0),
- current_cred(), KEY_POS_SEARCH |
+ fsverity_keyring =
+ keyring_alloc(".fs-verity", KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(), KEY_POS_SEARCH |
KEY_USR_VIEW | KEY_USR_READ | KEY_USR_WRITE |
KEY_USR_SEARCH | KEY_USR_SETATTR,
- KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
- if (IS_ERR(ring))
- return PTR_ERR(ring);
-
- err = fsverity_sysctl_init();
- if (err)
- goto err_put_ring;
-
- fsverity_keyring = ring;
- return 0;
-
-err_put_ring:
- key_put(ring);
- return err;
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(fsverity_keyring))
+ panic("failed to allocate \".fs-verity\" keyring");
}
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 433cef51f5f6..904ccd7e8e16 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -346,7 +346,7 @@ void fsverity_enqueue_verify_work(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
-int __init fsverity_init_workqueue(void)
+void __init fsverity_init_workqueue(void)
{
/*
* Use a high-priority workqueue to prioritize verification work, which
@@ -360,12 +360,5 @@ int __init fsverity_init_workqueue(void)
WQ_HIGHPRI,
num_online_cpus());
if (!fsverity_read_workqueue)
- return -ENOMEM;
- return 0;
-}
-
-void __init fsverity_exit_workqueue(void)
-{
- destroy_workqueue(fsverity_read_workqueue);
- fsverity_read_workqueue = NULL;
+ panic("failed to allocate fsverity_read_queue");
}
diff --git a/fs/xattr.c b/fs/xattr.c
index e7bbb7f57557..efd4736bc94b 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -1040,12 +1040,32 @@ const char *xattr_full_name(const struct xattr_handler *handler,
EXPORT_SYMBOL(xattr_full_name);
/**
- * free_simple_xattr - free an xattr object
+ * simple_xattr_space - estimate the memory used by a simple xattr
+ * @name: the full name of the xattr
+ * @size: the size of its value
+ *
+ * This takes no account of how much larger the two slab objects actually are:
+ * that would depend on the slab implementation, when what is required is a
+ * deterministic number, which grows with name length and size and quantity.
+ *
+ * Return: The approximate number of bytes of memory used by such an xattr.
+ */
+size_t simple_xattr_space(const char *name, size_t size)
+{
+ /*
+ * Use "40" instead of sizeof(struct simple_xattr), to return the
+ * same result on 32-bit and 64-bit, and even if simple_xattr grows.
+ */
+ return 40 + size + strlen(name);
+}
+
+/**
+ * simple_xattr_free - free an xattr object
* @xattr: the xattr object
*
* Free the xattr object. Can handle @xattr being NULL.
*/
-static inline void free_simple_xattr(struct simple_xattr *xattr)
+void simple_xattr_free(struct simple_xattr *xattr)
{
if (xattr)
kfree(xattr->name);
@@ -1073,7 +1093,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
if (len < sizeof(*new_xattr))
return NULL;
- new_xattr = kvmalloc(len, GFP_KERNEL);
+ new_xattr = kvmalloc(len, GFP_KERNEL_ACCOUNT);
if (!new_xattr)
return NULL;
@@ -1164,7 +1184,6 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
* @value: the value to store along the xattr
* @size: the size of @value
* @flags: the flags determining how to set the xattr
- * @removed_size: the size of the removed xattr
*
* Set a new xattr object.
* If @value is passed a new xattr object will be allocated. If XATTR_REPLACE
@@ -1181,29 +1200,27 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
* nothing if XATTR_CREATE is specified in @flags or @flags is zero. For
* XATTR_REPLACE we fail as mentioned above.
*
- * Return: On success zero and on error a negative error code is returned.
+ * Return: On success, the removed or replaced xattr is returned, to be freed
+ * by the caller; or NULL if none. On failure a negative error code is returned.
*/
-int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags,
- ssize_t *removed_size)
+struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct simple_xattr *xattr = NULL, *new_xattr = NULL;
+ struct simple_xattr *old_xattr = NULL, *new_xattr = NULL;
struct rb_node *parent = NULL, **rbp;
int err = 0, ret;
- if (removed_size)
- *removed_size = -1;
-
/* value == NULL means remove */
if (value) {
new_xattr = simple_xattr_alloc(value, size);
if (!new_xattr)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- new_xattr->name = kstrdup(name, GFP_KERNEL);
+ new_xattr->name = kstrdup(name, GFP_KERNEL_ACCOUNT);
if (!new_xattr->name) {
- free_simple_xattr(new_xattr);
- return -ENOMEM;
+ simple_xattr_free(new_xattr);
+ return ERR_PTR(-ENOMEM);
}
}
@@ -1217,12 +1234,12 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
else if (ret > 0)
rbp = &(*rbp)->rb_right;
else
- xattr = rb_entry(*rbp, struct simple_xattr, rb_node);
- if (xattr)
+ old_xattr = rb_entry(*rbp, struct simple_xattr, rb_node);
+ if (old_xattr)
break;
}
- if (xattr) {
+ if (old_xattr) {
/* Fail if XATTR_CREATE is requested and the xattr exists. */
if (flags & XATTR_CREATE) {
err = -EEXIST;
@@ -1230,12 +1247,10 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
}
if (new_xattr)
- rb_replace_node(&xattr->rb_node, &new_xattr->rb_node,
- &xattrs->rb_root);
+ rb_replace_node(&old_xattr->rb_node,
+ &new_xattr->rb_node, &xattrs->rb_root);
else
- rb_erase(&xattr->rb_node, &xattrs->rb_root);
- if (!err && removed_size)
- *removed_size = xattr->size;
+ rb_erase(&old_xattr->rb_node, &xattrs->rb_root);
} else {
/* Fail if XATTR_REPLACE is requested but no xattr is found. */
if (flags & XATTR_REPLACE) {
@@ -1260,12 +1275,10 @@ int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
out_unlock:
write_unlock(&xattrs->lock);
- if (err)
- free_simple_xattr(new_xattr);
- else
- free_simple_xattr(xattr);
- return err;
-
+ if (!err)
+ return old_xattr;
+ simple_xattr_free(new_xattr);
+ return ERR_PTR(err);
}
static bool xattr_is_trusted(const char *name)
@@ -1370,14 +1383,17 @@ void simple_xattrs_init(struct simple_xattrs *xattrs)
/**
* simple_xattrs_free - free xattrs
* @xattrs: xattr header whose xattrs to destroy
+ * @freed_space: approximate number of bytes of memory freed from @xattrs
*
 * Destroy all xattrs in @xattrs. When this is called no one can hold a
* reference to any of the xattrs anymore.
*/
-void simple_xattrs_free(struct simple_xattrs *xattrs)
+void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space)
{
struct rb_node *rbp;
+ if (freed_space)
+ *freed_space = 0;
rbp = rb_first(&xattrs->rb_root);
while (rbp) {
struct simple_xattr *xattr;
@@ -1386,7 +1402,10 @@ void simple_xattrs_free(struct simple_xattrs *xattrs)
rbp_next = rb_next(rbp);
xattr = rb_entry(rbp, struct simple_xattr, rb_node);
rb_erase(&xattr->rb_node, &xattrs->rb_root);
- free_simple_xattr(xattr);
+ if (freed_space)
+ *freed_space += simple_xattr_space(xattr->name,
+ xattr->size);
+ simple_xattr_free(xattr);
rbp = rbp_next;
}
}
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 758aacd8166b..a35781577cad 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -222,7 +222,8 @@ xfs_inode_from_disk(
*/
inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
- inode->i_ctime = xfs_inode_from_disk_ts(from, from->di_ctime);
+ inode_set_ctime_to_ts(inode,
+ xfs_inode_from_disk_ts(from, from->di_ctime));
ip->i_disk_size = be64_to_cpu(from->di_size);
ip->i_nblocks = be64_to_cpu(from->di_nblocks);
@@ -316,7 +317,7 @@ xfs_inode_to_disk(
to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
- to->di_ctime = xfs_inode_to_disk_ts(ip, inode->i_ctime);
+ to->di_ctime = xfs_inode_to_disk_ts(ip, inode_get_ctime(inode));
to->di_nlink = cpu_to_be32(inode->i_nlink);
to->di_gen = cpu_to_be32(inode->i_generation);
to->di_mode = cpu_to_be16(inode->i_mode);
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index cb4796b6e693..ad22656376d3 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -62,12 +62,12 @@ xfs_trans_ichgtime(
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- tv = current_time(inode);
+ /* If the mtime changes, then ctime must also change */
+ ASSERT(flags & XFS_ICHGTIME_CHG);
+ tv = inode_set_ctime_current(inode);
if (flags & XFS_ICHGTIME_MOD)
inode->i_mtime = tv;
- if (flags & XFS_ICHGTIME_CHG)
- inode->i_ctime = tv;
if (flags & XFS_ICHGTIME_CREATE)
ip->i_crtime = tv;
}
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index e382a35e98d8..05be757668bb 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2019-2023 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <djwong@kernel.org>
@@ -8,6 +8,8 @@
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
@@ -16,6 +18,7 @@
#include "xfs_ag.h"
#include "xfs_rtalloc.h"
#include "xfs_inode.h"
+#include "xfs_icache.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -53,6 +56,7 @@ struct xchk_fscounters {
uint64_t frextents;
unsigned long long icount_min;
unsigned long long icount_max;
+ bool frozen;
};
/*
@@ -123,6 +127,82 @@ xchk_fscount_warmup(
return error;
}
+static inline int
+xchk_fsfreeze(
+ struct xfs_scrub *sc)
+{
+ int error;
+
+ error = freeze_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL);
+ trace_xchk_fsfreeze(sc, error);
+ return error;
+}
+
+static inline int
+xchk_fsthaw(
+ struct xfs_scrub *sc)
+{
+ int error;
+
+ /* This should always succeed, we have a kernel freeze */
+ error = thaw_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL);
+ trace_xchk_fsthaw(sc, error);
+ return error;
+}
+
+/*
+ * We couldn't stabilize the filesystem long enough to sample all the variables
+ * that comprise the summary counters and compare them to the percpu counters.
+ * We need to disable all writer threads, which means taking the first two
+ * freeze levels to put userspace to sleep, and the third freeze level to
+ * prevent background threads from starting new transactions. Take one level
+ * more to prevent other callers from unfreezing the filesystem while we run.
+ */
+STATIC int
+xchk_fscounters_freeze(
+ struct xfs_scrub *sc)
+{
+ struct xchk_fscounters *fsc = sc->buf;
+ int error = 0;
+
+ if (sc->flags & XCHK_HAVE_FREEZE_PROT) {
+ sc->flags &= ~XCHK_HAVE_FREEZE_PROT;
+ mnt_drop_write_file(sc->file);
+ }
+
+ /* Try to grab a kernel freeze. */
+ while ((error = xchk_fsfreeze(sc)) == -EBUSY) {
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ delay(HZ / 10);
+ }
+ if (error)
+ return error;
+
+ fsc->frozen = true;
+ return 0;
+}
+
+/* Thaw the filesystem after checking or repairing fscounters. */
+STATIC void
+xchk_fscounters_cleanup(
+ void *buf)
+{
+ struct xchk_fscounters *fsc = buf;
+ struct xfs_scrub *sc = fsc->sc;
+ int error;
+
+ if (!fsc->frozen)
+ return;
+
+ error = xchk_fsthaw(sc);
+ if (error)
+ xfs_emerg(sc->mp, "still frozen after scrub, err=%d", error);
+ else
+ fsc->frozen = false;
+}
+
int
xchk_setup_fscounters(
struct xfs_scrub *sc)
@@ -140,6 +220,7 @@ xchk_setup_fscounters(
sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
if (!sc->buf)
return -ENOMEM;
+ sc->buf_cleanup = xchk_fscounters_cleanup;
fsc = sc->buf;
fsc->sc = sc;
@@ -150,7 +231,18 @@ xchk_setup_fscounters(
if (error)
return error;
- return xchk_trans_alloc(sc, 0);
+ /*
+ * Pause all writer activity in the filesystem while we're scrubbing to
+ * reduce the likelihood of background perturbations to the counters
+ * throwing off our calculations.
+ */
+ if (sc->flags & XCHK_TRY_HARDER) {
+ error = xchk_fscounters_freeze(sc);
+ if (error)
+ return error;
+ }
+
+ return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}
/*
@@ -290,8 +382,7 @@ retry:
if (fsc->ifree > fsc->icount) {
if (tries--)
goto retry;
- xchk_set_incomplete(sc);
- return 0;
+ return -EDEADLOCK;
}
return 0;
@@ -367,6 +458,8 @@ xchk_fscount_count_frextents(
* Otherwise, we /might/ have a problem. If the change in the summations is
* more than we want to tolerate, the filesystem is probably busy and we should
* just send back INCOMPLETE and see if userspace will try again.
+ *
+ * If we're repairing then we require an exact match.
*/
static inline bool
xchk_fscount_within_range(
@@ -396,21 +489,7 @@ xchk_fscount_within_range(
if (expected >= min_value && expected <= max_value)
return true;
- /*
- * If the difference between the two summations is too large, the fs
- * might just be busy and so we'll mark the scrub incomplete. Return
- * true here so that we don't mark the counter corrupt.
- *
- * XXX: In the future when userspace can grant scrub permission to
- * quiesce the filesystem to solve the outsized variance problem, this
- * check should be moved up and the return code changed to signal to
- * userspace that we need quiesce permission.
- */
- if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) {
- xchk_set_incomplete(sc);
- return true;
- }
-
+ /* Everything else is bad. */
return false;
}
@@ -422,6 +501,7 @@ xchk_fscounters(
struct xfs_mount *mp = sc->mp;
struct xchk_fscounters *fsc = sc->buf;
int64_t icount, ifree, fdblocks, frextents;
+ bool try_again = false;
int error;
/* Snapshot the percpu counters. */
@@ -431,9 +511,26 @@ xchk_fscounters(
frextents = percpu_counter_sum(&mp->m_frextents);
/* No negative values, please! */
- if (icount < 0 || ifree < 0 || fdblocks < 0 || frextents < 0)
+ if (icount < 0 || ifree < 0)
xchk_set_corrupt(sc);
+ /*
+ * If the filesystem is not frozen, the counter summation calls above
+ * can race with xfs_mod_freecounter, which subtracts a requested space
+ * reservation from the counter and undoes the subtraction if that made
+ * the counter go negative. Therefore, it's possible to see negative
+ * values here, and we should only flag that as a corruption if we
+ * froze the fs. This is much more likely to happen with frextents
+ * since there are no reserved pools.
+ */
+ if (fdblocks < 0 || frextents < 0) {
+ if (!fsc->frozen)
+ return -EDEADLOCK;
+
+ xchk_set_corrupt(sc);
+ return 0;
+ }
+
/* See if icount is obviously wrong. */
if (icount < fsc->icount_min || icount > fsc->icount_max)
xchk_set_corrupt(sc);
@@ -447,12 +544,6 @@ xchk_fscounters(
xchk_set_corrupt(sc);
/*
- * XXX: We can't quiesce percpu counter updates, so exit early.
- * This can be re-enabled when we gain exclusive freeze functionality.
- */
- return 0;
-
- /*
* If ifree exceeds icount by more than the minimum variance then
* something's probably wrong with the counters.
*/
@@ -463,8 +554,6 @@ xchk_fscounters(
error = xchk_fscount_aggregate_agcounts(sc, fsc);
if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
return error;
- if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
- return 0;
/* Count the free extents counter for rt volumes. */
error = xchk_fscount_count_frextents(sc, fsc);
@@ -473,20 +562,45 @@ xchk_fscounters(
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
return 0;
- /* Compare the in-core counters with whatever we counted. */
- if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount))
- xchk_set_corrupt(sc);
+ /*
+ * Compare the in-core counters with whatever we counted. If the fs is
+ * frozen, we treat the discrepancy as a corruption because the freeze
+ * should have stabilized the counter values. Otherwise, we need
+ * userspace to call us back having granted us freeze permission.
+ */
+ if (!xchk_fscount_within_range(sc, icount, &mp->m_icount,
+ fsc->icount)) {
+ if (fsc->frozen)
+ xchk_set_corrupt(sc);
+ else
+ try_again = true;
+ }
- if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree))
- xchk_set_corrupt(sc);
+ if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree)) {
+ if (fsc->frozen)
+ xchk_set_corrupt(sc);
+ else
+ try_again = true;
+ }
if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
- fsc->fdblocks))
- xchk_set_corrupt(sc);
+ fsc->fdblocks)) {
+ if (fsc->frozen)
+ xchk_set_corrupt(sc);
+ else
+ try_again = true;
+ }
if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
- fsc->frextents))
- xchk_set_corrupt(sc);
+ fsc->frextents)) {
+ if (fsc->frozen)
+ xchk_set_corrupt(sc);
+ else
+ try_again = true;
+ }
+
+ if (try_again)
+ return -EDEADLOCK;
return 0;
}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 3d98f604765e..a0fffbcd022b 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -184,8 +184,10 @@ xchk_teardown(
xchk_irele(sc, sc->ip);
sc->ip = NULL;
}
- if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
+ if (sc->flags & XCHK_HAVE_FREEZE_PROT) {
+ sc->flags &= ~XCHK_HAVE_FREEZE_PROT;
mnt_drop_write_file(sc->file);
+ }
if (sc->buf) {
if (sc->buf_cleanup)
sc->buf_cleanup(sc->buf);
@@ -505,6 +507,8 @@ retry_op:
error = mnt_want_write_file(sc->file);
if (error)
goto out_sc;
+
+ sc->flags |= XCHK_HAVE_FREEZE_PROT;
}
/* Set up for the operation. */
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index e113f2f5c254..f8ba00e51ca9 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -106,6 +106,7 @@ struct xfs_scrub {
/* XCHK state flags grow up from zero, XREP state flags grow down from 2^31 */
#define XCHK_TRY_HARDER (1U << 0) /* can't get resources, try again */
+#define XCHK_HAVE_FREEZE_PROT (1U << 1) /* do we have freeze protection? */
#define XCHK_FSGATES_DRAIN (1U << 2) /* defer ops draining enabled */
#define XCHK_NEED_DRAIN (1U << 3) /* scrub needs to drain defer ops */
#define XREP_ALREADY_FIXED (1U << 31) /* checking our repair work */
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index b3894daeb86a..0b54f1a1cf0c 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -98,6 +98,7 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS);
#define XFS_SCRUB_STATE_STRINGS \
{ XCHK_TRY_HARDER, "try_harder" }, \
+ { XCHK_HAVE_FREEZE_PROT, "nofreeze" }, \
{ XCHK_FSGATES_DRAIN, "fsgates_drain" }, \
{ XCHK_NEED_DRAIN, "need_drain" }, \
{ XREP_ALREADY_FIXED, "already_fixed" }
@@ -693,6 +694,31 @@ TRACE_EVENT(xchk_fscounters_within_range,
__entry->old_value)
)
+DECLARE_EVENT_CLASS(xchk_fsfreeze_class,
+ TP_PROTO(struct xfs_scrub *sc, int error),
+ TP_ARGS(sc, error),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned int, type)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->error = error;
+ ),
+ TP_printk("dev %d:%d type %s error %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+ __entry->error)
+);
+#define DEFINE_XCHK_FSFREEZE_EVENT(name) \
+DEFINE_EVENT(xchk_fsfreeze_class, name, \
+ TP_PROTO(struct xfs_scrub *sc, int error), \
+ TP_ARGS(sc, error))
+DEFINE_XCHK_FSFREEZE_EVENT(xchk_fsfreeze);
+DEFINE_XCHK_FSFREEZE_EVENT(xchk_fsthaw);
+
TRACE_EVENT(xchk_refcount_incorrect,
TP_PROTO(struct xfs_perag *pag, const struct xfs_refcount_irec *irec,
xfs_nlink_t seen),
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 791db7d9c849..6b840301817a 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -233,7 +233,7 @@ xfs_acl_set_mode(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
inode->i_mode = mode;
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
if (xfs_has_wsync(mp))
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 451942fb38ec..2fca4b4e7fd8 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -578,7 +578,7 @@ const struct address_space_operations xfs_address_space_operations = {
.read_folio = xfs_vm_read_folio,
.readahead = xfs_vm_readahead,
.writepages = xfs_vm_writepages,
- .dirty_folio = filemap_dirty_folio,
+ .dirty_folio = iomap_dirty_folio,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.bmap = xfs_vm_bmap,
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index fbb675563208..fcefab687285 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1644,6 +1644,7 @@ xfs_swap_extents(
uint64_t f;
int resblks = 0;
unsigned int flags = 0;
+ struct timespec64 ctime;
/*
* Lock the inodes against other IO, page faults and truncate to
@@ -1756,8 +1757,9 @@ xfs_swap_extents(
* process that the file was not changed out from
* under it.
*/
- if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
- (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
+ ctime = inode_get_ctime(VFS_I(ip));
+ if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
+ (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
(sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
(sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
error = -EBUSY;
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 15d1e5a7c2d3..3b903f6bce98 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1938,14 +1938,17 @@ void
xfs_free_buftarg(
struct xfs_buftarg *btp)
{
+ struct block_device *bdev = btp->bt_bdev;
+
unregister_shrinker(&btp->bt_shrinker);
ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
percpu_counter_destroy(&btp->bt_io_count);
list_lru_destroy(&btp->bt_lru);
- blkdev_issue_flush(btp->bt_bdev);
- invalidate_bdev(btp->bt_bdev);
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
+ /* the main block device is closed by kill_block_super */
+ if (bdev != btp->bt_mount->m_super->s_bdev)
+ blkdev_put(bdev, btp->bt_mount->m_super);
kmem_free(btp);
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 9e62cc500140..360fe83a334f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -843,10 +843,9 @@ xfs_init_new_inode(
ip->i_df.if_nextents = 0;
ASSERT(ip->i_nblocks == 0);
- tv = current_time(inode);
+ tv = inode_set_ctime_current(inode);
inode->i_mtime = tv;
inode->i_atime = tv;
- inode->i_ctime = tv;
ip->i_extsize = 0;
ip->i_diflags = 0;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 91c847a84e10..127b2410eb20 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -528,7 +528,7 @@ xfs_inode_to_log_dinode(
memset(to->di_pad3, 0, sizeof(to->di_pad3));
to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
- to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode->i_ctime);
+ to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode_get_ctime(inode));
to->di_nlink = inode->i_nlink;
to->di_gen = inode->i_generation;
to->di_mode = inode->i_mode;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 24718adb3c16..2ededd3f6b8c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -573,10 +573,10 @@ xfs_vn_getattr(
stat->gid = vfsgid_into_kgid(vfsgid);
stat->ino = ip->i_ino;
stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
- stat->ctime = inode->i_ctime;
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
+ fill_mg_cmtime(stat, request_mask, inode);
+
if (xfs_has_v3inodes(mp)) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
@@ -917,7 +917,7 @@ xfs_setattr_size(
if (newsize != oldsize &&
!(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
iattr->ia_ctime = iattr->ia_mtime =
- current_time(inode);
+ current_mgtime(inode);
iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
}
@@ -1029,7 +1029,6 @@ xfs_vn_setattr(
STATIC int
xfs_vn_update_time(
struct inode *inode,
- struct timespec64 *now,
int flags)
{
struct xfs_inode *ip = XFS_I(inode);
@@ -1037,13 +1036,16 @@ xfs_vn_update_time(
int log_flags = XFS_ILOG_TIMESTAMP;
struct xfs_trans *tp;
int error;
+ struct timespec64 now;
trace_xfs_update_time(ip);
if (inode->i_sb->s_flags & SB_LAZYTIME) {
if (!((flags & S_VERSION) &&
- inode_maybe_inc_iversion(inode, false)))
- return generic_update_time(inode, now, flags);
+ inode_maybe_inc_iversion(inode, false))) {
+ generic_update_time(inode, flags);
+ return 0;
+ }
/* Capture the iversion update that just occurred */
log_flags |= XFS_ILOG_CORE;
@@ -1054,12 +1056,15 @@ xfs_vn_update_time(
return error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (flags & S_CTIME)
- inode->i_ctime = *now;
+ if (flags & (S_CTIME|S_MTIME))
+ now = inode_set_ctime_current(inode);
+ else
+ now = current_time(inode);
+
if (flags & S_MTIME)
- inode->i_mtime = *now;
+ inode->i_mtime = now;
if (flags & S_ATIME)
- inode->i_atime = *now;
+ inode->i_atime = now;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, log_flags);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f225413a993c..c2093cb56092 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -100,8 +100,8 @@ xfs_bulkstat_one_int(
buf->bs_atime_nsec = inode->i_atime.tv_nsec;
buf->bs_mtime = inode->i_mtime.tv_sec;
buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
- buf->bs_ctime = inode->i_ctime.tv_sec;
- buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
+ buf->bs_ctime = inode_get_ctime(inode).tv_sec;
+ buf->bs_ctime_nsec = inode_get_ctime(inode).tv_nsec;
buf->bs_gen = inode->i_generation;
buf->bs_mode = inode->i_mode;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 818510243130..c79eac048456 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -377,17 +377,6 @@ disable_dax:
return 0;
}
-static void
-xfs_bdev_mark_dead(
- struct block_device *bdev)
-{
- xfs_force_shutdown(bdev->bd_holder, SHUTDOWN_DEVICE_REMOVED);
-}
-
-static const struct blk_holder_ops xfs_holder_ops = {
- .mark_dead = xfs_bdev_mark_dead,
-};
-
STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
@@ -396,8 +385,8 @@ xfs_blkdev_get(
{
int error = 0;
- *bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE, mp,
- &xfs_holder_ops);
+ *bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ mp->m_super, &fs_holder_ops);
if (IS_ERR(*bdevp)) {
error = PTR_ERR(*bdevp);
xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
@@ -407,31 +396,45 @@ xfs_blkdev_get(
}
STATIC void
-xfs_blkdev_put(
- struct xfs_mount *mp,
- struct block_device *bdev)
-{
- if (bdev)
- blkdev_put(bdev, mp);
-}
-
-STATIC void
-xfs_close_devices(
+xfs_shutdown_devices(
struct xfs_mount *mp)
{
+ /*
+ * Udev is triggered whenever anyone closes a block device or unmounts
+ * a file system on a block device.
+ * The default udev rules invoke blkid to read the fs super and create
+ * symlinks to the bdev under /dev/disk. For this, it uses buffered
+ * reads through the page cache.
+ *
+ * xfs_db also uses buffered reads to examine metadata. There is no
+ * coordination between xfs_db and udev, which means that they can run
+ * concurrently. Note there is no coordination between the kernel and
+ * blkid either.
+ *
+ * On a system with 64k pages, the page cache can cache the superblock
+ * and the root inode (and hence the root directory) with the same 64k
+ * page. If udev spawns blkid after the mkfs and the system is busy
+ * enough that it is still running when xfs_db starts up, they'll both
+ * read from the same page in the pagecache.
+ *
+ * The unmount writes updated inode metadata to disk directly. The XFS
+ * buffer cache does not use the bdev pagecache, so it needs to
+ * invalidate that pagecache on unmount. If the above scenario occurs,
+ * the pagecache no longer reflects what's on disk, xfs_db reads the
+ * stale metadata, and fails to find /a. Most of the time this succeeds
+ * because closing a bdev invalidates the page cache, but when processes
+ * race, everyone loses.
+ */
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
- struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
-
- xfs_free_buftarg(mp->m_logdev_targp);
- xfs_blkdev_put(mp, logdev);
+ blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
+ invalidate_bdev(mp->m_logdev_targp->bt_bdev);
}
if (mp->m_rtdev_targp) {
- struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
-
- xfs_free_buftarg(mp->m_rtdev_targp);
- xfs_blkdev_put(mp, rtdev);
+ blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
+ invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
}
- xfs_free_buftarg(mp->m_ddev_targp);
+ blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
+ invalidate_bdev(mp->m_ddev_targp->bt_bdev);
}
/*
@@ -448,17 +451,24 @@ STATIC int
xfs_open_devices(
struct xfs_mount *mp)
{
- struct block_device *ddev = mp->m_super->s_bdev;
+ struct super_block *sb = mp->m_super;
+ struct block_device *ddev = sb->s_bdev;
struct block_device *logdev = NULL, *rtdev = NULL;
int error;
/*
+ * blkdev_put() can't be called under s_umount, see the comment
+ * in get_tree_bdev() for more details
+ */
+ up_write(&sb->s_umount);
+
+ /*
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
if (error)
- return error;
+ goto out_relock;
}
if (mp->m_rtname) {
@@ -496,7 +506,10 @@ xfs_open_devices(
mp->m_logdev_targp = mp->m_ddev_targp;
}
- return 0;
+ error = 0;
+out_relock:
+ down_write(&sb->s_umount);
+ return error;
out_free_rtdev_targ:
if (mp->m_rtdev_targp)
@@ -504,11 +517,12 @@ xfs_open_devices(
out_free_ddev_targ:
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
- xfs_blkdev_put(mp, rtdev);
+ if (rtdev)
+ blkdev_put(rtdev, sb);
out_close_logdev:
if (logdev && logdev != ddev)
- xfs_blkdev_put(mp, logdev);
- return error;
+ blkdev_put(logdev, sb);
+ goto out_relock;
}
/*
@@ -758,6 +772,17 @@ static void
xfs_mount_free(
struct xfs_mount *mp)
{
+ /*
+ * Free the buftargs here because blkdev_put needs to be called outside
+ * of sb->s_umount, which is held around the call to ->put_super.
+ */
+ if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
+ xfs_free_buftarg(mp->m_logdev_targp);
+ if (mp->m_rtdev_targp)
+ xfs_free_buftarg(mp->m_rtdev_targp);
+ if (mp->m_ddev_targp)
+ xfs_free_buftarg(mp->m_ddev_targp);
+
kfree(mp->m_rtname);
kfree(mp->m_logname);
kmem_free(mp);
@@ -1133,10 +1158,6 @@ xfs_fs_put_super(
{
struct xfs_mount *mp = XFS_M(sb);
- /* if ->fill_super failed, we have no mount to tear down */
- if (!sb->s_fs_info)
- return;
-
xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
xfs_filestream_unmount(mp);
xfs_unmountfs(mp);
@@ -1147,10 +1168,7 @@ xfs_fs_put_super(
xfs_inodegc_free_percpu(mp);
xfs_destroy_percpu_counters(mp);
xfs_destroy_mount_workqueues(mp);
- xfs_close_devices(mp);
-
- sb->s_fs_info = NULL;
- xfs_mount_free(mp);
+ xfs_shutdown_devices(mp);
}
static long
@@ -1492,7 +1510,7 @@ xfs_fs_fill_super(
error = xfs_fs_validate_params(mp);
if (error)
- goto out_free_names;
+ return error;
sb_min_blocksize(sb, BBSIZE);
sb->s_xattr = xfs_xattr_handlers;
@@ -1519,11 +1537,11 @@ xfs_fs_fill_super(
error = xfs_open_devices(mp);
if (error)
- goto out_free_names;
+ return error;
error = xfs_init_mount_workqueues(mp);
if (error)
- goto out_close_devices;
+ goto out_shutdown_devices;
error = xfs_init_percpu_counters(mp);
if (error)
@@ -1737,11 +1755,8 @@ xfs_fs_fill_super(
xfs_destroy_percpu_counters(mp);
out_destroy_workqueues:
xfs_destroy_mount_workqueues(mp);
- out_close_devices:
- xfs_close_devices(mp);
- out_free_names:
- sb->s_fs_info = NULL;
- xfs_mount_free(mp);
+ out_shutdown_devices:
+ xfs_shutdown_devices(mp);
return error;
out_unmount:
@@ -1934,7 +1949,8 @@ xfs_fs_reconfigure(
return 0;
}
-static void xfs_fs_free(
+static void
+xfs_fs_free(
struct fs_context *fc)
{
struct xfs_mount *mp = fc->s_fs_info;
@@ -2003,13 +2019,21 @@ static int xfs_init_fs_context(
return 0;
}
+static void
+xfs_kill_sb(
+ struct super_block *sb)
+{
+ kill_block_super(sb);
+ xfs_mount_free(XFS_M(sb));
+}
+
static struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
.init_fs_context = xfs_init_fs_context,
.parameters = xfs_fs_parameters,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+ .kill_sb = xfs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
};
MODULE_ALIAS_FS("xfs");
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 789cfb74c146..b2c9b35df8f7 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -175,7 +175,7 @@ const struct address_space_operations zonefs_file_aops = {
.read_folio = zonefs_read_folio,
.readahead = zonefs_readahead,
.writepages = zonefs_writepages,
- .dirty_folio = filemap_dirty_folio,
+ .dirty_folio = iomap_dirty_folio,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 9350221abfc5..9d1a9808fbbb 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -658,7 +658,8 @@ static struct inode *zonefs_get_file_inode(struct inode *dir,
inode->i_ino = ino;
inode->i_mode = z->z_mode;
- inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
+ inode_get_ctime(dir));
inode->i_uid = z->z_uid;
inode->i_gid = z->z_gid;
inode->i_size = z->z_wpoffset;
@@ -694,7 +695,8 @@ static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
inode->i_ino = ino;
inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
- inode->i_ctime = inode->i_mtime = inode->i_atime = root->i_ctime;
+ inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
+ inode_get_ctime(root));
inode->i_private = &sbi->s_zgroup[ztype];
set_nlink(inode, 2);
@@ -1317,7 +1319,7 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = bdev_nr_zones(sb->s_bdev);
inode->i_mode = S_IFDIR | 0555;
- inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
inode->i_op = &zonefs_dir_inode_operations;
inode->i_fop = &zonefs_dir_operations;
inode->i_size = 2;
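The zonefs hunks are part of the tree-wide move from writing inode->i_ctime directly to the ctime accessor helpers (the fs.h hunk further down renames the field to __i_ctime to enforce this). A small sketch of the conversion, assuming an inode whose three timestamps should all be set to the current time:

#include <linux/fs.h>

static void example_touch_timestamps(struct inode *inode)
{
	/* old style, no longer possible once i_ctime becomes __i_ctime:
	 *	inode->i_ctime = inode->i_mtime = inode->i_atime =
	 *			 current_time(inode);
	 */
	inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
}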
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index d71291f25a80..76aa6aa346ba 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -22,6 +22,7 @@
#define METHOD_NAME__DDN "_DDN"
#define METHOD_NAME__DIS "_DIS"
#define METHOD_NAME__DMA "_DMA"
+#define METHOD_NAME__EVT "_EVT"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
#define METHOD_NAME__PLD "_PLD"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c941d99162c0..254685085c82 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -515,6 +515,12 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
int acpi_bus_attach_private_data(acpi_handle, void *);
void acpi_bus_detach_private_data(acpi_handle);
+int acpi_dev_install_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler);
+void acpi_dev_remove_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
@@ -563,8 +569,6 @@ int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids);
void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len);
-int acpi_create_dir(struct acpi_device *);
-void acpi_remove_dir(struct acpi_device *);
static inline bool acpi_device_enumerated(struct acpi_device *adev)
{
@@ -645,6 +649,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_X86
bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
bool acpi_quirk_skip_acpi_ac_and_battery(void);
+int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
+void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
#else
static inline bool acpi_device_override_status(struct acpi_device *adev,
unsigned long long *status)
@@ -655,6 +661,13 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
{
return false;
}
+static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
+{
+ return 1;
+}
+static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
+{
+}
#endif
#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9ffdc0425bc2..3d90716f9522 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20230331
+#define ACPI_CA_VERSION 0x20230628
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -970,8 +970,6 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
void **data,
void (*callback)(void *)))
-void acpi_run_debugger(char *batch_buffer);
-
void acpi_set_debugger_thread_id(acpi_thread_id thread_id);
#endif /* __ACXFACE_H__ */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 58b0490a2ad1..8d5572ad48cb 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -402,7 +402,7 @@ struct acpi_cdat_dsmas {
/* Flags for subtable above */
-#define ACPI_CEDT_DSMAS_NON_VOLATILE (1 << 2)
+#define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2)
/* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 0029336775a9..3751ae69432f 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -893,7 +893,10 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_BIO_PIC = 22,
ACPI_MADT_TYPE_LPC_PIC = 23,
ACPI_MADT_TYPE_RINTC = 24,
- ACPI_MADT_TYPE_RESERVED = 25, /* 25 to 0x7F are reserved */
+ ACPI_MADT_TYPE_IMSIC = 25,
+ ACPI_MADT_TYPE_APLIC = 26,
+ ACPI_MADT_TYPE_PLIC = 27,
+ ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */
ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
@@ -1261,6 +1264,9 @@ struct acpi_madt_rintc {
u32 flags;
u64 hart_id;
u32 uid; /* ACPI processor UID */
+ u32 ext_intc_id; /* External INTC Id */
+ u64 imsic_addr; /* IMSIC base address */
+ u32 imsic_size; /* IMSIC size */
};
/* Values for RISC-V INTC Version field above */
@@ -1271,6 +1277,48 @@ enum acpi_madt_rintc_version {
ACPI_MADT_RINTC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
};
+/* 25: RISC-V IMSIC */
+struct acpi_madt_imsic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 flags;
+ u16 num_ids;
+ u16 num_guest_ids;
+ u8 guest_index_bits;
+ u8 hart_index_bits;
+ u8 group_index_bits;
+ u8 group_index_shift;
+};
+
+/* 26: RISC-V APLIC */
+struct acpi_madt_aplic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u32 flags;
+ u8 hw_id[8];
+ u16 num_idcs;
+ u16 num_sources;
+ u32 gsi_base;
+ u64 base_addr;
+ u32 size;
+};
+
+/* 27: RISC-V PLIC */
+struct acpi_madt_plic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u8 hw_id[8];
+ u16 num_irqs;
+ u16 max_prio;
+ u32 flags;
+ u32 size;
+ u64 base_addr;
+ u32 gsi_base;
+};
+
/* 80: OEM data */
struct acpi_madt_oem_data {
@@ -2730,12 +2778,15 @@ enum acpi_rgrt_image_type {
struct acpi_table_rhct {
struct acpi_table_header header; /* Common ACPI table header */
- u32 reserved;
+ u32 flags; /* RHCT flags */
u64 time_base_freq;
u32 node_count;
u32 node_offset;
};
+/* RHCT Flags */
+
+#define ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU (1)
/*
* RHCT subtables
*/
@@ -2749,6 +2800,9 @@ struct acpi_rhct_node_header {
enum acpi_rhct_node_type {
ACPI_RHCT_NODE_TYPE_ISA_STRING = 0x0000,
+ ACPI_RHCT_NODE_TYPE_CMO = 0x0001,
+ ACPI_RHCT_NODE_TYPE_MMU = 0x0002,
+ ACPI_RHCT_NODE_TYPE_RESERVED = 0x0003,
ACPI_RHCT_NODE_TYPE_HART_INFO = 0xFFFF,
};
@@ -2762,6 +2816,24 @@ struct acpi_rhct_isa_string {
char isa[];
};
+struct acpi_rhct_cmo_node {
+ u8 reserved; /* Must be zero */
+ u8 cbom_size; /* CBOM size in power of 2 */
+ u8 cbop_size; /* CBOP size in power of 2 */
+ u8 cboz_size; /* CBOZ size in power of 2 */
+};
+
+struct acpi_rhct_mmu_node {
+ u8 reserved; /* Must be zero */
+ u8 mmu_type; /* Virtual Address Scheme */
+};
+
+enum acpi_rhct_mmu_type {
+ ACPI_RHCT_MMU_TYPE_SV39 = 0,
+ ACPI_RHCT_MMU_TYPE_SV48 = 1,
+ ACPI_RHCT_MMU_TYPE_SV57 = 2
+};
+
/* Hart Info node structure */
struct acpi_rhct_hart_info {
u16 num_offsets;
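The new CMO node stores its cache-block sizes as exponents, so a consumer recovers the byte count with a shift. Illustrative helper, not part of the patch (assumes the ACPI table headers are in scope via <linux/acpi.h>):

#include <linux/acpi.h>

/* Decode the CBOM block size in bytes; e.g. cbom_size == 6 means 64 bytes. */
static u32 rhct_cmo_cbom_bytes(const struct acpi_rhct_cmo_node *cmo)
{
	return 1U << cmo->cbom_size;
}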
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 000764ab3985..c080d579a546 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -279,12 +279,14 @@ struct acpi_srat_gic_its_affinity {
* 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
*/
+#define ACPI_SRAT_DEVICE_HANDLE_SIZE 16
+
struct acpi_srat_generic_affinity {
struct acpi_subtable_header header;
u8 reserved;
u8 device_handle_type;
u32 proximity_domain;
- u8 device_handle[16];
+ u8 device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
u32 flags;
u32 reserved1;
};
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
deleted file mode 100644
index 967c552d1cd3..000000000000
--- a/include/acpi/pdc_intel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* _PDC bit definition for Intel processors */
-
-#ifndef __PDC_INTEL_H__
-#define __PDC_INTEL_H__
-
-#define ACPI_PDC_P_FFH (0x0001)
-#define ACPI_PDC_C_C1_HALT (0x0002)
-#define ACPI_PDC_T_FFH (0x0004)
-#define ACPI_PDC_SMP_C1PT (0x0008)
-#define ACPI_PDC_SMP_C2C3 (0x0010)
-#define ACPI_PDC_SMP_P_SWCOORD (0x0020)
-#define ACPI_PDC_SMP_C_SWCOORD (0x0040)
-#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
-#define ACPI_PDC_C_C1_FFH (0x0100)
-#define ACPI_PDC_C_C2C3_FFH (0x0200)
-#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
-
-#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_SMP_P_SWCOORD | \
- ACPI_PDC_SMP_P_HWCOORD | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
- ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_C_C1_FFH | \
- ACPI_PDC_C_C2C3_FFH)
-
-#endif /* __PDC_INTEL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1ca450e35c0d..565341c826e3 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -182,6 +182,7 @@
#ifdef ACPI_USE_STANDARD_HEADERS
#include <stddef.h>
#include <unistd.h>
+#include <stdint.h>
#define ACPI_OFFSET(d, f) offsetof(d, f)
#endif
diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h
index 2f0d30c3c5fd..703db4dc740d 100644
--- a/include/acpi/platform/aczephyr.h
+++ b/include/acpi/platform/aczephyr.h
@@ -10,9 +10,6 @@
#ifndef __ACZEPHYR_H__
#define __ACZEPHYR_H__
-#define SEEK_SET FS_SEEK_SET
-#define SEEK_END FS_SEEK_END
-
#define ACPI_MACHINE_WIDTH 64
#define ACPI_NO_ERROR_MESSAGES
diff --git a/include/acpi/proc_cap_intel.h b/include/acpi/proc_cap_intel.h
new file mode 100644
index 000000000000..ddcdc41d6c3e
--- /dev/null
+++ b/include/acpi/proc_cap_intel.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Vendor specific processor capabilities bit definition
+ * for Intel processors. Those bits are used to convey OSPM
+ * power management capabilities to the platform.
+ */
+
+#ifndef __PROC_CAP_INTEL_H__
+#define __PROC_CAP_INTEL_H__
+
+#define ACPI_PROC_CAP_P_FFH (0x0001)
+#define ACPI_PROC_CAP_C_C1_HALT (0x0002)
+#define ACPI_PROC_CAP_T_FFH (0x0004)
+#define ACPI_PROC_CAP_SMP_C1PT (0x0008)
+#define ACPI_PROC_CAP_SMP_C2C3 (0x0010)
+#define ACPI_PROC_CAP_SMP_P_SWCOORD (0x0020)
+#define ACPI_PROC_CAP_SMP_C_SWCOORD (0x0040)
+#define ACPI_PROC_CAP_SMP_T_SWCOORD (0x0080)
+#define ACPI_PROC_CAP_C_C1_FFH (0x0100)
+#define ACPI_PROC_CAP_C_C2C3_FFH (0x0200)
+#define ACPI_PROC_CAP_SMP_P_HWCOORD (0x0800)
+#define ACPI_PROC_CAP_COLLAB_PROC_PERF (0x1000)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SWSMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_SMP_P_SWCOORD | \
+ ACPI_PROC_CAP_SMP_P_HWCOORD | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_C_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C2C3 | \
+ ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_C_C1_FFH | \
+ ACPI_PROC_CAP_C_C2C3_FFH)
+
+#endif /* __PROC_CAP_INTEL_H__ */
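The new header carries the former _PDC bit definitions forward under ACPI_PROC_CAP_* names and adds the collaborative processor performance bit; callers simply OR together the capabilities they support. Hedged sketch (the helper function is made up):

#include <linux/types.h>
#include <acpi/proc_cap_intel.h>

static u32 example_build_proc_cap(bool collab_perf_supported)
{
	u32 cap = ACPI_PROC_CAP_EST_CAPABILITY_SMP | ACPI_PROC_CAP_C_CAPABILITY_SMP;

	if (collab_perf_supported)
		cap |= ACPI_PROC_CAP_COLLAB_PROC_PERF;
	return cap;
}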
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 6156161b181f..ca86f4c6ba43 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -12,6 +12,7 @@
#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@@ -82,6 +83,8 @@ struct crypto_instance {
struct crypto_spawn *spawns;
};
+ struct work_struct free_work;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 2038764b30c2..2835069c5997 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -7,91 +7,47 @@
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kthread.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
-#include <crypto/skcipher.h>
#include <crypto/kpp.h>
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+struct crypto_engine;
struct device;
-#define ENGINE_NAME_LEN 30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @retry_support: indication that the hardware allows re-execution
- * of a failed backlog request
- * crypto-engine, in head position to keep order
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to synchronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
- * @kworker: kthread worker struct for request pump
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
- char name[ENGINE_NAME_LEN];
- bool idling;
- bool busy;
- bool running;
-
- bool retry_support;
-
- struct list_head list;
- spinlock_t queue_lock;
- struct crypto_queue queue;
- struct device *dev;
-
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
- struct kthread_worker *kworker;
- struct kthread_work pump_requests;
-
- void *priv_data;
- struct crypto_async_request *cur_req;
-};
-
/*
* struct crypto_engine_op - crypto hardware engine operations
- * @prepare_request: do some preparation if needed before handling the current request
- * @unprepare_request: undo any work done by prepare_request()
* @do_one_request: do encryption for current request
*/
struct crypto_engine_op {
- int (*prepare_request)(struct crypto_engine *engine,
- void *areq);
- int (*unprepare_request)(struct crypto_engine *engine,
- void *areq);
int (*do_one_request)(struct crypto_engine *engine,
void *areq);
};
-struct crypto_engine_ctx {
+struct aead_engine_alg {
+ struct aead_alg base;
+ struct crypto_engine_op op;
+};
+
+struct ahash_engine_alg {
+ struct ahash_alg base;
+ struct crypto_engine_op op;
+};
+
+struct akcipher_engine_alg {
+ struct akcipher_alg base;
+ struct crypto_engine_op op;
+};
+
+struct kpp_engine_alg {
+ struct kpp_alg base;
+ struct crypto_engine_op op;
+};
+
+struct skcipher_engine_alg {
+ struct skcipher_alg base;
struct crypto_engine_op op;
};
@@ -124,4 +80,28 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool rt, int qlen);
int crypto_engine_exit(struct crypto_engine *engine);
+int crypto_engine_register_aead(struct aead_engine_alg *alg);
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count);
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count);
+
#endif /* _CRYPTO_ENGINE_H */
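With this rework a driver no longer embeds a crypto_engine_ctx; it wraps its algorithm in one of the new *_engine_alg types, pairing the base algorithm with a crypto_engine_op, and registers it through the matching helper. A hedged sketch for a skcipher; the callback body, cra_* values and the omitted setkey/encrypt/decrypt hooks are illustrative, not a complete driver:

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/skcipher.h>
#include <linux/container_of.h>
#include <linux/module.h>

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* ... program the hardware for @req here ... */
	crypto_finalize_skcipher_request(engine, req, 0);
	return 0;
}

static struct skcipher_engine_alg my_skcipher = {
	.base = {
		.base = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-mydev",
			.cra_priority		= 300,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		/* .setkey / .encrypt / .decrypt: real driver entry points */
	},
	.op.do_one_request = my_do_one_request,
};

static int my_register(void)
{
	return crypto_engine_register_skcipher(&my_skcipher);
}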
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644
index 000000000000..fbf4be56cf12
--- /dev/null
+++ b/include/crypto/internal/engine.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN 30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is on working
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request
+ * crypto-engine, in head position to keep order
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on multiple
+ * requests support.
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool idling;
+ bool busy;
+ bool running;
+
+ bool retry_support;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+ struct device *dev;
+
+ bool rt;
+
+ int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+ int (*do_batch_requests)(struct crypto_engine *engine);
+
+
+ struct kthread_worker *kworker;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+#endif
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 8fadd561c50e..462f8a34cdf8 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -78,6 +78,10 @@ extern int restrict_link_by_ca(struct key *dest_keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *trust_keyring);
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
#else
static inline int restrict_link_by_ca(struct key *dest_keyring,
const struct key_type *type,
@@ -86,6 +90,14 @@ static inline int restrict_link_by_ca(struct key *dest_keyring,
{
return 0;
}
+
+static inline int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
#endif
extern int query_asymmetric_key(const struct kernel_pkey_params *,
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 02f2ac4dd2df..e69cece404b3 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -1537,7 +1537,7 @@ enum drm_dp_phy {
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
#define DP_LTTPR_COMMON_CAP_SIZE 8
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 4977e0ab72db..fad3c4003b2b 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -25,6 +25,7 @@ void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
+void drm_kms_helper_poll_reschedule(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 91e080efb918..8365adf842ef 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -23,10 +23,15 @@ extern int restrict_link_by_builtin_trusted(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
extern __init int load_module_cert(struct key *keyring);
#else
#define restrict_link_by_builtin_trusted restrict_link_reject
+#define restrict_link_by_digsig_builtin restrict_link_reject
static inline __init int load_module_cert(struct key *keyring)
{
@@ -41,8 +46,17 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin_and_secondary(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+void __init add_to_secondary_keyring(const char *source, const void *data, size_t len);
#else
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
+#define restrict_link_by_digsig_builtin_and_secondary restrict_link_by_digsig_builtin
+static inline void __init add_to_secondary_keyring(const char *source, const void *data, size_t len)
+{
+}
#endif
#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
diff --git a/include/kunit/attributes.h b/include/kunit/attributes.h
new file mode 100644
index 000000000000..bc76a0b786d2
--- /dev/null
+++ b/include/kunit/attributes.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_ATTRIBUTES_H
+#define _KUNIT_ATTRIBUTES_H
+
+/*
+ * struct kunit_attr_filter - representation of attributes filter with the
+ * attribute object and string input
+ */
+struct kunit_attr_filter {
+ struct kunit_attr *attr;
+ char *input;
+};
+
+/*
+ * Returns the name of the filter's attribute.
+ */
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter);
+
+/*
+ * Print all test attributes for a test case or suite.
+ * Output format for test cases: "# <test_name>.<attribute>: <value>"
+ * Output format for test suites: "# <attribute>: <value>"
+ */
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level);
+
+/*
+ * Returns the number of filters in input.
+ */
+int kunit_get_filter_count(char *input);
+
+/*
+ * Parse attributes filter input and return an object containing the
+ * attribute object and the string input of the next filter.
+ */
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err);
+
+/*
+ * Returns a copy of the suite containing only tests that pass the filter.
+ */
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err);
+
+#endif /* _KUNIT_ATTRIBUTES_H */
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
index 9b80150a5d62..85315c80b303 100644
--- a/include/kunit/static_stub.h
+++ b/include/kunit/static_stub.h
@@ -11,7 +11,7 @@
#if !IS_ENABLED(CONFIG_KUNIT)
/* If CONFIG_KUNIT is not enabled, these stubs quietly disappear. */
-#define KUNIT_TRIGGER_STATIC_STUB(real_fn_name, args...) do {} while (0)
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) do {} while (0)
#else
@@ -30,7 +30,7 @@
* This is a function prologue which is used to allow calls to the current
* function to be redirected by a KUnit test. KUnit tests can call
* kunit_activate_static_stub() to pass a replacement function in. The
- * replacement function will be called by KUNIT_TRIGGER_STATIC_STUB(), which
+ * replacement function will be called by KUNIT_STATIC_STUB_REDIRECT(), which
* will then return from the function. If the caller is not in a KUnit context,
* the function will continue execution as normal.
*
@@ -87,7 +87,7 @@ void __kunit_activate_static_stub(struct kunit *test,
* When activated, calls to real_fn_addr from within this test (even if called
* indirectly) will instead call replacement_addr. The function pointed to by
* real_fn_addr must begin with the static stub prologue in
- * KUNIT_TRIGGER_STATIC_STUB() for this to work. real_fn_addr and
+ * KUNIT_STATIC_STUB_REDIRECT() for this to work. real_fn_addr and
* replacement_addr must have the same type.
*
* The redirection can be disabled again with kunit_deactivate_static_stub().
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
index 30ca541b6ff2..47aa8f21ccce 100644
--- a/include/kunit/test-bug.h
+++ b/include/kunit/test-bug.h
@@ -9,6 +9,8 @@
#ifndef _KUNIT_TEST_BUG_H
#define _KUNIT_TEST_BUG_H
+#include <linux/stddef.h> /* for NULL */
+
#if IS_ENABLED(CONFIG_KUNIT)
#include <linux/jump_label.h> /* For static branch */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 23120d50499e..d33114097d0d 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -63,12 +63,35 @@ enum kunit_status {
KUNIT_SKIPPED,
};
+/* Attribute struct/enum definitions */
+
+/*
+ * Speed Attribute is stored as an enum and separated into categories of
+ * speed: very_slow, slow, and normal. These speeds are relative to
+ * other KUnit tests.
+ *
+ * Note: unset speed attribute acts as default of KUNIT_SPEED_NORMAL.
+ */
+enum kunit_speed {
+ KUNIT_SPEED_UNSET,
+ KUNIT_SPEED_VERY_SLOW,
+ KUNIT_SPEED_SLOW,
+ KUNIT_SPEED_NORMAL,
+ KUNIT_SPEED_MAX = KUNIT_SPEED_NORMAL,
+};
+
+/* Holds attributes for each test case and suite */
+struct kunit_attributes {
+ enum kunit_speed speed;
+};
+
/**
* struct kunit_case - represents an individual test case.
*
* @run_case: the function representing the actual test case.
* @name: the name of the test case.
* @generate_params: the generator function for parameterized tests.
+ * @attr: the attributes associated with the test
*
* A test case is a function with the signature,
* ``void (*)(struct kunit *)``
@@ -104,9 +127,11 @@ struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
const void* (*generate_params)(const void *prev, char *desc);
+ struct kunit_attributes attr;
/* private: internal use only. */
enum kunit_status status;
+ char *module_name;
char *log;
};
@@ -131,7 +156,32 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
* &struct kunit_case object from it. See the documentation for
* &struct kunit_case for an example on how to use it.
*/
-#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+#define KUNIT_CASE(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_ATTR - A helper for creating a &struct kunit_case
+ * with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_ATTR(test_name, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_SLOW - A helper for creating a &struct kunit_case
+ * with the slow attribute
+ *
+ * @test_name: a reference to a test case function.
+ */
+
+#define KUNIT_CASE_SLOW(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr.speed = KUNIT_SPEED_SLOW, .module_name = KBUILD_MODNAME}
/**
* KUNIT_CASE_PARAM - A helper for creation a parameterized &struct kunit_case
@@ -152,7 +202,21 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
*/
#define KUNIT_CASE_PARAM(test_name, gen_params) \
{ .run_case = test_name, .name = #test_name, \
- .generate_params = gen_params }
+ .generate_params = gen_params, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_PARAM_ATTR - A helper for creating a parameterized &struct
+ * kunit_case with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @gen_params: a reference to a parameter generator function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_PARAM_ATTR(test_name, gen_params, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
/**
* struct kunit_suite - describes a related collection of &struct kunit_case
@@ -163,6 +227,7 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
* @init: called before every test case.
* @exit: called after every test case.
* @test_cases: a null terminated array of test cases.
+ * @attr: the attributes associated with the test suite
*
* A kunit_suite is a collection of related &struct kunit_case s, such that
* @init is called before every test case and @exit is called after every
@@ -182,6 +247,7 @@ struct kunit_suite {
int (*init)(struct kunit *test);
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
+ struct kunit_attributes attr;
/* private: internal use only */
char status_comment[KUNIT_STATUS_COMMENT_SIZE];
@@ -190,6 +256,12 @@ struct kunit_suite {
int suite_init_err;
};
+/* Stores an array of suites, end points one past the end */
+struct kunit_suite_set {
+ struct kunit_suite * const *start;
+ struct kunit_suite * const *end;
+};
+
/**
* struct kunit - represents a running instance of a test.
*
@@ -237,6 +309,10 @@ static inline void kunit_set_failure(struct kunit *test)
}
bool kunit_enabled(void);
+const char *kunit_action(void);
+const char *kunit_filter_glob(void);
+char *kunit_filter(void);
+char *kunit_filter_action(void);
void kunit_init_test(struct kunit *test, const char *name, char *log);
@@ -247,10 +323,21 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite);
unsigned int kunit_test_case_num(struct kunit_suite *suite,
struct kunit_case *test_case);
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err);
+void kunit_free_suite_set(struct kunit_suite_set suite_set);
+
int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites);
void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin);
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr);
+
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
#else
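The new attr and module_name members plus the KUNIT_CASE_* attribute macros let individual cases advertise how slow they are, and kunit_filter_suites() can then include or exclude them. Minimal sketch of a test file marking a case slow (the test body is illustrative):

#include <kunit/test.h>

static void example_slow_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 2 + 2, 4);
}

static struct kunit_case example_speed_cases[] = {
	KUNIT_CASE_SLOW(example_slow_test),
	{}
};

static struct kunit_suite example_speed_suite = {
	.name = "example-speed",
	.test_cases = example_speed_cases,
};
kunit_test_suite(example_speed_suite);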
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 641dc4843987..a73246c3c35e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -477,8 +477,6 @@ static inline int acpi_get_node(acpi_handle handle)
return 0;
}
#endif
-extern int acpi_paddr_to_node(u64 start_addr, u64 size);
-
extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
@@ -1100,7 +1098,7 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-#ifdef CONFIG_X86
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
struct acpi_s2idle_dev_ops {
struct list_head list_node;
void (*prepare)(void);
@@ -1109,7 +1107,13 @@ struct acpi_s2idle_dev_ops {
};
int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
-#endif /* CONFIG_X86 */
+int acpi_get_lps0_constraint(struct acpi_device *adev);
+#else /* CONFIG_SUSPEND && CONFIG_X86 */
+static inline int acpi_get_lps0_constraint(struct device *dev)
+{
+ return ACPI_STATE_UNKNOWN;
+}
+#endif /* CONFIG_SUSPEND && CONFIG_X86 */
#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index ee7cb6aaff71..1cb65592c95d 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,6 +21,7 @@
*/
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 14dc461b0e82..255701e1251b 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -47,10 +47,12 @@ int sdei_unregister_ghes(struct ghes *ghes);
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
void __init sdei_init(void);
+void sdei_handler_abort(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
static inline void sdei_init(void) { }
+static inline void sdei_handler_abort(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
deleted file mode 100644
index 1491af38cc6e..000000000000
--- a/include/linux/atmel-mci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_ATMEL_MCI_H
-#define __LINUX_ATMEL_MCI_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-#define ATMCI_MAX_NR_SLOTS 2
-
-/**
- * struct mci_slot_pdata - board-specific per-slot configuration
- * @bus_width: Number of data lines wired up the slot
- * @detect_pin: GPIO pin wired to the card detect switch
- * @wp_pin: GPIO pin wired to the write protect sensor
- * @detect_is_active_high: The state of the detect pin when it is active
- * @non_removable: The slot is not removable, only detect once
- *
- * If a given slot is not present on the board, @bus_width should be
- * set to 0. The other fields are ignored in this case.
- *
- * Any pins that aren't available should be set to a negative value.
- *
- * Note that support for multiple slots is experimental -- some cards
- * might get upset if we don't get the clock management exactly right.
- * But in most cases, it should work just fine.
- */
-struct mci_slot_pdata {
- unsigned int bus_width;
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
- bool non_removable;
-};
-
-/**
- * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
- void *dma_slave;
- dma_filter_fn dma_filter;
- struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
-};
-
-#endif /* __LINUX_ATMEL_MCI_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0bad62cca3d0..d5c5e59ddbd2 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -52,7 +52,6 @@ struct block_device {
atomic_t bd_openers;
spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
void * bd_claiming;
void * bd_holder;
const struct blk_holder_ops *bd_holder_ops;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 87d94be7825a..83ce87354e9a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -750,7 +750,8 @@ static inline int bdev_read_only(struct block_device *bdev)
}
bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
-bool disk_force_media_change(struct gendisk *disk, unsigned int events);
+void disk_force_media_change(struct gendisk *disk);
+void bdev_mark_dead(struct block_device *bdev, bool surprise);
void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);
@@ -809,7 +810,6 @@ int __register_blkdev(unsigned int major, const char *name,
void unregister_blkdev(unsigned int major, const char *name);
bool disk_check_media_change(struct gendisk *disk);
-int __invalidate_device(struct block_device *bdev, bool kill_dirty);
void set_capacity(struct gendisk *disk, sector_t size);
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
@@ -1460,9 +1460,16 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
#endif
struct blk_holder_ops {
- void (*mark_dead)(struct block_device *bdev);
+ void (*mark_dead)(struct block_device *bdev, bool surprise);
+
+ /*
+ * Sync the file system mounted on the block device.
+ */
+ void (*sync)(struct block_device *bdev);
};
+extern const struct blk_holder_ops fs_holder_ops;
+
/*
* Return the correct open flags for blkdev_get_by_* for super block flags
* as stored in sb->s_flags.
@@ -1521,8 +1528,6 @@ static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
}
#endif /* CONFIG_BLOCK */
-int fsync_bdev(struct block_device *bdev);
-
int freeze_bdev(struct block_device *bdev);
int thaw_bdev(struct block_device *bdev);
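blk_holder_ops gains a surprise flag on ->mark_dead() and a new ->sync() callback, with fs_holder_ops exported for the common filesystem case. For a non-filesystem holder, custom ops would be shaped like this (names and bodies are illustrative):

#include <linux/blkdev.h>

static void my_mark_dead(struct block_device *bdev, bool surprise)
{
	/* stop using @bdev; @surprise means the device is already gone */
}

static void my_sync(struct block_device *bdev)
{
	/* write back whatever state the holder keeps for @bdev */
}

static const struct blk_holder_ops my_holder_ops = {
	.mark_dead	= my_mark_dead,
	.sync		= my_sync,
};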
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8a0d5466c7be..ae20dbb885d6 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -661,6 +661,8 @@ struct cgroup_subsys {
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
+ int (*css_local_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 1ef013324237..06f1b292f8a0 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * an operation which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same driver,
+ * the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity it previously got
+ * from clk_rate_exclusive_get()
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
#endif
#ifdef CONFIG_HAVE_CLK_PREPARE
@@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
*/
struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id);
-/**
- * clk_rate_exclusive_get - get exclusivity over the rate control of a
- * producer
- * @clk: clock source
- *
- * This function allows drivers to get exclusive control over the rate of a
- * provider. It prevents any other consumer to execute, even indirectly,
- * opereation which could alter the rate of the provider or cause glitches
- *
- * If exlusivity is claimed more than once on clock, even by the same driver,
- * the rate effectively gets locked as exclusivity can't be preempted.
- *
- * Must not be called from within atomic context.
- *
- * Returns success (0) or negative errno.
- */
-int clk_rate_exclusive_get(struct clk *clk);
-
-/**
- * clk_rate_exclusive_put - release exclusivity over the rate control of a
- * producer
- * @clk: clock source
- *
- * This function allows drivers to release the exclusivity it previously got
- * from clk_rate_exclusive_get()
- *
- * The caller must balance the number of clk_rate_exclusive_get() and
- * clk_rate_exclusive_put() calls.
- *
- * Must not be called from within atomic context.
- */
-void clk_rate_exclusive_put(struct clk *clk);
/**
* clk_enable - inform the system when the clock source should be running.
@@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
-
-static inline int clk_rate_exclusive_get(struct clk *clk)
-{
- return 0;
-}
-
-static inline void clk_rate_exclusive_put(struct clk *clk) {}
-
static inline int clk_enable(struct clk *clk)
{
return 0;
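The clk.h hunks only relocate the clk_rate_exclusive_get()/clk_rate_exclusive_put() kernel-doc and their no-op stubs; the consumer contract is unchanged. For reference, a hedged sketch of the balanced get/put pattern those comments describe (the wrapper function is made up):

#include <linux/clk.h>

static int example_set_locked_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	ret = clk_rate_exclusive_get(clk);	/* no other consumer may change the rate now */
	if (ret)
		return ret;

	ret = clk_set_rate(clk, rate);
	if (ret)
		clk_rate_exclusive_put(clk);	/* balance the get on failure */

	/* on success the caller keeps exclusivity and must call
	 * clk_rate_exclusive_put() itself, e.g. at remove time */
	return ret;
}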
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 00efa35c350f..28566624f008 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -95,6 +95,19 @@
#endif
/*
+ * Optional: only supported since gcc >= 14
+ * Optional: only supported since clang >= 18
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://reviews.llvm.org/D148381
+ */
+#if __has_attribute(__counted_by__)
+# define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
+
+/*
* Optional: not supported by gcc
* Optional: only supported since clang >= 14.0
*
@@ -130,19 +143,6 @@
#endif
/*
- * Optional: only supported since gcc >= 14
- * Optional: only supported since clang >= 17
- *
- * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
- * clang: https://reviews.llvm.org/D148381
- */
-#if __has_attribute(__element_count__)
-# define __counted_by(member) __attribute__((__element_count__(#member)))
-#else
-# define __counted_by(member)
-#endif
-
-/*
* Optional: only supported since clang >= 14.0
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
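__counted_by() ties a flexible array to the member that records its element count so that fortified accessors can bounds-check it; the dm-verity-loadpin hunk later in this diff is one real conversion. A matching sketch with a hypothetical struct:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_blob {
	u32 len;			/* number of bytes in data[] */
	u8 data[] __counted_by(len);
};

static struct example_blob *example_blob_alloc(u32 n)
{
	struct example_blob *b = kzalloc(struct_size(b, data, n), GFP_KERNEL);

	if (b)
		b->len = n;
	return b;
}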
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 547ea1ff806e..c523c6683789 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -106,6 +106,34 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
#define __cold
#endif
+/*
+ * On x86-64 and arm64 targets, __preserve_most changes the calling convention
+ * of a function to make the code in the caller as unintrusive as possible. This
+ * convention behaves identically to the C calling convention on how arguments
+ * and return values are passed, but uses a different set of caller- and callee-
+ * saved registers.
+ *
+ * The purpose is to alleviate the burden of saving and recovering a large
+ * register set before and after the call in the caller. This is beneficial for
+ * rarely taken slow paths, such as error-reporting functions that may be called
+ * from hot paths.
+ *
+ * Note: This may conflict with instrumentation inserted on function entry which
+ * does not use __preserve_most or equivalent convention (if in assembly). Since
+ * function tracing assumes the normal C calling convention, where the attribute
+ * is supported, __preserve_most implies notrace. It is recommended to restrict
+ * use of the attribute to functions that should or already disable tracing.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
+ */
+#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
+# define __preserve_most notrace __attribute__((__preserve_most__))
+#else
+# define __preserve_most
+#endif
+
/* Builtins */
/*
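Because __preserve_most changes only the calling convention, its natural users are small, rarely executed helpers called from hot paths: the caller no longer has to spill its caller-saved registers around the call. A hedged sketch (the helper itself is made up):

#include <linux/printk.h>

/* Cold error-reporting path; __preserve_most (which implies notrace here)
 * keeps register save/restore out of every fast-path call site. */
static void __preserve_most report_bad_index(unsigned long idx)
{
	pr_err("index %lu out of range\n", idx);
}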
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 62b32b19e0a8..fb2915676574 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -116,6 +116,7 @@ extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
+extern void complete_on_current_cpu(struct completion *x);
extern void complete_all(struct completion *);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e006c719182b..0abd60a7987b 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -18,6 +18,7 @@
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpu_smt.h>
struct device;
struct device_node;
@@ -194,7 +195,6 @@ void arch_cpu_finalize_init(void);
static inline void arch_cpu_finalize_init(void) { }
#endif
-void cpu_set_state_online(int cpu);
void play_idle_precise(u64 duration_ns, u64 latency_ns);
static inline void play_idle(unsigned long duration_us)
@@ -208,30 +208,6 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-enum cpuhp_smt_control {
- CPU_SMT_ENABLED,
- CPU_SMT_DISABLED,
- CPU_SMT_FORCE_DISABLED,
- CPU_SMT_NOT_SUPPORTED,
- CPU_SMT_NOT_IMPLEMENTED,
-};
-
-#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
-extern enum cpuhp_smt_control cpu_smt_control;
-extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology(void);
-extern bool cpu_smt_possible(void);
-extern int cpuhp_smt_enable(void);
-extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
-#else
-# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
-static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology(void) { }
-static inline bool cpu_smt_possible(void) { return false; }
-static inline int cpuhp_smt_enable(void) { return 0; }
-static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
-#endif
-
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
diff --git a/include/linux/cpu_smt.h b/include/linux/cpu_smt.h
new file mode 100644
index 000000000000..0c1664294b57
--- /dev/null
+++ b/include/linux/cpu_smt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPU_SMT_H_
+#define _LINUX_CPU_SMT_H_
+
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern unsigned int cpu_smt_num_threads;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads);
+extern bool cpu_smt_possible(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+#else
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
+# define cpu_smt_num_threads 1
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads) { }
+static inline bool cpu_smt_possible(void) { return false; }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+#endif
+
+#endif /* _LINUX_CPU_SMT_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 172ff51c1b2a..43b363a99215 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -19,6 +19,7 @@
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
+#include <linux/minmax.h>
/*********************************************************************
* CPUFREQ INTERFACE *
@@ -370,7 +371,7 @@ struct cpufreq_driver {
int (*target_intermediate)(struct cpufreq_policy *policy,
unsigned int index);
- /* should be defined, if possible */
+ /* should be defined, if possible, return 0 on error */
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
@@ -467,17 +468,8 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *poli
unsigned int min,
unsigned int max)
{
- if (policy->min < min)
- policy->min = min;
- if (policy->max < min)
- policy->max = min;
- if (policy->min > max)
- policy->min = max;
- if (policy->max > max)
- policy->max = max;
- if (policy->min > policy->max)
- policy->min = policy->max;
- return;
+ policy->max = clamp(policy->max, min, max);
+ policy->min = clamp(policy->min, min, policy->max);
}
static inline void
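The clamp() rewrite keeps the old semantics, including the final guarantee that policy->min never exceeds policy->max, because policy->max is clamped to [min, max] first and policy->min is then clamped against that result. As a worked example with illustrative numbers: for min = 800000, max = 2000000 and an incoming policy of min = 2500000, max = 3000000, both the old if-chain and the new code end with policy->max = clamp(3000000, 800000, 2000000) = 2000000 and policy->min = clamp(2500000, 800000, 2000000) = 2000000.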
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 25b6e6e6ba6b..06dda85f0424 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -48,7 +48,7 @@
* same section.
*
* If neither #1 nor #2 apply, please use the dynamic state space when
- * setting up a state by using CPUHP_PREPARE_DYN or CPUHP_PREPARE_ONLINE
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
* for the @state argument of the setup function.
*
* See Documentation/core-api/cpu_hotplug.rst for further information and
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 9192986b1a73..ac862422df15 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -48,7 +48,7 @@ MALLOC_VISIBLE void *malloc(int size)
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
- malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+ malloc_ptr = (malloc_ptr + 7) & ~7; /* Align */
p = (void *)malloc_ptr;
malloc_ptr += size;
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
index 552b817ab102..3ac6dbaeaa37 100644
--- a/include/linux/dm-verity-loadpin.h
+++ b/include/linux/dm-verity-loadpin.h
@@ -12,7 +12,7 @@ extern struct list_head dm_verity_loadpin_trusted_root_digests;
struct dm_verity_loadpin_trusted_root_digest {
struct list_head node;
unsigned int len;
- u8 data[];
+ u8 data[] __counted_by(len);
};
#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index b1d26f9f1c9f..9f183a679277 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -30,7 +30,7 @@ struct dnotify_struct {
FS_MOVED_FROM | FS_MOVED_TO)
extern void dnotify_flush(struct file *, fl_owner_t);
-extern int fcntl_dirnotify(int, struct file *, unsigned long);
+extern int fcntl_dirnotify(int, struct file *, unsigned int);
#else
@@ -38,7 +38,7 @@ static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
-static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
{
return -EINVAL;
}
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ab088c662e88..5a1e39df8b26 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -726,7 +726,6 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
return EFI_SUCCESS;
}
#endif
-extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
@@ -1130,7 +1129,7 @@ extern bool efi_runtime_disabled(void);
static inline bool efi_runtime_disabled(void) { return true; }
#endif
-extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+extern void efi_call_virt_check_flags(unsigned long flags, const void *caller);
extern unsigned long efi_call_virt_save_flags(void);
enum efi_secureboot_mode {
@@ -1171,8 +1170,7 @@ static inline void efi_check_for_embedded_firmwares(void) { }
#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt():
+ * Arch code must implement the following three routines:
*
* * arch_efi_call_virt_setup()
*
@@ -1181,9 +1179,8 @@ static inline void efi_check_for_embedded_firmwares(void) { }
*
* * arch_efi_call_virt()
*
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
+ * Performs the call. This routine takes a variable number of arguments so
+ * it must be implemented as a variadic preprocessor macro.
*
* * arch_efi_call_virt_teardown()
*
@@ -1192,33 +1189,20 @@ static inline void efi_check_for_embedded_firmwares(void) { }
#define efi_call_virt_pointer(p, f, args...) \
({ \
- efi_status_t __s; \
+ typeof((p)->f(args)) __s; \
unsigned long __flags; \
\
arch_efi_call_virt_setup(); \
\
__flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
+ efi_call_virt_check_flags(__flags, NULL); \
\
arch_efi_call_virt_teardown(); \
\
__s; \
})
-#define __efi_call_virt_pointer(p, f, args...) \
-({ \
- unsigned long __flags; \
- \
- arch_efi_call_virt_setup(); \
- \
- __flags = efi_call_virt_save_flags(); \
- arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
- \
- arch_efi_call_virt_teardown(); \
-})
-
#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
@@ -1244,6 +1228,10 @@ extern int efi_tpm_final_log_size;
extern unsigned long rci2_table_phys;
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context);
+
/*
* efi_runtime_service() function identifiers.
* "NONE" is used by efi_recover_from_page_fault() to check if the page
@@ -1263,25 +1251,26 @@ enum efi_rts_ids {
EFI_RESET_SYSTEM,
EFI_UPDATE_CAPSULE,
EFI_QUERY_CAPSULE_CAPS,
+ EFI_ACPI_PRM_HANDLER,
};
+union efi_rts_args;
+
/*
* efi_runtime_work: Details of EFI Runtime Service work
- * @arg<1-5>: EFI Runtime Service function arguments
+ * @args: Pointer to union describing the arguments
* @status: Status of executing EFI Runtime Service
* @efi_rts_id: EFI Runtime Service function identifier
* @efi_rts_comp: Struct used for handling completions
+ * @caller: The caller of the runtime service
*/
struct efi_runtime_work {
- void *arg1;
- void *arg2;
- void *arg3;
- void *arg4;
- void *arg5;
- efi_status_t status;
- struct work_struct work;
- enum efi_rts_ids efi_rts_id;
- struct completion efi_rts_comp;
+ union efi_rts_args *args;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+ const void *caller;
};
extern struct efi_runtime_work efi_rts_work;
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
index efcdd1631d9b..95e868e09e29 100644
--- a/include/linux/filelock.h
+++ b/include/linux/filelock.h
@@ -144,7 +144,7 @@ int fcntl_setlk64(unsigned int, struct file *, unsigned int,
struct flock64 *);
#endif
-int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
int fcntl_getlease(struct file *filp);
/* fs/locks.c */
@@ -167,8 +167,8 @@ bool vfs_inode_has_locks(struct inode *inode);
int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
void lease_get_mtime(struct inode *, struct timespec64 *time);
-int generic_setlease(struct file *, long, struct file_lock **, void **priv);
-int vfs_setlease(struct file *, long, struct file_lock **, void **);
+int generic_setlease(struct file *, int, struct file_lock **, void **priv);
+int vfs_setlease(struct file *, int, struct file_lock **, void **);
int lease_modify(struct file_lock *, int, struct list_head *);
struct notifier_block;
@@ -213,7 +213,7 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
return -EACCES;
}
#endif
-static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
{
return -EINVAL;
}
@@ -306,13 +306,13 @@ static inline void lease_get_mtime(struct inode *inode,
return;
}
-static inline int generic_setlease(struct file *filp, long arg,
+static inline int generic_setlease(struct file *filp, int arg,
struct file_lock **flp, void **priv)
{
return -EINVAL;
}
-static inline int vfs_setlease(struct file *filp, long arg,
+static inline int vfs_setlease(struct file *filp, int arg,
struct file_lock **lease, void **priv)
{
return -EINVAL;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 562f2623c9c9..dda08d973639 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -338,6 +338,20 @@ enum rw_hint {
#define IOCB_NOIO (1 << 20)
/* can use bio alloc cache */
#define IOCB_ALLOC_CACHE (1 << 21)
+/*
+ * IOCB_DIO_CALLER_COMP can be set by the iocb owner, to indicate that the
+ * iocb completion can be passed back to the owner for execution from a safe
+ * context rather than needing to be punted through a workqueue. If this
+ * flag is set, the bio completion handling may set iocb->dio_complete to a
+ * handler function and iocb->private to context information for that handler.
+ * The issuer should call the handler with that context information from task
+ * context to complete the processing of the iocb. Note that while this
+ * provides a task context for the dio_complete() callback, it should only be
+ * used on the completion side for non-IO generating completions. It's fine to
+ * call blocking functions from this callback, but they should not wait for
+ * unrelated IO (like cache flushing, new IO generation, etc).
+ */
+#define IOCB_DIO_CALLER_COMP (1 << 22)
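To make the IOCB_DIO_CALLER_COMP contract above concrete, here is a minimal, hedged issuer-side sketch; the example_* names are illustrative placeholders, not kernel APIs. The issuer opts in before submission and, once ki_complete has fired, runs any installed dio_complete() handler from task context.

#include <linux/fs.h>

/* Hypothetical issuer: opt in to caller-side completion before submitting. */
static void example_prep_dio(struct kiocb *iocb)
{
	iocb->ki_flags |= IOCB_DIO_CALLER_COMP;
}

/*
 * Hypothetical issuer: called from task context after ki_complete() was
 * invoked. Only inspect ->dio_complete because we set IOCB_DIO_CALLER_COMP
 * (and did not use IOCB_WAITQ, which shares the union).
 */
static ssize_t example_finish_dio(struct kiocb *iocb, ssize_t ret)
{
	if (iocb->dio_complete)
		ret = iocb->dio_complete(iocb->private);
	return ret;
}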
/* for use in trace events */
#define TRACE_IOCB_STRINGS \
@@ -351,7 +365,8 @@ enum rw_hint {
{ IOCB_WRITE, "WRITE" }, \
{ IOCB_WAITQ, "WAITQ" }, \
{ IOCB_NOIO, "NOIO" }, \
- { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }
+ { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
+ { IOCB_DIO_CALLER_COMP, "CALLER_COMP" }
struct kiocb {
struct file *ki_filp;
@@ -360,7 +375,23 @@ struct kiocb {
void *private;
int ki_flags;
u16 ki_ioprio; /* See linux/ioprio.h */
- struct wait_page_queue *ki_waitq; /* for async buffered IO */
+ union {
+ /*
+ * Only used for async buffered reads, where it denotes the
+ * page waitqueue associated with completing the read. Valid
+ * IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
+ /*
+ * Can be used for O_DIRECT IO, where the completion handling
+ * is punted back to the issuer of the IO. May only be set
+ * if IOCB_DIO_CALLER_COMP is set by the issuer, and the issuer
+ * must then check for presence of this handler when ki_complete
+ * is invoked. The data passed in to this handler must be
+ * assigned to ->private when dio_complete is assigned.
+ */
+ ssize_t (*dio_complete)(void *data);
+ };
};
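On the other side of the same contract, a completion path that wants to defer work back to the issuer fills in ->dio_complete and ->private instead of punting to a workqueue. A hedged sketch with hypothetical example_* names:

#include <linux/fs.h>

/* Hypothetical per-IO context handed back to the issuer. */
struct example_dio_ctx {
	ssize_t bytes_done;
};

/* Runs later, from the issuer's task context. */
static ssize_t example_dio_complete(void *data)
{
	struct example_dio_ctx *ctx = data;

	return ctx->bytes_done;
}

/* Hypothetical bio-completion hook: defer only if the issuer opted in. */
static void example_dio_bio_done(struct kiocb *iocb, struct example_dio_ctx *ctx)
{
	if (iocb->ki_flags & IOCB_DIO_CALLER_COMP) {
		iocb->dio_complete = example_dio_complete;
		iocb->private = ctx;	/* data passed to the handler */
	}
}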
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -642,7 +673,7 @@ struct inode {
loff_t i_size;
struct timespec64 i_atime;
struct timespec64 i_mtime;
- struct timespec64 i_ctime;
+ struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
@@ -1069,7 +1100,7 @@ extern void fasync_free(struct fasync_struct *);
extern void kill_fasync(struct fasync_struct **, int, int);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1095,6 +1126,8 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
/* These sb flags are internal to the kernel */
+#define SB_DEAD BIT(21)
+#define SB_DYING BIT(24)
#define SB_SUBMOUNT BIT(26)
#define SB_FORCE BIT(27)
#define SB_NOSEC BIT(28)
@@ -1147,7 +1180,8 @@ enum {
#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
struct sb_writers {
- int frozen; /* Is sb frozen? */
+ unsigned short frozen; /* Is sb frozen? */
+ unsigned short freeze_holders; /* Who froze fs? */
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
@@ -1474,7 +1508,79 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
kgid_has_mapping(fs_userns, kgid);
}
-extern struct timespec64 current_time(struct inode *inode);
+struct timespec64 current_mgtime(struct inode *inode);
+struct timespec64 current_time(struct inode *inode);
+struct timespec64 inode_set_ctime_current(struct inode *inode);
+
+/*
+ * Multigrain timestamps
+ *
+ * Conditionally use fine-grained ctime and mtime timestamps when there
+ * are users actively observing them via getattr. The primary use-case
+ * for this is NFS clients that use the ctime to distinguish between
+ * different states of the file, and that are often fooled by multiple
+ * operations that occur in the same coarse-grained timer tick.
+ *
+ * The kernel always keeps normalized struct timespec64 values in the ctime,
+ * which means that only the first 30 bits of the value are used. Use the
+ * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value
+ * has been queried since it was last updated.
+ */
+#define I_CTIME_QUERIED (1L<<30)
+
+/**
+ * inode_get_ctime - fetch the current ctime from the inode
+ * @inode: inode from which to fetch ctime
+ *
+ * Grab the current ctime tv_nsec field from the inode, mask off the
+ * I_CTIME_QUERIED flag and return it. This is mostly intended for use by
+ * internal consumers of the ctime that aren't concerned with ensuring a
+ * fine-grained update on the next change (e.g. when preparing to store
+ * the value in the backing store for later retrieval).
+ *
+ * This is safe to call regardless of whether the underlying filesystem
+ * is using multigrain timestamps.
+ */
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+{
+ struct timespec64 ctime;
+
+ ctime.tv_sec = inode->__i_ctime.tv_sec;
+ ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED;
+
+ return ctime;
+}
+
+/**
+ * inode_set_ctime_to_ts - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @ts: value to set in the ctime field
+ *
+ * Set the ctime in @inode to @ts
+ */
+static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_ctime = ts;
+ return ts;
+}
+
+/**
+ * inode_set_ctime - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @sec: tv_sec value to set
+ * @nsec: tv_nsec value to set
+ *
+ * Set the ctime in @inode to { @sec, @nsec }
+ */
+static inline struct timespec64 inode_set_ctime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_ctime_to_ts(inode, ts);
+}
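As a usage illustration of the multigrain ctime accessors above (a hedged sketch; example_* is hypothetical, not a real filesystem helper), callers go through inode_set_ctime_current()/inode_set_ctime_to_ts()/inode_get_ctime() rather than touching __i_ctime directly, so the I_CTIME_QUERIED handling stays inside the VFS:

#include <linux/fs.h>

/* Hypothetical helper: stamp ctime/mtime on a change and mirror it to the parent. */
static void example_update_times(struct inode *dir, struct inode *inode)
{
	struct timespec64 now = inode_set_ctime_current(inode);

	inode->i_mtime = now;
	inode_set_ctime_to_ts(dir, now);
	dir->i_mtime = now;
}

/* Reading the ctime back through the accessor masks off I_CTIME_QUERIED. */
static struct timespec64 example_read_ctime(const struct inode *inode)
{
	return inode_get_ctime(inode);
}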
/*
* Snapshotting support.
@@ -1770,6 +1876,7 @@ struct dir_context {
struct iov_iter;
struct io_uring_cmd;
+struct offset_ctx;
struct file_operations {
struct module *owner;
@@ -1798,7 +1905,7 @@ struct file_operations {
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
void (*splice_eof)(struct file *file);
- int (*setlease)(struct file *, long, struct file_lock **, void **);
+ int (*setlease)(struct file *, int, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -1850,7 +1957,7 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec64 *, int);
+ int (*update_time)(struct inode *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
@@ -1863,6 +1970,7 @@ struct inode_operations {
int (*fileattr_set)(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -1908,6 +2016,10 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
+enum freeze_holder {
+ FREEZE_HOLDER_KERNEL = (1U << 0),
+ FREEZE_HOLDER_USERSPACE = (1U << 1),
+};
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
@@ -1920,9 +2032,9 @@ struct super_operations {
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *);
+ int (*freeze_super) (struct super_block *, enum freeze_holder who);
int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *);
+ int (*thaw_super) (struct super_block *, enum freeze_holder who);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
@@ -2200,7 +2312,7 @@ enum file_time_flags {
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
-int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
+int inode_update_time(struct inode *inode, int flags);
static inline void file_accessed(struct file *file)
{
@@ -2222,6 +2334,7 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
+#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2245,6 +2358,17 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
+/**
+ * is_mgtime: is this inode using multigrain timestamps
+ * @inode: inode to test for multigrain timestamps
+ *
+ * Return true if the inode uses multigrain timestamps, false otherwise.
+ */
+static inline bool is_mgtime(const struct inode *inode)
+{
+ return inode->i_sb->s_type->fs_flags & FS_MGTIME;
+}
+
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2296,8 +2420,8 @@ extern int unregister_filesystem(struct file_system_type *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int freeze_super(struct super_block *super);
-extern int thaw_super(struct super_block *super);
+int freeze_super(struct super_block *super, enum freeze_holder who);
+int thaw_super(struct super_block *super, enum freeze_holder who);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
@@ -2306,7 +2430,8 @@ extern int current_umask(void);
extern void ihold(struct inode * inode);
extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec64 *, int);
+int inode_update_timestamps(struct inode *inode, int flags);
+int generic_update_time(struct inode *, int);
/* /sys/fs */
extern struct kobject *fs_kobj;
@@ -2545,6 +2670,13 @@ static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
return (inode->i_mode ^ mode) & S_IFMT;
}
+/**
+ * file_start_write - get write access to a superblock for regular file io
+ * @file: the file we want to write to
+ *
+ * This is a variant of sb_start_write() which is a no-op on non-regular files.
+ * Should be matched with a call to file_end_write().
+ */
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
@@ -2559,11 +2691,53 @@ static inline bool file_start_write_trylock(struct file *file)
return sb_start_write_trylock(file_inode(file)->i_sb);
}
+/**
+ * file_end_write - drop write access to a superblock of a regular file
+ * @file: the file we wrote to
+ *
+ * Should be matched with a call to file_start_write().
+ */
static inline void file_end_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(file_inode(file)->i_sb);
+}
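For the pairing described above, a hedged sketch of a synchronous write path; the example_* names are hypothetical placeholders for the actual write work:

#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical worker that performs the actual write. */
static ssize_t example_do_write(struct file *file, struct iov_iter *from, loff_t *pos);

static ssize_t example_write(struct file *file, struct iov_iter *from, loff_t *pos)
{
	ssize_t ret;

	file_start_write(file);		/* no-op for non-regular files */
	ret = example_do_write(file, from, pos);
	file_end_write(file);		/* always paired, on every return path */

	return ret;
}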
+
+/**
+ * kiocb_start_write - get write access to a superblock for async file io
+ * @iocb: the io context we want to submit the write with
+ *
+ * This is a variant of sb_start_write() for async io submission.
+ * Should be matched with a call to kiocb_end_write().
+ */
+static inline void kiocb_start_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ sb_start_write(inode->i_sb);
+ /*
+ * Fool lockdep by telling it the lock got released so that it
+ * doesn't complain about the held lock when we return to userspace.
+ */
+ __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * kiocb_end_write - drop write access to a superblock after async file io
+ * @iocb: the io context we submitted the write with
+ *
+ * Should be matched with a call to kiocb_start_write().
+ */
+static inline void kiocb_end_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Tell lockdep we inherited freeze protection from submission thread.
+ */
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(inode->i_sb);
}
/*
@@ -2613,8 +2787,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode)
#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
- BUG_ON(!atomic_read(&inode->i_readcount));
- atomic_dec(&inode->i_readcount);
+ BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
}
static inline void i_readcount_inc(struct inode *inode)
{
@@ -2880,7 +3053,8 @@ extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void generic_fillattr(struct mnt_idmap *, struct inode *, struct kstat *);
+void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
+void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -2919,7 +3093,6 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
@@ -2940,6 +3113,8 @@ extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
extern int simple_rename(struct mnt_idmap *, struct inode *,
@@ -2956,7 +3131,7 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
-extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
+extern int simple_nosetlease(struct file *, int, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
@@ -2977,6 +3152,22 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count);
+struct offset_ctx {
+ struct xarray xa;
+ u32 next_offset;
+};
+
+void simple_offset_init(struct offset_ctx *octx);
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+void simple_offset_destroy(struct offset_ctx *octx);
+
+extern const struct file_operations simple_offset_dir_operations;
+
extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index ff6341e09925..96332db693d5 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -109,6 +109,7 @@ struct fs_context {
bool need_free:1; /* Need to call ops->free() */
bool global:1; /* Goes into &init_user_ns */
bool oldapi:1; /* Coming from mount(2) */
+ bool exclusive:1; /* create new superblock, reject existing one */
};
struct fs_context_operations {
@@ -150,14 +151,13 @@ extern int get_tree_nodev(struct fs_context *fc,
extern int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
-extern int get_tree_single_reconf(struct fs_context *fc,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
extern int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
void *key);
+int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc);
extern int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 54210a42c30d..010d39d0dc1c 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -24,7 +24,7 @@ static inline void fsstack_copy_attr_times(struct inode *dest,
{
dest->i_atime = src->i_atime;
dest->i_mtime = src->i_mtime;
- dest->i_ctime = src->i_ctime;
+ inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
#endif /* _LINUX_FS_STACK_H */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index a7d54d4d41fd..39fbfb4be944 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -104,7 +104,7 @@
enum qm_stop_reason {
QM_NORMAL,
QM_SOFT_RESET,
- QM_FLR,
+ QM_DOWN,
};
enum qm_state {
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 20284387b841..e718dbe928ba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -25,9 +25,6 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
#endif
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd,
- unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 1ed52441972f..10a1e81434cb 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -53,6 +53,10 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_NONE:
case ARPHRD_RAWIP:
case ARPHRD_PIMREG:
+ /* PPP adds its l2 header automatically in ppp_start_xmit().
+ * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
+ */
+ case ARPHRD_PPP:
return false;
default:
return true;
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e2b836c2e119..fdc6e64f49d6 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -261,9 +261,10 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops);
void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
-struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
diff --git a/include/linux/list.h b/include/linux/list.h
index f10344dbad4d..164b4d0e9d2a 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -38,11 +38,92 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
WRITE_ONCE(list->prev, list);
}
+#ifdef CONFIG_LIST_HARDENED
+
#ifdef CONFIG_DEBUG_LIST
-extern bool __list_add_valid(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-extern bool __list_del_entry_valid(struct list_head *entry);
+# define __list_valid_slowpath
+#else
+# define __list_valid_slowpath __cold __preserve_most
+#endif
+
+/*
+ * Performs the full set of list corruption checks before __list_add().
+ * On list corruption reports a warning, and returns false.
+ */
+extern bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+
+/*
+ * Performs list corruption checks before __list_add(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_add_valid_or_report().
+ */
+static __always_inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, since the immediate dereference of them below would
+ * result in a fault if NULL.
+ *
+ * With the reduced set of checks, we can afford to inline the
+ * checks, which also gives the compiler a chance to elide some
+ * of them completely if they can be proven at compile-time. If
+ * one of the pre-conditions does not hold, the slow-path will
+ * show a report which pre-condition failed.
+ */
+ if (likely(next->prev == prev && prev->next == next && new != prev && new != next))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_add_valid_or_report(new, prev, next);
+ return ret;
+}
+
+/*
+ * Performs the full set of list corruption checks before __list_del_entry().
+ * On list corruption reports a warning, and returns false.
+ */
+extern bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
+
+/*
+ * Performs list corruption checks before __list_del_entry(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_del_entry_valid_or_report().
+ */
+static __always_inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ struct list_head *prev = entry->prev;
+ struct list_head *next = entry->next;
+
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate
+ * dereference of them below would result in a fault.
+ */
+ if (likely(prev->next == entry && next->prev == entry))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_del_entry_valid_or_report(entry);
+ return ret;
+}
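For context on how these checks are consumed, a sketch of the caller pattern; it mirrors how __list_add()/__list_del_entry() in this header use the boolean, with example_* names used here purely for illustration:

static inline void example_list_add(struct list_head *new,
				    struct list_head *prev,
				    struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;			/* corruption reported, list untouched */

	next->prev = new;
	new->next = next;
	new->prev = prev;
	WRITE_ONCE(prev->next, new);
}

static inline void example_list_del_entry(struct list_head *entry)
{
	struct list_head *prev, *next;

	if (!__list_del_entry_valid(entry))
		return;			/* corruption reported, entry left alone */

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	WRITE_ONCE(prev->next, next);
}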
#else
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 4f2621e87634..f5b7352afaac 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -54,6 +54,7 @@ LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *f
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
struct fs_context *src_sc)
LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h
new file mode 100644
index 000000000000..c39a49269cb7
--- /dev/null
+++ b/include/linux/mfd/cs42l43-regs.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs42l43 register definitions
+ *
+ * Copyright (c) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_REGS_H
+#define CS42L43_CORE_REGS_H
+
+/* Registers */
+#define CS42L43_GEN_INT_STAT_1 0x000000C0
+#define CS42L43_GEN_INT_MASK_1 0x000000C1
+#define CS42L43_DEVID 0x00003000
+#define CS42L43_REVID 0x00003004
+#define CS42L43_RELID 0x0000300C
+#define CS42L43_SFT_RESET 0x00003020
+#define CS42L43_DRV_CTRL1 0x00006004
+#define CS42L43_DRV_CTRL3 0x0000600C
+#define CS42L43_DRV_CTRL4 0x00006010
+#define CS42L43_DRV_CTRL_5 0x00006014
+#define CS42L43_GPIO_CTRL1 0x00006034
+#define CS42L43_GPIO_CTRL2 0x00006038
+#define CS42L43_GPIO_STS 0x0000603C
+#define CS42L43_GPIO_FN_SEL 0x00006040
+#define CS42L43_MCLK_SRC_SEL 0x00007004
+#define CS42L43_CCM_BLK_CLK_CONTROL 0x00007010
+#define CS42L43_SAMPLE_RATE1 0x00007014
+#define CS42L43_SAMPLE_RATE2 0x00007018
+#define CS42L43_SAMPLE_RATE3 0x0000701C
+#define CS42L43_SAMPLE_RATE4 0x00007020
+#define CS42L43_PLL_CONTROL 0x00007034
+#define CS42L43_FS_SELECT1 0x00007038
+#define CS42L43_FS_SELECT2 0x0000703C
+#define CS42L43_FS_SELECT3 0x00007040
+#define CS42L43_FS_SELECT4 0x00007044
+#define CS42L43_PDM_CONTROL 0x0000704C
+#define CS42L43_ASP_CLK_CONFIG1 0x00007058
+#define CS42L43_ASP_CLK_CONFIG2 0x0000705C
+#define CS42L43_OSC_DIV_SEL 0x00007068
+#define CS42L43_ADC_B_CTRL1 0x00008000
+#define CS42L43_ADC_B_CTRL2 0x00008004
+#define CS42L43_DECIM_HPF_WNF_CTRL1 0x0000803C
+#define CS42L43_DECIM_HPF_WNF_CTRL2 0x00008040
+#define CS42L43_DECIM_HPF_WNF_CTRL3 0x00008044
+#define CS42L43_DECIM_HPF_WNF_CTRL4 0x00008048
+#define CS42L43_DMIC_PDM_CTRL 0x0000804C
+#define CS42L43_DECIM_VOL_CTRL_CH1_CH2 0x00008050
+#define CS42L43_DECIM_VOL_CTRL_CH3_CH4 0x00008054
+#define CS42L43_DECIM_VOL_CTRL_UPDATE 0x00008058
+#define CS42L43_INTP_VOLUME_CTRL1 0x00009008
+#define CS42L43_INTP_VOLUME_CTRL2 0x0000900C
+#define CS42L43_AMP1_2_VOL_RAMP 0x00009010
+#define CS42L43_ASP_CTRL 0x0000A000
+#define CS42L43_ASP_FSYNC_CTRL1 0x0000A004
+#define CS42L43_ASP_FSYNC_CTRL2 0x0000A008
+#define CS42L43_ASP_FSYNC_CTRL3 0x0000A00C
+#define CS42L43_ASP_FSYNC_CTRL4 0x0000A010
+#define CS42L43_ASP_DATA_CTRL 0x0000A018
+#define CS42L43_ASP_RX_EN 0x0000A020
+#define CS42L43_ASP_TX_EN 0x0000A024
+#define CS42L43_ASP_RX_CH1_CTRL 0x0000A028
+#define CS42L43_ASP_RX_CH2_CTRL 0x0000A02C
+#define CS42L43_ASP_RX_CH3_CTRL 0x0000A030
+#define CS42L43_ASP_RX_CH4_CTRL 0x0000A034
+#define CS42L43_ASP_RX_CH5_CTRL 0x0000A038
+#define CS42L43_ASP_RX_CH6_CTRL 0x0000A03C
+#define CS42L43_ASP_TX_CH1_CTRL 0x0000A068
+#define CS42L43_ASP_TX_CH2_CTRL 0x0000A06C
+#define CS42L43_ASP_TX_CH3_CTRL 0x0000A070
+#define CS42L43_ASP_TX_CH4_CTRL 0x0000A074
+#define CS42L43_ASP_TX_CH5_CTRL 0x0000A078
+#define CS42L43_ASP_TX_CH6_CTRL 0x0000A07C
+#define CS42L43_OTP_REVISION_ID 0x0000B02C
+#define CS42L43_ASPTX1_INPUT 0x0000C200
+#define CS42L43_ASPTX2_INPUT 0x0000C210
+#define CS42L43_ASPTX3_INPUT 0x0000C220
+#define CS42L43_ASPTX4_INPUT 0x0000C230
+#define CS42L43_ASPTX5_INPUT 0x0000C240
+#define CS42L43_ASPTX6_INPUT 0x0000C250
+#define CS42L43_SWIRE_DP1_CH1_INPUT 0x0000C280
+#define CS42L43_SWIRE_DP1_CH2_INPUT 0x0000C290
+#define CS42L43_SWIRE_DP1_CH3_INPUT 0x0000C2A0
+#define CS42L43_SWIRE_DP1_CH4_INPUT 0x0000C2B0
+#define CS42L43_SWIRE_DP2_CH1_INPUT 0x0000C2C0
+#define CS42L43_SWIRE_DP2_CH2_INPUT 0x0000C2D0
+#define CS42L43_SWIRE_DP3_CH1_INPUT 0x0000C2E0
+#define CS42L43_SWIRE_DP3_CH2_INPUT 0x0000C2F0
+#define CS42L43_SWIRE_DP4_CH1_INPUT 0x0000C300
+#define CS42L43_SWIRE_DP4_CH2_INPUT 0x0000C310
+#define CS42L43_ASRC_INT1_INPUT1 0x0000C400
+#define CS42L43_ASRC_INT2_INPUT1 0x0000C410
+#define CS42L43_ASRC_INT3_INPUT1 0x0000C420
+#define CS42L43_ASRC_INT4_INPUT1 0x0000C430
+#define CS42L43_ASRC_DEC1_INPUT1 0x0000C440
+#define CS42L43_ASRC_DEC2_INPUT1 0x0000C450
+#define CS42L43_ASRC_DEC3_INPUT1 0x0000C460
+#define CS42L43_ASRC_DEC4_INPUT1 0x0000C470
+#define CS42L43_ISRC1INT1_INPUT1 0x0000C500
+#define CS42L43_ISRC1INT2_INPUT1 0x0000C510
+#define CS42L43_ISRC1DEC1_INPUT1 0x0000C520
+#define CS42L43_ISRC1DEC2_INPUT1 0x0000C530
+#define CS42L43_ISRC2INT1_INPUT1 0x0000C540
+#define CS42L43_ISRC2INT2_INPUT1 0x0000C550
+#define CS42L43_ISRC2DEC1_INPUT1 0x0000C560
+#define CS42L43_ISRC2DEC2_INPUT1 0x0000C570
+#define CS42L43_EQ1MIX_INPUT1 0x0000C580
+#define CS42L43_EQ1MIX_INPUT2 0x0000C584
+#define CS42L43_EQ1MIX_INPUT3 0x0000C588
+#define CS42L43_EQ1MIX_INPUT4 0x0000C58C
+#define CS42L43_EQ2MIX_INPUT1 0x0000C590
+#define CS42L43_EQ2MIX_INPUT2 0x0000C594
+#define CS42L43_EQ2MIX_INPUT3 0x0000C598
+#define CS42L43_EQ2MIX_INPUT4 0x0000C59C
+#define CS42L43_SPDIF1_INPUT1 0x0000C600
+#define CS42L43_SPDIF2_INPUT1 0x0000C610
+#define CS42L43_AMP1MIX_INPUT1 0x0000C620
+#define CS42L43_AMP1MIX_INPUT2 0x0000C624
+#define CS42L43_AMP1MIX_INPUT3 0x0000C628
+#define CS42L43_AMP1MIX_INPUT4 0x0000C62C
+#define CS42L43_AMP2MIX_INPUT1 0x0000C630
+#define CS42L43_AMP2MIX_INPUT2 0x0000C634
+#define CS42L43_AMP2MIX_INPUT3 0x0000C638
+#define CS42L43_AMP2MIX_INPUT4 0x0000C63C
+#define CS42L43_AMP3MIX_INPUT1 0x0000C640
+#define CS42L43_AMP3MIX_INPUT2 0x0000C644
+#define CS42L43_AMP3MIX_INPUT3 0x0000C648
+#define CS42L43_AMP3MIX_INPUT4 0x0000C64C
+#define CS42L43_AMP4MIX_INPUT1 0x0000C650
+#define CS42L43_AMP4MIX_INPUT2 0x0000C654
+#define CS42L43_AMP4MIX_INPUT3 0x0000C658
+#define CS42L43_AMP4MIX_INPUT4 0x0000C65C
+#define CS42L43_ASRC_INT_ENABLES 0x0000E000
+#define CS42L43_ASRC_DEC_ENABLES 0x0000E004
+#define CS42L43_PDNCNTL 0x00010000
+#define CS42L43_RINGSENSE_DEB_CTRL 0x0001001C
+#define CS42L43_TIPSENSE_DEB_CTRL 0x00010020
+#define CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS 0x00010028
+#define CS42L43_HS2 0x00010040
+#define CS42L43_HS_STAT 0x00010048
+#define CS42L43_MCU_SW_INTERRUPT 0x00010094
+#define CS42L43_STEREO_MIC_CTRL 0x000100A4
+#define CS42L43_STEREO_MIC_CLAMP_CTRL 0x000100C4
+#define CS42L43_BLOCK_EN2 0x00010104
+#define CS42L43_BLOCK_EN3 0x00010108
+#define CS42L43_BLOCK_EN4 0x0001010C
+#define CS42L43_BLOCK_EN5 0x00010110
+#define CS42L43_BLOCK_EN6 0x00010114
+#define CS42L43_BLOCK_EN7 0x00010118
+#define CS42L43_BLOCK_EN8 0x0001011C
+#define CS42L43_BLOCK_EN9 0x00010120
+#define CS42L43_BLOCK_EN10 0x00010124
+#define CS42L43_BLOCK_EN11 0x00010128
+#define CS42L43_TONE_CH1_CTRL 0x00010134
+#define CS42L43_TONE_CH2_CTRL 0x00010138
+#define CS42L43_MIC_DETECT_CONTROL_1 0x00011074
+#define CS42L43_DETECT_STATUS_1 0x0001107C
+#define CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL 0x00011090
+#define CS42L43_MIC_DETECT_CONTROL_ANDROID 0x000110B0
+#define CS42L43_ISRC1_CTRL 0x00012004
+#define CS42L43_ISRC2_CTRL 0x00013004
+#define CS42L43_CTRL_REG 0x00014000
+#define CS42L43_FDIV_FRAC 0x00014004
+#define CS42L43_CAL_RATIO 0x00014008
+#define CS42L43_SPI_CLK_CONFIG1 0x00016004
+#define CS42L43_SPI_CONFIG1 0x00016010
+#define CS42L43_SPI_CONFIG2 0x00016014
+#define CS42L43_SPI_CONFIG3 0x00016018
+#define CS42L43_SPI_CONFIG4 0x00016024
+#define CS42L43_SPI_STATUS1 0x00016100
+#define CS42L43_SPI_STATUS2 0x00016104
+#define CS42L43_TRAN_CONFIG1 0x00016200
+#define CS42L43_TRAN_CONFIG2 0x00016204
+#define CS42L43_TRAN_CONFIG3 0x00016208
+#define CS42L43_TRAN_CONFIG4 0x0001620C
+#define CS42L43_TRAN_CONFIG5 0x00016220
+#define CS42L43_TRAN_CONFIG6 0x00016224
+#define CS42L43_TRAN_CONFIG7 0x00016228
+#define CS42L43_TRAN_CONFIG8 0x0001622C
+#define CS42L43_TRAN_STATUS1 0x00016300
+#define CS42L43_TRAN_STATUS2 0x00016304
+#define CS42L43_TRAN_STATUS3 0x00016308
+#define CS42L43_TX_DATA 0x00016400
+#define CS42L43_RX_DATA 0x00016600
+#define CS42L43_DACCNFG1 0x00017000
+#define CS42L43_DACCNFG2 0x00017004
+#define CS42L43_HPPATHVOL 0x0001700C
+#define CS42L43_PGAVOL 0x00017014
+#define CS42L43_LOADDETRESULTS 0x00017018
+#define CS42L43_LOADDETENA 0x00017024
+#define CS42L43_CTRL 0x00017028
+#define CS42L43_COEFF_DATA_IN0 0x00018000
+#define CS42L43_COEFF_RD_WR0 0x00018008
+#define CS42L43_INIT_DONE0 0x00018010
+#define CS42L43_START_EQZ0 0x00018014
+#define CS42L43_MUTE_EQ_IN0 0x0001801C
+#define CS42L43_DECIM_INT 0x0001B000
+#define CS42L43_EQ_INT 0x0001B004
+#define CS42L43_ASP_INT 0x0001B008
+#define CS42L43_PLL_INT 0x0001B00C
+#define CS42L43_SOFT_INT 0x0001B010
+#define CS42L43_SWIRE_INT 0x0001B014
+#define CS42L43_MSM_INT 0x0001B018
+#define CS42L43_ACC_DET_INT 0x0001B01C
+#define CS42L43_I2C_TGT_INT 0x0001B020
+#define CS42L43_SPI_MSTR_INT 0x0001B024
+#define CS42L43_SW_TO_SPI_BRIDGE_INT 0x0001B028
+#define CS42L43_OTP_INT 0x0001B02C
+#define CS42L43_CLASS_D_AMP_INT 0x0001B030
+#define CS42L43_GPIO_INT 0x0001B034
+#define CS42L43_ASRC_INT 0x0001B038
+#define CS42L43_HPOUT_INT 0x0001B03C
+#define CS42L43_DECIM_MASK 0x0001B0A0
+#define CS42L43_EQ_MIX_MASK 0x0001B0A4
+#define CS42L43_ASP_MASK 0x0001B0A8
+#define CS42L43_PLL_MASK 0x0001B0AC
+#define CS42L43_SOFT_MASK 0x0001B0B0
+#define CS42L43_SWIRE_MASK 0x0001B0B4
+#define CS42L43_MSM_MASK 0x0001B0B8
+#define CS42L43_ACC_DET_MASK 0x0001B0BC
+#define CS42L43_I2C_TGT_MASK 0x0001B0C0
+#define CS42L43_SPI_MSTR_MASK 0x0001B0C4
+#define CS42L43_SW_TO_SPI_BRIDGE_MASK 0x0001B0C8
+#define CS42L43_OTP_MASK 0x0001B0CC
+#define CS42L43_CLASS_D_AMP_MASK 0x0001B0D0
+#define CS42L43_GPIO_INT_MASK 0x0001B0D4
+#define CS42L43_ASRC_MASK 0x0001B0D8
+#define CS42L43_HPOUT_MASK 0x0001B0DC
+#define CS42L43_DECIM_INT_SHADOW 0x0001B300
+#define CS42L43_EQ_MIX_INT_SHADOW 0x0001B304
+#define CS42L43_ASP_INT_SHADOW 0x0001B308
+#define CS42L43_PLL_INT_SHADOW 0x0001B30C
+#define CS42L43_SOFT_INT_SHADOW 0x0001B310
+#define CS42L43_SWIRE_INT_SHADOW 0x0001B314
+#define CS42L43_MSM_INT_SHADOW 0x0001B318
+#define CS42L43_ACC_DET_INT_SHADOW 0x0001B31C
+#define CS42L43_I2C_TGT_INT_SHADOW 0x0001B320
+#define CS42L43_SPI_MSTR_INT_SHADOW 0x0001B324
+#define CS42L43_SW_TO_SPI_BRIDGE_SHADOW 0x0001B328
+#define CS42L43_OTP_INT_SHADOW 0x0001B32C
+#define CS42L43_CLASS_D_AMP_INT_SHADOW 0x0001B330
+#define CS42L43_GPIO_SHADOW 0x0001B334
+#define CS42L43_ASRC_SHADOW 0x0001B338
+#define CS42L43_HP_OUT_SHADOW 0x0001B33C
+#define CS42L43_BOOT_CONTROL 0x00101000
+#define CS42L43_BLOCK_EN 0x00101008
+#define CS42L43_SHUTTER_CONTROL 0x0010100C
+#define CS42L43_MCU_SW_REV 0x00114000
+#define CS42L43_PATCH_START_ADDR 0x00114004
+#define CS42L43_NEED_CONFIGS 0x0011400C
+#define CS42L43_BOOT_STATUS 0x0011401C
+#define CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS 0x0011F8F8
+#define CS42L43_FW_MISSION_CTRL_NEED_CONFIGS 0x0011FE00
+#define CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS 0x0011FE04
+#define CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x0011FE0C
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x0011FE10
+#define CS42L43_MCU_RAM_MAX 0x0011FFFF
+
+/* CS42L43_DEVID */
+#define CS42L43_DEVID_VAL 0x00042A43
+
+/* CS42L43_GEN_INT_STAT_1 */
+#define CS42L43_INT_STAT_GEN1_MASK 0x00000001
+#define CS42L43_INT_STAT_GEN1_SHIFT 0
+
+/* CS42L43_SFT_RESET */
+#define CS42L43_SFT_RESET_MASK 0xFF000000
+#define CS42L43_SFT_RESET_SHIFT 24
+
+#define CS42L43_SFT_RESET_VAL 0x5A000000
+
+/* CS42L43_DRV_CTRL1 */
+#define CS42L43_ASP_DOUT_DRV_MASK 0x00038000
+#define CS42L43_ASP_DOUT_DRV_SHIFT 15
+#define CS42L43_ASP_FSYNC_DRV_MASK 0x00000E00
+#define CS42L43_ASP_FSYNC_DRV_SHIFT 9
+#define CS42L43_ASP_BCLK_DRV_MASK 0x000001C0
+#define CS42L43_ASP_BCLK_DRV_SHIFT 6
+
+/* CS42L43_DRV_CTRL3 */
+#define CS42L43_I2C_ADDR_DRV_MASK 0x30000000
+#define CS42L43_I2C_ADDR_DRV_SHIFT 28
+#define CS42L43_I2C_SDA_DRV_MASK 0x0C000000
+#define CS42L43_I2C_SDA_DRV_SHIFT 26
+#define CS42L43_PDMOUT2_CLK_DRV_MASK 0x00E00000
+#define CS42L43_PDMOUT2_CLK_DRV_SHIFT 21
+#define CS42L43_PDMOUT2_DATA_DRV_MASK 0x001C0000
+#define CS42L43_PDMOUT2_DATA_DRV_SHIFT 18
+#define CS42L43_PDMOUT1_CLK_DRV_MASK 0x00038000
+#define CS42L43_PDMOUT1_CLK_DRV_SHIFT 15
+#define CS42L43_PDMOUT1_DATA_DRV_MASK 0x00007000
+#define CS42L43_PDMOUT1_DATA_DRV_SHIFT 12
+#define CS42L43_SPI_MISO_DRV_MASK 0x00000038
+#define CS42L43_SPI_MISO_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL4 */
+#define CS42L43_GPIO3_DRV_MASK 0x00000E00
+#define CS42L43_GPIO3_DRV_SHIFT 9
+#define CS42L43_GPIO2_DRV_MASK 0x000001C0
+#define CS42L43_GPIO2_DRV_SHIFT 6
+#define CS42L43_GPIO1_DRV_MASK 0x00000038
+#define CS42L43_GPIO1_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL_5 */
+#define CS42L43_I2C_SCL_DRV_MASK 0x18000000
+#define CS42L43_I2C_SCL_DRV_SHIFT 27
+#define CS42L43_SPI_SCK_DRV_MASK 0x07000000
+#define CS42L43_SPI_SCK_DRV_SHIFT 24
+#define CS42L43_SPI_MOSI_DRV_MASK 0x00E00000
+#define CS42L43_SPI_MOSI_DRV_SHIFT 21
+#define CS42L43_SPI_SSB_DRV_MASK 0x001C0000
+#define CS42L43_SPI_SSB_DRV_SHIFT 18
+#define CS42L43_ASP_DIN_DRV_MASK 0x000001C0
+#define CS42L43_ASP_DIN_DRV_SHIFT 6
+
+/* CS42L43_GPIO_CTRL1 */
+#define CS42L43_GPIO3_POL_MASK 0x00040000
+#define CS42L43_GPIO3_POL_SHIFT 18
+#define CS42L43_GPIO2_POL_MASK 0x00020000
+#define CS42L43_GPIO2_POL_SHIFT 17
+#define CS42L43_GPIO1_POL_MASK 0x00010000
+#define CS42L43_GPIO1_POL_SHIFT 16
+#define CS42L43_GPIO3_LVL_MASK 0x00000400
+#define CS42L43_GPIO3_LVL_SHIFT 10
+#define CS42L43_GPIO2_LVL_MASK 0x00000200
+#define CS42L43_GPIO2_LVL_SHIFT 9
+#define CS42L43_GPIO1_LVL_MASK 0x00000100
+#define CS42L43_GPIO1_LVL_SHIFT 8
+#define CS42L43_GPIO3_DIR_MASK 0x00000004
+#define CS42L43_GPIO3_DIR_SHIFT 2
+#define CS42L43_GPIO2_DIR_MASK 0x00000002
+#define CS42L43_GPIO2_DIR_SHIFT 1
+#define CS42L43_GPIO1_DIR_MASK 0x00000001
+#define CS42L43_GPIO1_DIR_SHIFT 0
+
+/* CS42L43_GPIO_CTRL2 */
+#define CS42L43_GPIO3_DEGLITCH_BYP_MASK 0x00000004
+#define CS42L43_GPIO3_DEGLITCH_BYP_SHIFT 2
+#define CS42L43_GPIO2_DEGLITCH_BYP_MASK 0x00000002
+#define CS42L43_GPIO2_DEGLITCH_BYP_SHIFT 1
+#define CS42L43_GPIO1_DEGLITCH_BYP_MASK 0x00000001
+#define CS42L43_GPIO1_DEGLITCH_BYP_SHIFT 0
+
+/* CS42L43_GPIO_STS */
+#define CS42L43_GPIO3_STS_MASK 0x00000004
+#define CS42L43_GPIO3_STS_SHIFT 2
+#define CS42L43_GPIO2_STS_MASK 0x00000002
+#define CS42L43_GPIO2_STS_SHIFT 1
+#define CS42L43_GPIO1_STS_MASK 0x00000001
+#define CS42L43_GPIO1_STS_SHIFT 0
+
+/* CS42L43_GPIO_FN_SEL */
+#define CS42L43_GPIO3_FN_SEL_MASK 0x00000004
+#define CS42L43_GPIO3_FN_SEL_SHIFT 2
+#define CS42L43_GPIO1_FN_SEL_MASK 0x00000001
+#define CS42L43_GPIO1_FN_SEL_SHIFT 0
+
+/* CS42L43_MCLK_SRC_SEL */
+#define CS42L43_OSC_PLL_MCLK_SEL_MASK 0x00000001
+#define CS42L43_OSC_PLL_MCLK_SEL_SHIFT 0
+
+/* CS42L43_SAMPLE_RATE1..CS42L43_SAMPLE_RATE4 */
+#define CS42L43_SAMPLE_RATE_MASK 0x0000001F
+#define CS42L43_SAMPLE_RATE_SHIFT 0
+
+/* CS42L43_PLL_CONTROL */
+#define CS42L43_PLL_REFCLK_EN_MASK 0x00000008
+#define CS42L43_PLL_REFCLK_EN_SHIFT 3
+#define CS42L43_PLL_REFCLK_DIV_MASK 0x00000006
+#define CS42L43_PLL_REFCLK_DIV_SHIFT 1
+#define CS42L43_PLL_REFCLK_SRC_MASK 0x00000001
+#define CS42L43_PLL_REFCLK_SRC_SHIFT 0
+
+/* CS42L43_FS_SELECT1 */
+#define CS42L43_ASP_RATE_MASK 0x00000003
+#define CS42L43_ASP_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT2 */
+#define CS42L43_ASRC_DEC_OUT_RATE_MASK 0x000000C0
+#define CS42L43_ASRC_DEC_OUT_RATE_SHIFT 6
+#define CS42L43_ASRC_INT_OUT_RATE_MASK 0x00000030
+#define CS42L43_ASRC_INT_OUT_RATE_SHIFT 4
+#define CS42L43_ASRC_DEC_IN_RATE_MASK 0x0000000C
+#define CS42L43_ASRC_DEC_IN_RATE_SHIFT 2
+#define CS42L43_ASRC_INT_IN_RATE_MASK 0x00000003
+#define CS42L43_ASRC_INT_IN_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT3 */
+#define CS42L43_HPOUT_RATE_MASK 0x0000C000
+#define CS42L43_HPOUT_RATE_SHIFT 14
+#define CS42L43_EQZ_RATE_MASK 0x00003000
+#define CS42L43_EQZ_RATE_SHIFT 12
+#define CS42L43_DIAGGEN_RATE_MASK 0x00000C00
+#define CS42L43_DIAGGEN_RATE_SHIFT 10
+#define CS42L43_DECIM_CH4_RATE_MASK 0x00000300
+#define CS42L43_DECIM_CH4_RATE_SHIFT 8
+#define CS42L43_DECIM_CH3_RATE_MASK 0x000000C0
+#define CS42L43_DECIM_CH3_RATE_SHIFT 6
+#define CS42L43_DECIM_CH2_RATE_MASK 0x00000030
+#define CS42L43_DECIM_CH2_RATE_SHIFT 4
+#define CS42L43_DECIM_CH1_RATE_MASK 0x0000000C
+#define CS42L43_DECIM_CH1_RATE_SHIFT 2
+#define CS42L43_AMP1_2_RATE_MASK 0x00000003
+#define CS42L43_AMP1_2_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT4 */
+#define CS42L43_SW_DP7_RATE_MASK 0x00C00000
+#define CS42L43_SW_DP7_RATE_SHIFT 22
+#define CS42L43_SW_DP6_RATE_MASK 0x00300000
+#define CS42L43_SW_DP6_RATE_SHIFT 20
+#define CS42L43_SPDIF_RATE_MASK 0x000C0000
+#define CS42L43_SPDIF_RATE_SHIFT 18
+#define CS42L43_SW_DP5_RATE_MASK 0x00030000
+#define CS42L43_SW_DP5_RATE_SHIFT 16
+#define CS42L43_SW_DP4_RATE_MASK 0x0000C000
+#define CS42L43_SW_DP4_RATE_SHIFT 14
+#define CS42L43_SW_DP3_RATE_MASK 0x00003000
+#define CS42L43_SW_DP3_RATE_SHIFT 12
+#define CS42L43_SW_DP2_RATE_MASK 0x00000C00
+#define CS42L43_SW_DP2_RATE_SHIFT 10
+#define CS42L43_SW_DP1_RATE_MASK 0x00000300
+#define CS42L43_SW_DP1_RATE_SHIFT 8
+#define CS42L43_ISRC2_LOW_RATE_MASK 0x000000C0
+#define CS42L43_ISRC2_LOW_RATE_SHIFT 6
+#define CS42L43_ISRC2_HIGH_RATE_MASK 0x00000030
+#define CS42L43_ISRC2_HIGH_RATE_SHIFT 4
+#define CS42L43_ISRC1_LOW_RATE_MASK 0x0000000C
+#define CS42L43_ISRC1_LOW_RATE_SHIFT 2
+#define CS42L43_ISRC1_HIGH_RATE_MASK 0x00000003
+#define CS42L43_ISRC1_HIGH_RATE_SHIFT 0
+
+/* CS42L43_PDM_CONTROL */
+#define CS42L43_PDM2_CLK_DIV_MASK 0x0000000C
+#define CS42L43_PDM2_CLK_DIV_SHIFT 2
+#define CS42L43_PDM1_CLK_DIV_MASK 0x00000003
+#define CS42L43_PDM1_CLK_DIV_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG1 */
+#define CS42L43_ASP_BCLK_N_MASK 0x03FF0000
+#define CS42L43_ASP_BCLK_N_SHIFT 16
+#define CS42L43_ASP_BCLK_M_MASK 0x000003FF
+#define CS42L43_ASP_BCLK_M_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG2 */
+#define CS42L43_ASP_MASTER_MODE_MASK 0x00000002
+#define CS42L43_ASP_MASTER_MODE_SHIFT 1
+#define CS42L43_ASP_BCLK_INV_MASK 0x00000001
+#define CS42L43_ASP_BCLK_INV_SHIFT 0
+
+/* CS42L43_OSC_DIV_SEL */
+#define CS42L43_OSC_DIV2_EN_MASK 0x00000001
+#define CS42L43_OSC_DIV2_EN_SHIFT 0
+
+/* CS42L43_ADC_B_CTRL1..CS42L43_ADC_B_CTRL1 */
+#define CS42L43_PGA_WIDESWING_MODE_EN_MASK 0x00000080
+#define CS42L43_PGA_WIDESWING_MODE_EN_SHIFT 7
+#define CS42L43_ADC_AIN_SEL_MASK 0x00000010
+#define CS42L43_ADC_AIN_SEL_SHIFT 4
+#define CS42L43_ADC_PGA_GAIN_MASK 0x0000000F
+#define CS42L43_ADC_PGA_GAIN_SHIFT 0
+
+/* CS42L43_DECIM_HPF_WNF_CTRL1..CS42L43_DECIM_HPF_WNF_CTRL4 */
+#define CS42L43_DECIM_WNF_CF_MASK 0x00000070
+#define CS42L43_DECIM_WNF_CF_SHIFT 4
+#define CS42L43_DECIM_WNF_EN_MASK 0x00000008
+#define CS42L43_DECIM_WNF_EN_SHIFT 3
+#define CS42L43_DECIM_HPF_CF_MASK 0x00000006
+#define CS42L43_DECIM_HPF_CF_SHIFT 1
+#define CS42L43_DECIM_HPF_EN_MASK 0x00000001
+#define CS42L43_DECIM_HPF_EN_SHIFT 0
+
+/* CS42L43_DMIC_PDM_CTRL */
+#define CS42L43_PDM2R_INV_MASK 0x00000020
+#define CS42L43_PDM2R_INV_SHIFT 5
+#define CS42L43_PDM2L_INV_MASK 0x00000010
+#define CS42L43_PDM2L_INV_SHIFT 4
+#define CS42L43_PDM1R_INV_MASK 0x00000008
+#define CS42L43_PDM1R_INV_SHIFT 3
+#define CS42L43_PDM1L_INV_MASK 0x00000004
+#define CS42L43_PDM1L_INV_SHIFT 2
+
+/* CS42L43_DECIM_VOL_CTRL_CH1_CH2 */
+#define CS42L43_DECIM2_MUTE_MASK 0x80000000
+#define CS42L43_DECIM2_MUTE_SHIFT 31
+#define CS42L43_DECIM2_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM2_VOL_SHIFT 22
+#define CS42L43_DECIM2_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM2_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM2_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM2_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM1_MUTE_MASK 0x00008000
+#define CS42L43_DECIM1_MUTE_SHIFT 15
+#define CS42L43_DECIM1_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM1_VOL_SHIFT 6
+#define CS42L43_DECIM1_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM1_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM1_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM1_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_CH3_CH4 */
+#define CS42L43_DECIM4_MUTE_MASK 0x80000000
+#define CS42L43_DECIM4_MUTE_SHIFT 31
+#define CS42L43_DECIM4_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM4_VOL_SHIFT 22
+#define CS42L43_DECIM4_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM4_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM4_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM4_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM3_MUTE_MASK 0x00008000
+#define CS42L43_DECIM3_MUTE_SHIFT 15
+#define CS42L43_DECIM3_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM3_VOL_SHIFT 6
+#define CS42L43_DECIM3_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM3_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM3_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM3_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_UPDATE */
+#define CS42L43_DECIM4_VOL_UPDATE_MASK 0x00000008
+#define CS42L43_DECIM4_VOL_UPDATE_SHIFT 3
+#define CS42L43_DECIM3_VOL_UPDATE_MASK 0x00000004
+#define CS42L43_DECIM3_VOL_UPDATE_SHIFT 2
+#define CS42L43_DECIM2_VOL_UPDATE_MASK 0x00000002
+#define CS42L43_DECIM2_VOL_UPDATE_SHIFT 1
+#define CS42L43_DECIM1_VOL_UPDATE_MASK 0x00000001
+#define CS42L43_DECIM1_VOL_UPDATE_SHIFT 0
+
+/* CS42L43_INTP_VOLUME_CTRL1..CS42L43_INTP_VOLUME_CTRL2 */
+#define CS42L43_AMP1_2_VU_MASK 0x00000200
+#define CS42L43_AMP1_2_VU_SHIFT 9
+#define CS42L43_AMP_MUTE_MASK 0x00000100
+#define CS42L43_AMP_MUTE_SHIFT 8
+#define CS42L43_AMP_VOL_MASK 0x000000FF
+#define CS42L43_AMP_VOL_SHIFT 0
+
+/* CS42L43_AMP1_2_VOL_RAMP */
+#define CS42L43_AMP1_2_VD_RAMP_MASK 0x00000070
+#define CS42L43_AMP1_2_VD_RAMP_SHIFT 4
+#define CS42L43_AMP1_2_VI_RAMP_MASK 0x00000007
+#define CS42L43_AMP1_2_VI_RAMP_SHIFT 0
+
+/* CS42L43_ASP_CTRL */
+#define CS42L43_ASP_FSYNC_MODE_MASK 0x00000004
+#define CS42L43_ASP_FSYNC_MODE_SHIFT 2
+#define CS42L43_ASP_BCLK_EN_MASK 0x00000002
+#define CS42L43_ASP_BCLK_EN_SHIFT 1
+#define CS42L43_ASP_FSYNC_EN_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_EN_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL1 */
+#define CS42L43_ASP_FSYNC_M_MASK 0x0007FFFF
+#define CS42L43_ASP_FSYNC_M_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL3 */
+#define CS42L43_ASP_FSYNC_IN_INV_MASK 0x00000002
+#define CS42L43_ASP_FSYNC_IN_INV_SHIFT 1
+#define CS42L43_ASP_FSYNC_OUT_INV_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_OUT_INV_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL4 */
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_MASK 0x00001FFE
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_SHIFT 1
+
+/* CS42L43_ASP_DATA_CTRL */
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_MASK 0x00000008
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_SHIFT 3
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_MASK 0x00000007
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_SHIFT 0
+
+/* CS42L43_ASP_RX_EN */
+#define CS42L43_ASP_RX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_RX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_RX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_RX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_RX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_RX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_RX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_RX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_RX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_RX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_RX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_RX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_TX_EN */
+#define CS42L43_ASP_TX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_TX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_TX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_TX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_TX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_TX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_TX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_TX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_TX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_TX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_TX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_TX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_RX_CH1_CTRL..CS42L43_ASP_TX_CH6_CTRL */
+#define CS42L43_ASP_CH_WIDTH_MASK 0x001F0000
+#define CS42L43_ASP_CH_WIDTH_SHIFT 16
+#define CS42L43_ASP_CH_SLOT_MASK 0x00001FFE
+#define CS42L43_ASP_CH_SLOT_SHIFT 1
+#define CS42L43_ASP_CH_SLOT_PHASE_MASK 0x00000001
+#define CS42L43_ASP_CH_SLOT_PHASE_SHIFT 0
+
+/* CS42L43_ASPTX1_INPUT..CS42L43_AMP4MIX_INPUT4 */
+#define CS42L43_MIXER_VOL_MASK 0x00FE0000
+#define CS42L43_MIXER_VOL_SHIFT 17
+#define CS42L43_MIXER_SRC_MASK 0x000001FF
+#define CS42L43_MIXER_SRC_SHIFT 0
+
+/* CS42L43_ASRC_INT_ENABLES */
+#define CS42L43_ASRC_INT4_EN_MASK 0x00000008
+#define CS42L43_ASRC_INT4_EN_SHIFT 3
+#define CS42L43_ASRC_INT3_EN_MASK 0x00000004
+#define CS42L43_ASRC_INT3_EN_SHIFT 2
+#define CS42L43_ASRC_INT2_EN_MASK 0x00000002
+#define CS42L43_ASRC_INT2_EN_SHIFT 1
+#define CS42L43_ASRC_INT1_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT1_EN_SHIFT 0
+
+/* CS42L43_ASRC_DEC_ENABLES */
+#define CS42L43_ASRC_DEC4_EN_MASK 0x00000008
+#define CS42L43_ASRC_DEC4_EN_SHIFT 3
+#define CS42L43_ASRC_DEC3_EN_MASK 0x00000004
+#define CS42L43_ASRC_DEC3_EN_SHIFT 2
+#define CS42L43_ASRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC2_EN_SHIFT 1
+#define CS42L43_ASRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ASRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_PDNCNTL */
+#define CS42L43_RING_SENSE_EN_MASK 0x00000002
+#define CS42L43_RING_SENSE_EN_SHIFT 1
+
+/* CS42L43_RINGSENSE_DEB_CTRL */
+#define CS42L43_RINGSENSE_INV_MASK 0x00000080
+#define CS42L43_RINGSENSE_INV_SHIFT 7
+#define CS42L43_RINGSENSE_PULLUP_PDNB_MASK 0x00000040
+#define CS42L43_RINGSENSE_PULLUP_PDNB_SHIFT 6
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_RINGSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_RINGSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIPSENSE_DEB_CTRL */
+#define CS42L43_TIPSENSE_INV_MASK 0x00000080
+#define CS42L43_TIPSENSE_INV_SHIFT 7
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_TIPSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_TIPSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS */
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_DB_STS_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_DB_STS_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_DB_STS_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_DB_STS_SHIFT 0
+
+/* CS42L43_HS2 */
+#define CS42L43_HS_CLAMP_DISABLE_MASK 0x10000000
+#define CS42L43_HS_CLAMP_DISABLE_SHIFT 28
+#define CS42L43_HSBIAS_RAMP_MASK 0x0C000000
+#define CS42L43_HSBIAS_RAMP_SHIFT 26
+#define CS42L43_HSDET_MODE_MASK 0x00018000
+#define CS42L43_HSDET_MODE_SHIFT 15
+#define CS42L43_HSDET_MANUAL_MODE_MASK 0x00006000
+#define CS42L43_HSDET_MANUAL_MODE_SHIFT 13
+#define CS42L43_AUTO_HSDET_TIME_MASK 0x00000700
+#define CS42L43_AUTO_HSDET_TIME_SHIFT 8
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_MASK 0x00000080
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_SHIFT 7
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_MASK 0x00000040
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_SHIFT 6
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_MASK 0x00000020
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_SHIFT 5
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_MASK 0x00000010
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_SHIFT 4
+#define CS42L43_HSBIAS_OUT_HS3_SEL_MASK 0x00000008
+#define CS42L43_HSBIAS_OUT_HS3_SEL_SHIFT 3
+#define CS42L43_HSBIAS_OUT_HS4_SEL_MASK 0x00000004
+#define CS42L43_HSBIAS_OUT_HS4_SEL_SHIFT 2
+#define CS42L43_HSGND_HS3_SEL_MASK 0x00000002
+#define CS42L43_HSGND_HS3_SEL_SHIFT 1
+#define CS42L43_HSGND_HS4_SEL_MASK 0x00000001
+#define CS42L43_HSGND_HS4_SEL_SHIFT 0
+
+/* CS42L43_HS_STAT */
+#define CS42L43_HSDET_TYPE_STS_MASK 0x00000007
+#define CS42L43_HSDET_TYPE_STS_SHIFT 0
+
+/* CS42L43_MCU_SW_INTERRUPT */
+#define CS42L43_CONTROL_IND_MASK 0x00000004
+#define CS42L43_CONTROL_IND_SHIFT 2
+#define CS42L43_CONFIGS_IND_MASK 0x00000002
+#define CS42L43_CONFIGS_IND_SHIFT 1
+#define CS42L43_PATCH_IND_MASK 0x00000001
+#define CS42L43_PATCH_IND_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CTRL */
+#define CS42L43_HS2_BIAS_SENSE_EN_MASK 0x00000020
+#define CS42L43_HS2_BIAS_SENSE_EN_SHIFT 5
+#define CS42L43_HS1_BIAS_SENSE_EN_MASK 0x00000010
+#define CS42L43_HS1_BIAS_SENSE_EN_SHIFT 4
+#define CS42L43_HS2_BIAS_EN_MASK 0x00000008
+#define CS42L43_HS2_BIAS_EN_SHIFT 3
+#define CS42L43_HS1_BIAS_EN_MASK 0x00000004
+#define CS42L43_HS1_BIAS_EN_SHIFT 2
+#define CS42L43_JACK_STEREO_CONFIG_MASK 0x00000003
+#define CS42L43_JACK_STEREO_CONFIG_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CLAMP_CTRL */
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK 0x00000002
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_SHIFT 1
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK 0x00000001
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_SHIFT 0
+
+/* CS42L43_BLOCK_EN2 */
+#define CS42L43_SPI_MSTR_EN_MASK 0x00000001
+#define CS42L43_SPI_MSTR_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN3 */
+#define CS42L43_PDM2_DIN_R_EN_MASK 0x00000020
+#define CS42L43_PDM2_DIN_R_EN_SHIFT 5
+#define CS42L43_PDM2_DIN_L_EN_MASK 0x00000010
+#define CS42L43_PDM2_DIN_L_EN_SHIFT 4
+#define CS42L43_PDM1_DIN_R_EN_MASK 0x00000008
+#define CS42L43_PDM1_DIN_R_EN_SHIFT 3
+#define CS42L43_PDM1_DIN_L_EN_MASK 0x00000004
+#define CS42L43_PDM1_DIN_L_EN_SHIFT 2
+#define CS42L43_ADC2_EN_MASK 0x00000002
+#define CS42L43_ADC2_EN_SHIFT 1
+#define CS42L43_ADC1_EN_MASK 0x00000001
+#define CS42L43_ADC1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN4 */
+#define CS42L43_ASRC_DEC_BANK_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC_BANK_EN_SHIFT 1
+#define CS42L43_ASRC_INT_BANK_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN5 */
+#define CS42L43_ISRC2_BANK_EN_MASK 0x00000002
+#define CS42L43_ISRC2_BANK_EN_SHIFT 1
+#define CS42L43_ISRC1_BANK_EN_MASK 0x00000001
+#define CS42L43_ISRC1_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN6 */
+#define CS42L43_MIXER_EN_MASK 0x00000001
+#define CS42L43_MIXER_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN7 */
+#define CS42L43_EQ_EN_MASK 0x00000001
+#define CS42L43_EQ_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN8 */
+#define CS42L43_HP_EN_MASK 0x00000001
+#define CS42L43_HP_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN9 */
+#define CS42L43_TONE_EN_MASK 0x00000001
+#define CS42L43_TONE_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN10 */
+#define CS42L43_AMP2_EN_MASK 0x00000002
+#define CS42L43_AMP2_EN_SHIFT 1
+#define CS42L43_AMP1_EN_MASK 0x00000001
+#define CS42L43_AMP1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN11 */
+#define CS42L43_SPDIF_EN_MASK 0x00000001
+#define CS42L43_SPDIF_EN_SHIFT 0
+
+/* CS42L43_TONE_CH1_CTRL..CS42L43_TONE_CH2_CTRL */
+#define CS42L43_TONE_FREQ_MASK 0x00000070
+#define CS42L43_TONE_FREQ_SHIFT 4
+#define CS42L43_TONE_SEL_MASK 0x0000000F
+#define CS42L43_TONE_SEL_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_1 */
+#define CS42L43_BUTTON_DETECT_MODE_MASK 0x00000018
+#define CS42L43_BUTTON_DETECT_MODE_SHIFT 3
+#define CS42L43_HSBIAS_MODE_MASK 0x00000006
+#define CS42L43_HSBIAS_MODE_SHIFT 1
+#define CS42L43_MIC_LVL_DET_DISABLE_MASK 0x00000001
+#define CS42L43_MIC_LVL_DET_DISABLE_SHIFT 0
+
+/* CS42L43_DETECT_STATUS_1 */
+#define CS42L43_HSDET_DC_STS_MASK 0x01FF0000
+#define CS42L43_HSDET_DC_STS_SHIFT 16
+#define CS42L43_JACKDET_STS_MASK 0x00000080
+#define CS42L43_JACKDET_STS_SHIFT 7
+#define CS42L43_HSBIAS_CLAMP_STS_MASK 0x00000040
+#define CS42L43_HSBIAS_CLAMP_STS_SHIFT 6
+
+/* CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL */
+#define CS42L43_JACKDET_MODE_MASK 0xC0000000
+#define CS42L43_JACKDET_MODE_SHIFT 30
+#define CS42L43_JACKDET_INV_MASK 0x20000000
+#define CS42L43_JACKDET_INV_SHIFT 29
+#define CS42L43_JACKDET_DB_TIME_MASK 0x03000000
+#define CS42L43_JACKDET_DB_TIME_SHIFT 24
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_MASK 0x00800000
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_SHIFT 23
+#define CS42L43_HSBIAS_SENSE_EN_MASK 0x00000080
+#define CS42L43_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK 0x00000040
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_SHIFT 6
+#define CS42L43_JACKDET_SENSE_EN_MASK 0x00000020
+#define CS42L43_JACKDET_SENSE_EN_SHIFT 5
+#define CS42L43_HSBIAS_SENSE_TRIP_MASK 0x00000007
+#define CS42L43_HSBIAS_SENSE_TRIP_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_ANDROID */
+#define CS42L43_HSDET_LVL_COMBWIDTH_MASK 0xC0000000
+#define CS42L43_HSDET_LVL_COMBWIDTH_SHIFT 30
+#define CS42L43_HSDET_LVL2_THRESH_MASK 0x01FF0000
+#define CS42L43_HSDET_LVL2_THRESH_SHIFT 16
+#define CS42L43_HSDET_LVL1_THRESH_MASK 0x000001FF
+#define CS42L43_HSDET_LVL1_THRESH_SHIFT 0
+
+/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */
+#define CS42L43_ISRC_INT2_EN_MASK 0x00000200
+#define CS42L43_ISRC_INT2_EN_SHIFT 9
+#define CS42L43_ISRC_INT1_EN_MASK 0x00000100
+#define CS42L43_ISRC_INT1_EN_SHIFT 8
+#define CS42L43_ISRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ISRC_DEC2_EN_SHIFT 1
+#define CS42L43_ISRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ISRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_CTRL_REG */
+#define CS42L43_PLL_MODE_BYPASS_500_MASK 0x00000004
+#define CS42L43_PLL_MODE_BYPASS_500_SHIFT 2
+#define CS42L43_PLL_MODE_BYPASS_1029_MASK 0x00000002
+#define CS42L43_PLL_MODE_BYPASS_1029_SHIFT 1
+#define CS42L43_PLL_EN_MASK 0x00000001
+#define CS42L43_PLL_EN_SHIFT 0
+
+/* CS42L43_FDIV_FRAC */
+#define CS42L43_PLL_DIV_INT_MASK 0xFF000000
+#define CS42L43_PLL_DIV_INT_SHIFT 24
+#define CS42L43_PLL_DIV_FRAC_BYTE2_MASK 0x00FF0000
+#define CS42L43_PLL_DIV_FRAC_BYTE2_SHIFT 16
+#define CS42L43_PLL_DIV_FRAC_BYTE1_MASK 0x0000FF00
+#define CS42L43_PLL_DIV_FRAC_BYTE1_SHIFT 8
+#define CS42L43_PLL_DIV_FRAC_BYTE0_MASK 0x000000FF
+#define CS42L43_PLL_DIV_FRAC_BYTE0_SHIFT 0
+
+/* CS42L43_CAL_RATIO */
+#define CS42L43_PLL_CAL_RATIO_MASK 0x000000FF
+#define CS42L43_PLL_CAL_RATIO_SHIFT 0
+
+/* CS42L43_SPI_CLK_CONFIG1 */
+#define CS42L43_SCLK_DIV_MASK 0x0000000F
+#define CS42L43_SCLK_DIV_SHIFT 0
+
+/* CS42L43_SPI_CONFIG1 */
+#define CS42L43_SPI_SS_IDLE_DUR_MASK 0x0F000000
+#define CS42L43_SPI_SS_IDLE_DUR_SHIFT 24
+#define CS42L43_SPI_SS_DELAY_DUR_MASK 0x000F0000
+#define CS42L43_SPI_SS_DELAY_DUR_SHIFT 16
+#define CS42L43_SPI_THREE_WIRE_MASK 0x00000100
+#define CS42L43_SPI_THREE_WIRE_SHIFT 8
+#define CS42L43_SPI_DPHA_MASK 0x00000040
+#define CS42L43_SPI_DPHA_SHIFT 6
+#define CS42L43_SPI_CPHA_MASK 0x00000020
+#define CS42L43_SPI_CPHA_SHIFT 5
+#define CS42L43_SPI_CPOL_MASK 0x00000010
+#define CS42L43_SPI_CPOL_SHIFT 4
+#define CS42L43_SPI_SS_SEL_MASK 0x00000007
+#define CS42L43_SPI_SS_SEL_SHIFT 0
+
+/* CS42L43_SPI_CONFIG2 */
+#define CS42L43_SPI_SS_FRC_MASK 0x00000001
+#define CS42L43_SPI_SS_FRC_SHIFT 0
+
+/* CS42L43_SPI_CONFIG3 */
+#define CS42L43_SPI_WDT_ENA_MASK 0x00000001
+#define CS42L43_SPI_WDT_ENA_SHIFT 0
+
+/* CS42L43_SPI_CONFIG4 */
+#define CS42L43_SPI_STALL_ENA_MASK 0x00010000
+#define CS42L43_SPI_STALL_ENA_SHIFT 16
+
+/* CS42L43_SPI_STATUS1 */
+#define CS42L43_SPI_ABORT_STS_MASK 0x00000002
+#define CS42L43_SPI_ABORT_STS_SHIFT 1
+#define CS42L43_SPI_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_DONE_STS_SHIFT 0
+
+/* CS42L43_SPI_STATUS2 */
+#define CS42L43_SPI_RX_DONE_STS_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_STS_SHIFT 4
+#define CS42L43_SPI_TX_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_STS_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG1 */
+#define CS42L43_SPI_START_MASK 0x00000001
+#define CS42L43_SPI_START_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG2 */
+#define CS42L43_SPI_ABORT_MASK 0x00000001
+#define CS42L43_SPI_ABORT_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG3 */
+#define CS42L43_SPI_WORD_SIZE_MASK 0x00070000
+#define CS42L43_SPI_WORD_SIZE_SHIFT 16
+#define CS42L43_SPI_CMD_MASK 0x00000003
+#define CS42L43_SPI_CMD_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG4 */
+#define CS42L43_SPI_TX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG5 */
+#define CS42L43_SPI_RX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG6 */
+#define CS42L43_SPI_TX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_TX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG7 */
+#define CS42L43_SPI_RX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_RX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG8 */
+#define CS42L43_SPI_RX_DONE_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_SHIFT 4
+#define CS42L43_SPI_TX_DONE_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_SHIFT 0
+
+/* CS42L43_TRAN_STATUS1 */
+#define CS42L43_SPI_BUSY_STS_MASK 0x00000100
+#define CS42L43_SPI_BUSY_STS_SHIFT 8
+#define CS42L43_SPI_RX_REQUEST_MASK 0x00000010
+#define CS42L43_SPI_RX_REQUEST_SHIFT 4
+#define CS42L43_SPI_TX_REQUEST_MASK 0x00000001
+#define CS42L43_SPI_TX_REQUEST_SHIFT 0
+
+/* CS42L43_TRAN_STATUS2 */
+#define CS42L43_SPI_TX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TRAN_STATUS3 */
+#define CS42L43_SPI_RX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TX_DATA */
+#define CS42L43_SPI_TX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_TX_DATA_SHIFT 0
+
+/* CS42L43_RX_DATA */
+#define CS42L43_SPI_RX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_RX_DATA_SHIFT 0
+
+/* CS42L43_DACCNFG1 */
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_MASK 0x00000008
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_SHIFT 3
+#define CS42L43_AMP4_INV_MASK 0x00000002
+#define CS42L43_AMP4_INV_SHIFT 1
+#define CS42L43_AMP3_INV_MASK 0x00000001
+#define CS42L43_AMP3_INV_SHIFT 0
+
+/* CS42L43_DACCNFG2 */
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_MASK 0x00000002
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_SHIFT 1
+#define CS42L43_HP_HPF_EN_MASK 0x00000001
+#define CS42L43_HP_HPF_EN_SHIFT 0
+
+/* CS42L43_HPPATHVOL */
+#define CS42L43_AMP4_PATH_VOL_MASK 0x01FF0000
+#define CS42L43_AMP4_PATH_VOL_SHIFT 16
+#define CS42L43_AMP3_PATH_VOL_MASK 0x000001FF
+#define CS42L43_AMP3_PATH_VOL_SHIFT 0
+
+/* CS42L43_PGAVOL */
+#define CS42L43_HP_PATH_VOL_RAMP_MASK 0x0003C000
+#define CS42L43_HP_PATH_VOL_RAMP_SHIFT 14
+#define CS42L43_HP_PATH_VOL_ZC_MASK 0x00002000
+#define CS42L43_HP_PATH_VOL_ZC_SHIFT 13
+#define CS42L43_HP_PATH_VOL_SFT_MASK 0x00001000
+#define CS42L43_HP_PATH_VOL_SFT_SHIFT 12
+#define CS42L43_HP_DIG_VOL_RAMP_MASK 0x00000F00
+#define CS42L43_HP_DIG_VOL_RAMP_SHIFT 8
+#define CS42L43_HP_ANA_VOL_RAMP_MASK 0x0000000F
+#define CS42L43_HP_ANA_VOL_RAMP_SHIFT 0
+
+/* CS42L43_LOADDETRESULTS */
+#define CS42L43_AMP3_RES_DET_MASK 0x00000003
+#define CS42L43_AMP3_RES_DET_SHIFT 0
+
+/* CS42L43_LOADDETENA */
+#define CS42L43_HPLOAD_DET_EN_MASK 0x00000001
+#define CS42L43_HPLOAD_DET_EN_SHIFT 0
+
+/* CS42L43_CTRL */
+#define CS42L43_ADPTPWR_MODE_MASK 0x00000007
+#define CS42L43_ADPTPWR_MODE_SHIFT 0
+
+/* CS42L43_COEFF_RD_WR0 */
+#define CS42L43_WRITE_MODE_MASK 0x00000002
+#define CS42L43_WRITE_MODE_SHIFT 1
+
+/* CS42L43_INIT_DONE0 */
+#define CS42L43_INITIALIZE_DONE_MASK 0x00000001
+#define CS42L43_INITIALIZE_DONE_SHIFT 0
+
+/* CS42L43_START_EQZ0 */
+#define CS42L43_START_FILTER_MASK 0x00000001
+#define CS42L43_START_FILTER_SHIFT 0
+
+/* CS42L43_MUTE_EQ_IN0 */
+#define CS42L43_MUTE_EQ_CH2_MASK 0x00000002
+#define CS42L43_MUTE_EQ_CH2_SHIFT 1
+#define CS42L43_MUTE_EQ_CH1_MASK 0x00000001
+#define CS42L43_MUTE_EQ_CH1_SHIFT 0
+
+/* CS42L43_PLL_INT */
+#define CS42L43_PLL_LOST_LOCK_INT_MASK 0x00000002
+#define CS42L43_PLL_LOST_LOCK_INT_SHIFT 1
+#define CS42L43_PLL_READY_INT_MASK 0x00000001
+#define CS42L43_PLL_READY_INT_SHIFT 0
+
+/* CS42L43_SOFT_INT */
+#define CS42L43_CONTROL_APPLIED_INT_MASK 0x00000010
+#define CS42L43_CONTROL_APPLIED_INT_SHIFT 4
+#define CS42L43_CONTROL_WARN_INT_MASK 0x00000008
+#define CS42L43_CONTROL_WARN_INT_SHIFT 3
+#define CS42L43_PATCH_WARN_INT_MASK 0x00000002
+#define CS42L43_PATCH_WARN_INT_SHIFT 1
+#define CS42L43_PATCH_APPLIED_INT_MASK 0x00000001
+#define CS42L43_PATCH_APPLIED_INT_SHIFT 0
+
+/* CS42L43_MSM_INT */
+#define CS42L43_HP_STARTUP_DONE_INT_MASK 0x00000800
+#define CS42L43_HP_STARTUP_DONE_INT_SHIFT 11
+#define CS42L43_HP_SHUTDOWN_DONE_INT_MASK 0x00000400
+#define CS42L43_HP_SHUTDOWN_DONE_INT_SHIFT 10
+#define CS42L43_HSDET_DONE_INT_MASK 0x00000200
+#define CS42L43_HSDET_DONE_INT_SHIFT 9
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_MASK 0x00000080
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_SHIFT 7
+#define CS42L43_TIPSENSE_PLUG_DB_INT_MASK 0x00000040
+#define CS42L43_TIPSENSE_PLUG_DB_INT_SHIFT 6
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_MASK 0x00000020
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_SHIFT 5
+#define CS42L43_RINGSENSE_PLUG_DB_INT_MASK 0x00000010
+#define CS42L43_RINGSENSE_PLUG_DB_INT_SHIFT 4
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_SHIFT 0
+
+/* CS42L43_ACC_DET_INT */
+#define CS42L43_HS2_BIAS_SENSE_INT_MASK 0x00000800
+#define CS42L43_HS2_BIAS_SENSE_INT_SHIFT 11
+#define CS42L43_HS1_BIAS_SENSE_INT_MASK 0x00000400
+#define CS42L43_HS1_BIAS_SENSE_INT_SHIFT 10
+#define CS42L43_DC_DETECT1_FALSE_INT_MASK 0x00000080
+#define CS42L43_DC_DETECT1_FALSE_INT_SHIFT 7
+#define CS42L43_DC_DETECT1_TRUE_INT_MASK 0x00000040
+#define CS42L43_DC_DETECT1_TRUE_INT_SHIFT 6
+#define CS42L43_HSBIAS_CLAMPED_INT_MASK 0x00000008
+#define CS42L43_HSBIAS_CLAMPED_INT_SHIFT 3
+#define CS42L43_HS3_4_BIAS_SENSE_INT_MASK 0x00000001
+#define CS42L43_HS3_4_BIAS_SENSE_INT_SHIFT 0
+
+/* CS42L43_SPI_MSTR_INT */
+#define CS42L43_IRQ_SPI_STALLING_INT_MASK 0x00000004
+#define CS42L43_IRQ_SPI_STALLING_INT_SHIFT 2
+#define CS42L43_IRQ_SPI_STS_INT_MASK 0x00000002
+#define CS42L43_IRQ_SPI_STS_INT_SHIFT 1
+#define CS42L43_IRQ_SPI_BLOCK_INT_MASK 0x00000001
+#define CS42L43_IRQ_SPI_BLOCK_INT_SHIFT 0
+
+/* CS42L43_SW_TO_SPI_BRIDGE_INT */
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_MASK 0x00000001
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_SHIFT 0
+
+/* CS42L43_CLASS_D_AMP_INT */
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_MASK 0x00002000
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_SHIFT 13
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_MASK 0x00001000
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_SHIFT 12
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_MASK 0x00000800
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_SHIFT 11
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_MASK 0x00000400
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_SHIFT 10
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_MASK 0x00000200
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_SHIFT 9
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_MASK 0x00000100
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_SHIFT 8
+#define CS42L43_AMP2_STARTUP_DONE_INT_MASK 0x00000080
+#define CS42L43_AMP2_STARTUP_DONE_INT_SHIFT 7
+#define CS42L43_AMP1_STARTUP_DONE_INT_MASK 0x00000040
+#define CS42L43_AMP1_STARTUP_DONE_INT_SHIFT 6
+#define CS42L43_AMP2_THERM_SHDN_INT_MASK 0x00000020
+#define CS42L43_AMP2_THERM_SHDN_INT_SHIFT 5
+#define CS42L43_AMP1_THERM_SHDN_INT_MASK 0x00000010
+#define CS42L43_AMP1_THERM_SHDN_INT_SHIFT 4
+#define CS42L43_AMP2_THERM_WARN_INT_MASK 0x00000008
+#define CS42L43_AMP2_THERM_WARN_INT_SHIFT 3
+#define CS42L43_AMP1_THERM_WARN_INT_MASK 0x00000004
+#define CS42L43_AMP1_THERM_WARN_INT_SHIFT 2
+#define CS42L43_AMP2_SCDET_INT_MASK 0x00000002
+#define CS42L43_AMP2_SCDET_INT_SHIFT 1
+#define CS42L43_AMP1_SCDET_INT_MASK 0x00000001
+#define CS42L43_AMP1_SCDET_INT_SHIFT 0
+
+/* CS42L43_GPIO_INT */
+#define CS42L43_GPIO3_FALL_INT_MASK 0x00000020
+#define CS42L43_GPIO3_FALL_INT_SHIFT 5
+#define CS42L43_GPIO3_RISE_INT_MASK 0x00000010
+#define CS42L43_GPIO3_RISE_INT_SHIFT 4
+#define CS42L43_GPIO2_FALL_INT_MASK 0x00000008
+#define CS42L43_GPIO2_FALL_INT_SHIFT 3
+#define CS42L43_GPIO2_RISE_INT_MASK 0x00000004
+#define CS42L43_GPIO2_RISE_INT_SHIFT 2
+#define CS42L43_GPIO1_FALL_INT_MASK 0x00000002
+#define CS42L43_GPIO1_FALL_INT_SHIFT 1
+#define CS42L43_GPIO1_RISE_INT_MASK 0x00000001
+#define CS42L43_GPIO1_RISE_INT_SHIFT 0
+
+/* CS42L43_HPOUT_INT */
+#define CS42L43_HP_ILIMIT_INT_MASK 0x00000002
+#define CS42L43_HP_ILIMIT_INT_SHIFT 1
+#define CS42L43_HP_LOADDET_DONE_INT_MASK 0x00000001
+#define CS42L43_HP_LOADDET_DONE_INT_SHIFT 0
+
+/* CS42L43_BOOT_CONTROL */
+#define CS42L43_LOCK_HW_STS_MASK 0x00000002
+#define CS42L43_LOCK_HW_STS_SHIFT 1
+
+/* CS42L43_BLOCK_EN */
+#define CS42L43_MCU_EN_MASK 0x00000001
+#define CS42L43_MCU_EN_SHIFT 0
+
+/* CS42L43_SHUTTER_CONTROL */
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_MASK 0x00008000
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_SHIFT 15
+#define CS42L43_SPK_SHUTTER_CFG_MASK 0x00000F00
+#define CS42L43_SPK_SHUTTER_CFG_SHIFT 8
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_MASK 0x00000080
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_SHIFT 7
+#define CS42L43_MIC_SHUTTER_CFG_MASK 0x0000000F
+#define CS42L43_MIC_SHUTTER_CFG_SHIFT 0
+
+/* CS42L43_MCU_SW_REV */
+#define CS42L43_BIOS_SUBMINOR_REV_MASK 0xFF000000
+#define CS42L43_BIOS_SUBMINOR_REV_SHIFT 24
+#define CS42L43_BIOS_MINOR_REV_MASK 0x00F00000
+#define CS42L43_BIOS_MINOR_REV_SHIFT 20
+#define CS42L43_BIOS_MAJOR_REV_MASK 0x000F0000
+#define CS42L43_BIOS_MAJOR_REV_SHIFT 16
+#define CS42L43_FW_SUBMINOR_REV_MASK 0x0000FF00
+#define CS42L43_FW_SUBMINOR_REV_SHIFT 8
+#define CS42L43_FW_MINOR_REV_MASK 0x000000F0
+#define CS42L43_FW_MINOR_REV_SHIFT 4
+#define CS42L43_FW_MAJOR_REV_MASK 0x0000000F
+#define CS42L43_FW_MAJOR_REV_SHIFT 0
+
+/* CS42L43_NEED_CONFIGS */
+#define CS42L43_FW_PATCH_NEED_CFG_MASK 0x80000000
+#define CS42L43_FW_PATCH_NEED_CFG_SHIFT 31
+
+/* CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION */
+#define CS42L43_FW_MM_CTRL_MCU_SEL_MASK 0x00000001
+#define CS42L43_FW_MM_CTRL_MCU_SEL_SHIFT 0
+
+/* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F
+
+#endif /* CS42L43_CORE_REGS_H */
diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h
new file mode 100644
index 000000000000..cf8263aab41b
--- /dev/null
+++ b/include/linux/mfd/cs42l43.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 core driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/workqueue.h>
+
+#ifndef CS42L43_CORE_EXT_H
+#define CS42L43_CORE_EXT_H
+
+#define CS42L43_N_SUPPLIES 3
+
+enum cs42l43_irq_numbers {
+ CS42L43_PLL_LOST_LOCK,
+ CS42L43_PLL_READY,
+
+ CS42L43_HP_STARTUP_DONE,
+ CS42L43_HP_SHUTDOWN_DONE,
+ CS42L43_HSDET_DONE,
+ CS42L43_TIPSENSE_UNPLUG_DB,
+ CS42L43_TIPSENSE_PLUG_DB,
+ CS42L43_RINGSENSE_UNPLUG_DB,
+ CS42L43_RINGSENSE_PLUG_DB,
+ CS42L43_TIPSENSE_UNPLUG_PDET,
+ CS42L43_TIPSENSE_PLUG_PDET,
+ CS42L43_RINGSENSE_UNPLUG_PDET,
+ CS42L43_RINGSENSE_PLUG_PDET,
+
+ CS42L43_HS2_BIAS_SENSE,
+ CS42L43_HS1_BIAS_SENSE,
+ CS42L43_DC_DETECT1_FALSE,
+ CS42L43_DC_DETECT1_TRUE,
+ CS42L43_HSBIAS_CLAMPED,
+ CS42L43_HS3_4_BIAS_SENSE,
+
+ CS42L43_AMP2_CLK_STOP_FAULT,
+ CS42L43_AMP1_CLK_STOP_FAULT,
+ CS42L43_AMP2_VDDSPK_FAULT,
+ CS42L43_AMP1_VDDSPK_FAULT,
+ CS42L43_AMP2_SHUTDOWN_DONE,
+ CS42L43_AMP1_SHUTDOWN_DONE,
+ CS42L43_AMP2_STARTUP_DONE,
+ CS42L43_AMP1_STARTUP_DONE,
+ CS42L43_AMP2_THERM_SHDN,
+ CS42L43_AMP1_THERM_SHDN,
+ CS42L43_AMP2_THERM_WARN,
+ CS42L43_AMP1_THERM_WARN,
+ CS42L43_AMP2_SCDET,
+ CS42L43_AMP1_SCDET,
+
+ CS42L43_GPIO3_FALL,
+ CS42L43_GPIO3_RISE,
+ CS42L43_GPIO2_FALL,
+ CS42L43_GPIO2_RISE,
+ CS42L43_GPIO1_FALL,
+ CS42L43_GPIO1_RISE,
+
+ CS42L43_HP_ILIMIT,
+ CS42L43_HP_LOADDET_DONE,
+};
+
+struct cs42l43 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct sdw_slave *sdw;
+
+ struct regulator *vdd_p;
+ struct regulator *vdd_d;
+ struct regulator_bulk_data core_supplies[CS42L43_N_SUPPLIES];
+
+ struct gpio_desc *reset;
+
+ int irq;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct work_struct boot_work;
+ struct completion device_attach;
+ struct completion device_detach;
+ struct completion firmware_download;
+ int firmware_error;
+
+ unsigned int sdw_freq;
+ /* Lock to gate control of the PLL and its sources. */
+ struct mutex pll_lock;
+
+ bool sdw_pll_active;
+ bool attached;
+ bool hw_lock;
+};
+
+#endif /* CS42L43_CORE_EXT_H */
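As an illustrative sketch only (not part of this patch), a function driver sitting under this MFD core could combine the *_MASK/*_SHIFT definitions from the register header above with the shared regmap in struct cs42l43. The CS42L43_BLOCK_EN3 register address macro and the register header's include path are assumed to be provided elsewhere in the series.

#include <linux/mfd/cs42l43.h>
#include <linux/regmap.h>

/* Enable both ADCs, leaving the PDM input enables untouched. */
static int cs42l43_example_enable_adcs(struct cs42l43 *cs42l43)
{
	return regmap_update_bits(cs42l43->regmap, CS42L43_BLOCK_EN3,
				  CS42L43_ADC1_EN_MASK | CS42L43_ADC2_EN_MASK,
				  (1 << CS42L43_ADC1_EN_SHIFT) |
				  (1 << CS42L43_ADC2_EN_SHIFT));
}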
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index 16f87cccc003..9185b5cd8371 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -13,8 +13,9 @@
#include <linux/regmap.h>
/* List of registers for TPS65086 */
-#define TPS65086_DEVICEID 0x01
-#define TPS65086_IRQ 0x02
+#define TPS65086_DEVICEID1 0x00
+#define TPS65086_DEVICEID2 0x01
+#define TPS65086_IRQ 0x02
#define TPS65086_IRQ_MASK 0x03
#define TPS65086_PMICSTAT 0x04
#define TPS65086_SHUTDNSRC 0x05
@@ -75,10 +76,16 @@
#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
#define TPS65086_IRQ_FAULT_MASK BIT(7)
-/* DEVICEID Register field definitions */
-#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
-#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
-#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
+/* DEVICEID1 Register field definitions */
+#define TPS6508640_ID 0x00
+#define TPS65086401_ID 0x01
+#define TPS6508641_ID 0x10
+#define TPS65086470_ID 0x70
+
+/* DEVICEID2 Register field definitions */
+#define TPS65086_DEVICEID2_PART_MASK GENMASK(3, 0)
+#define TPS65086_DEVICEID2_OTP_MASK GENMASK(5, 4)
+#define TPS65086_DEVICEID2_REV_MASK GENMASK(7, 6)
/* VID Masks */
#define BUCK_VID_MASK GENMASK(7, 1)
@@ -92,6 +99,8 @@ enum tps65086_irqs {
TPS65086_IRQ_FAULT,
};
+struct tps65086_regulator_config;
+
/**
* struct tps65086 - state holder for the tps65086 driver
*
@@ -100,6 +109,8 @@ enum tps65086_irqs {
struct tps65086 {
struct device *dev;
struct regmap *regmap;
+ unsigned int chip_id;
+ const struct tps65086_regulator_config *reg_config;
/* IRQ Data */
int irq;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e95f10066eac..3033bbaeac81 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -813,6 +813,7 @@ struct mlx5_core_dev {
/* MACsec notifier chain to sync MACsec core and IB database */
struct blocking_notifier_head macsec_nh;
#endif
+ u64 num_ipsec_offloads;
};
struct mlx5_db {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 08dcb1f43be7..fc3db401f8a2 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -65,9 +65,11 @@ enum {
enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS = 0x1,
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC = 0x15,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25,
};
@@ -3451,6 +3453,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
struct mlx5_ifc_macsec_cap_bits macsec_cap;
struct mlx5_ifc_crypto_cap_bits crypto_cap;
+ struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
u8 reserved_at_0[0x8000];
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 406ab9ea818f..34f9dba17c1a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3421,15 +3421,24 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
-static inline bool gup_can_follow_protnone(unsigned int flags)
+static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+ unsigned int flags)
{
/*
- * FOLL_FORCE has to be able to make progress even if the VMA is
- * inaccessible. Further, FOLL_FORCE access usually does not represent
- * application behaviour and we should avoid triggering NUMA hinting
- * faults.
+ * If callers don't want to honor NUMA hinting faults, no need to
+ * determine if we would actually have to trigger a NUMA hinting fault.
*/
- return flags & FOLL_FORCE;
+ if (!(flags & FOLL_HONOR_NUMA_FAULT))
+ return true;
+
+ /*
+ * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
+ *
+ * Requiring a fault here even for inaccessible VMAs would mean that
+ * FOLL_FORCE cannot make any progress, because handle_mm_fault()
+ * refuses to process NUMA hinting faults in inaccessible VMAs.
+ */
+ return !vma_is_accessible(vma);
}
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5e74ce4a28cd..7d30dc4ff0ff 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1286,6 +1286,15 @@ enum {
FOLL_PCI_P2PDMA = 1 << 10,
/* allow interrupts from generic signals */
FOLL_INTERRUPTIBLE = 1 << 11,
+ /*
+ * Always honor (trigger) NUMA hinting faults.
+ *
+ * FOLL_WRITE implicitly honors NUMA hinting faults because a
+ * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
+ * apply). get_user_pages_fast_only() always implicitly honors NUMA
+ * hinting faults.
+ */
+ FOLL_HONOR_NUMA_FAULT = 1 << 12,
/* See also internal only FOLL flags in mm/internal.h */
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 461d1543893b..62a6847a3b6f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -184,6 +184,12 @@ struct mmc_host_ops {
/* Execute HS400 tuning depending host driver */
int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+ /* Optional callback to prepare for SD high-speed tuning */
+ int (*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Optional callback to execute SD high-speed tuning */
+ int (*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
/* Prepare switch to DDR during the HS400 init sequence */
int (*hs400_prepare_ddr)(struct mmc_host *host);
@@ -665,6 +671,8 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
host->err_stats[stat] += 1;
}
+int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp);
+int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
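A hedged sketch (not part of this patch) of how a host controller driver might opt into the new optional SD high-speed tuning hooks declared above; the foo_* names are hypothetical stand-ins for controller-specific code.

static int foo_prepare_sd_hs_tuning(struct mmc_host *host, struct mmc_card *card)
{
	/* e.g. put the controller's sample clock/delay lines into tuning mode */
	return 0;
}

static int foo_execute_sd_hs_tuning(struct mmc_host *host, struct mmc_card *card)
{
	/* e.g. sweep sampling delays and settle on the widest passing window */
	return 0;
}

static const struct mmc_host_ops foo_mmc_ops = {
	.prepare_sd_hs_tuning	= foo_prepare_sd_hs_tuning,
	.execute_sd_hs_tuning	= foo_execute_sd_hs_tuning,
	/* mandatory ops (.request, .set_ios, ...) omitted for brevity */
};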
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 86544707236a..45702bdcbceb 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -73,9 +73,7 @@ struct raw_notifier_head {
struct srcu_notifier_head {
struct mutex mutex;
-#ifdef CONFIG_TREE_SRCU
struct srcu_usage srcuu;
-#endif
struct srcu_struct srcu;
struct notifier_block __rcu *head;
};
@@ -106,7 +104,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
-#ifdef CONFIG_TREE_SRCU
#define SRCU_NOTIFIER_INIT(name, pcpu) \
{ \
.mutex = __MUTEX_INITIALIZER(name.mutex), \
@@ -114,14 +111,6 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
.srcuu = __SRCU_USAGE_INIT(name.srcuu), \
.srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
}
-#else
-#define SRCU_NOTIFIER_INIT(name, pcpu) \
- { \
- .mutex = __MUTEX_INITIALIZER(name.mutex), \
- .head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu), \
- }
-#endif
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index fee881cded01..771cb0285872 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -29,7 +29,7 @@ struct fs_struct;
* nsproxy is copied.
*/
struct nsproxy {
- atomic_t count;
+ refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -102,14 +102,13 @@ int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
- if (atomic_dec_and_test(&ns->count)) {
+ if (refcount_dec_and_test(&ns->count))
free_nsproxy(ns);
- }
}
static inline void get_nsproxy(struct nsproxy *ns)
{
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 716953ee1ebd..d87840acbfb2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -470,6 +470,19 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER 8
+#endif
+
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
@@ -501,22 +514,69 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
-#define FGP_ACCESSED 0x00000001
-#define FGP_LOCK 0x00000002
-#define FGP_CREAT 0x00000004
-#define FGP_WRITE 0x00000008
-#define FGP_NOFS 0x00000010
-#define FGP_NOWAIT 0x00000020
-#define FGP_FOR_MMAP 0x00000040
-#define FGP_STABLE 0x00000080
+/**
+ * typedef fgf_t - Flags for getting folios from the page cache.
+ *
+ * Most users of the page cache will not need to use these flags;
+ * there are convenience functions such as filemap_get_folio() and
+ * filemap_lock_folio(). For users which need more control over exactly
+ * what is done with the folios, these flags to __filemap_get_folio()
+ * are available.
+ *
+ * * %FGP_ACCESSED - The folio will be marked accessed.
+ * * %FGP_LOCK - The folio is returned locked.
+ * * %FGP_CREAT - If no folio is present then a new folio is allocated,
+ * added to the page cache and the VM's LRU list. The folio is
+ * returned locked.
+ * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
+ * folio is already in cache. If the folio was allocated, unlock it
+ * before returning so the caller can do the same dance.
+ * * %FGP_WRITE - The folio will be written to by the caller.
+ * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
+ * * %FGP_NOWAIT - Don't block on the folio lock.
+ * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
+ * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
+ * implementation.
+ */
+typedef unsigned int __bitwise fgf_t;
+
+#define FGP_ACCESSED ((__force fgf_t)0x00000001)
+#define FGP_LOCK ((__force fgf_t)0x00000002)
+#define FGP_CREAT ((__force fgf_t)0x00000004)
+#define FGP_WRITE ((__force fgf_t)0x00000008)
+#define FGP_NOFS ((__force fgf_t)0x00000010)
+#define FGP_NOWAIT ((__force fgf_t)0x00000020)
+#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
+#define FGP_STABLE ((__force fgf_t)0x00000080)
+#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+/**
+ * fgf_set_order - Encode a length in the fgf_t flags.
+ * @size: The suggested size of the folio to create.
+ *
+ * The caller of __filemap_get_folio() can use this to suggest a preferred
+ * size for the folio that is created. If there is already a folio at
+ * the index, it will be returned, no matter what its size. If a folio
+ * is freshly created, it may be of a different size than requested
+ * due to alignment constraints, memory pressure, or the presence of
+ * other folios at nearby indices.
+ */
+static inline fgf_t fgf_set_order(size_t size)
+{
+ unsigned int shift = ilog2(size);
+
+ if (shift <= PAGE_SHIFT)
+ return 0;
+ return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
+}
+
void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp);
+ fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp);
+ fgf_t fgp_flags, gfp_t gfp);
/**
* filemap_get_folio - Find and get a folio.
@@ -590,7 +650,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
- pgoff_t offset, int fgp_flags)
+ pgoff_t offset, fgf_t fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
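A minimal sketch (not part of this patch) of the size hint described in the kerneldoc above: a write path ORs fgf_set_order() into the fgf_t flags so __filemap_get_folio() may allocate a larger folio; pos and len are assumed to come from the caller's write_begin() context.

static struct folio *example_grab_folio(struct address_space *mapping,
					loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);
	struct folio *folio;

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return NULL;	/* caller falls back to an order-0 path */

	/* Locked folio; its order may still differ from the hint. */
	return folio;
}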
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 27a6df448ee5..27cd1e59ccf7 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -6,6 +6,16 @@
struct mm_walk;
+/* Locking requirement during a page walk. */
+enum page_walk_lock {
+ /* mmap_lock should be locked for read to stabilize the vma tree */
+ PGWALK_RDLOCK = 0,
+ /* vma will be write-locked during the walk */
+ PGWALK_WRLOCK = 1,
+ /* vma is expected to be already write-locked during the walk */
+ PGWALK_WRLOCK_VERIFY = 2,
+};
+
/**
* struct mm_walk_ops - callbacks for walk_page_range
* @pgd_entry: if set, called for each non-empty PGD (top-level) entry
@@ -66,6 +76,7 @@ struct mm_walk_ops {
int (*pre_vma)(unsigned long start, unsigned long end,
struct mm_walk *walk);
void (*post_vma)(struct mm_walk *walk);
+ enum page_walk_lock walk_lock;
};
/*
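A hedged sketch (not part of this patch) of a read-only walker declaring its locking expectation through the new walk_lock field; the caller still takes mmap_lock in the matching mode before calling walk_page_range().

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *present = walk->private;

	if (pte_present(ptep_get(pte)))
		(*present)++;
	return 0;
}

static const struct mm_walk_ops example_walk_ops = {
	.pte_entry	= example_pte_entry,
	.walk_lock	= PGWALK_RDLOCK,	/* vma tree stabilized by mmap_lock (read) */
};

/* Caller: mmap_read_lock(mm); walk_page_range(mm, start, end, &example_walk_ops, &present); mmap_read_unlock(mm); */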
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2dc75df1437f..8f9a459e1671 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -576,6 +576,8 @@
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index a0801f68762b..143fbc10ecfe 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -187,5 +187,6 @@ void armpmu_free_irq(int irq, int cpu);
#endif /* CONFIG_ARM_PMU */
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
+#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2166a69e3bf2..05253af70ce9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -288,10 +288,9 @@ struct perf_event_pmu_context;
#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
#define PERF_PMU_CAP_EXCLUSIVE 0x0010
#define PERF_PMU_CAP_ITRACE 0x0020
-#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040
-#define PERF_PMU_CAP_NO_EXCLUDE 0x0080
-#define PERF_PMU_CAP_AUX_OUTPUT 0x0100
-#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
struct perf_output_handle;
@@ -1194,7 +1193,8 @@ struct perf_sample_data {
PERF_MEM_S(LVL, NA) |\
PERF_MEM_S(SNOOP, NA) |\
PERF_MEM_S(LOCK, NA) |\
- PERF_MEM_S(TLB, NA))
+ PERF_MEM_S(TLB, NA) |\
+ PERF_MEM_S(LVLNUM, NA))
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
@@ -1316,15 +1316,31 @@ extern int perf_event_output(struct perf_event *event,
struct pt_regs *regs);
static inline bool
-is_default_overflow_handler(struct perf_event *event)
+__is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
{
- if (likely(event->overflow_handler == perf_event_output_forward))
+ if (likely(overflow_handler == perf_event_output_forward))
return true;
- if (unlikely(event->overflow_handler == perf_event_output_backward))
+ if (unlikely(overflow_handler == perf_event_output_backward))
return true;
return false;
}
+#define is_default_overflow_handler(event) \
+ __is_default_overflow_handler((event)->overflow_handler)
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline bool uses_default_overflow_handler(struct perf_event *event)
+{
+ if (likely(is_default_overflow_handler(event)))
+ return true;
+
+ return __is_default_overflow_handler(event->orig_overflow_handler);
+}
+#else
+#define uses_default_overflow_handler(event) \
+ is_default_overflow_handler(event)
+#endif
+
extern void
perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
@@ -1860,10 +1876,6 @@ extern void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg,
u64 now);
-#ifdef CONFIG_MMU
-extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
-#endif
-
/*
* Snapshot branch stack on software events.
*
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 02e0086b10f6..608a9eb86bff 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -269,10 +269,10 @@ bool pipe_is_unprivileged_user(void);
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
-long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+long pipe_fcntl(struct file *, unsigned int, unsigned int arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
-unsigned int round_pipe_size(unsigned long size);
+unsigned int round_pipe_size(unsigned int size);
#endif
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index dc1fb5890792..91f87d7e807c 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -103,7 +103,7 @@ int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *su
unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
-unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index);
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
@@ -121,17 +121,29 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
+
+struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available);
+
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_freq_floor_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
+
struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
unsigned int level);
+
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level);
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
- unsigned long *freq);
-
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index);
@@ -200,7 +212,7 @@ static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
return 0;
}
-static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+static inline unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
return 0;
}
@@ -247,26 +259,27 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
return 0;
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level)
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq, bool available)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
- unsigned int *level)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
- unsigned long freq, bool available)
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
- unsigned long *freq)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -277,6 +290,24 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index)
{
@@ -631,4 +662,9 @@ static inline void dev_pm_opp_put_prop_name(int token)
dev_pm_opp_clear_config(token);
}
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_freq_indexed(opp, 0);
+}
+
#endif /* __LINUX_OPP_H__ */
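A hedged sketch (not part of this patch) of the new indexed lookups: for a device with multiple clocks the clock index is passed explicitly, and index 0 matches the old single-clock behaviour (see the dev_pm_opp_get_freq() wrapper above).

static unsigned long example_round_rate(struct device *dev,
					unsigned long rate, u32 clk_index)
{
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil_indexed(dev, &rate, clk_index);
	if (IS_ERR(opp))
		return 0;	/* no OPP at or above the requested rate */

	dev_pm_opp_put(opp);	/* the find_*() helpers take a reference */
	return rate;		/* updated in place to the selected frequency */
}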
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 9a8151a2bdea..7c9b35448563 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -85,8 +85,6 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
-extern void pm_runtime_update_max_time_suspended(struct device *dev,
- s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 77f4849e3418..6eb9adaef52b 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -194,6 +194,16 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
#endif /* !CONFIG_PM_SLEEP */
+static inline bool device_awake_path(struct device *dev)
+{
+ return device_wakeup_path(dev);
+}
+
+static inline void device_set_awake_path(struct device *dev)
+{
+ device_set_wakeup_path(dev);
+}
+
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
return pm_wakeup_ws_event(ws, msec, false);
diff --git a/include/linux/power/power_on_reason.h b/include/linux/power/power_on_reason.h
new file mode 100644
index 000000000000..95a1ec0c403c
--- /dev/null
+++ b/include/linux/power/power_on_reason.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Kamel Bouhra <kamel.bouhara@bootlin.com>
+ */
+
+#ifndef POWER_ON_REASON_H
+#define POWER_ON_REASON_H
+
+#define POWER_ON_REASON_REGULAR "regular power-up"
+#define POWER_ON_REASON_RTC "RTC wakeup"
+#define POWER_ON_REASON_WATCHDOG "watchdog timeout"
+#define POWER_ON_REASON_SOFTWARE "software reset"
+#define POWER_ON_REASON_RST_BTN "reset button action"
+#define POWER_ON_REASON_CPU_CLK_FAIL "CPU clock failure"
+#define POWER_ON_REASON_XTAL_FAIL "crystal oscillator failure"
+#define POWER_ON_REASON_BROWN_OUT "brown-out reset"
+#define POWER_ON_REASON_UNKNOWN "unknown reason"
+
+#endif /* POWER_ON_REASON_H */
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
index 75da8f5f7ad8..c1dc87fc536b 100644
--- a/include/linux/psp-platform-access.h
+++ b/include/linux/psp-platform-access.h
@@ -8,6 +8,10 @@
enum psp_platform_access_msg {
PSP_CMD_NONE = 0x0,
PSP_I2C_REQ_BUS_CMD = 0x64,
+ PSP_DYNAMIC_BOOST_GET_NONCE,
+ PSP_DYNAMIC_BOOST_SET_UID,
+ PSP_DYNAMIC_BOOST_GET_PARAMETER,
+ PSP_DYNAMIC_BOOST_SET_PARAMETER,
};
struct psp_req_buffer_hdr {
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 6a9b177d5c41..e50416ba9cd9 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
-
-int __must_check raid_component_add(struct raid_template *, struct device *,
- struct device *);
-
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index 7ee7ed5de722..6dbc5a1bf6a8 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,6 +60,32 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
+static __always_inline struct rb_node *
+rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *),
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ augment->propagate(parent, NULL); /* suboptimal */
+ rb_insert_augmented_cached(node, tree, leftmost, augment);
+
+ return leftmost ? node : NULL;
+}
+
/*
* Template for declaring augmented rbtree callbacks (generic case)
*
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index ba4c00dd8005..89186c499dd4 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -101,7 +101,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
{
struct hlist_nulls_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
@@ -137,7 +137,7 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
last = i;
if (last) {
- n->next = last->next;
+ WRITE_ONCE(n->next, last->next);
n->pprev = &last->next;
rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 9bc8cbb33340..eda493200663 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -87,6 +87,7 @@ static inline void rcu_read_unlock_trace(void)
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
#else
/*
* The BPF JIT forms these addresses even when it doesn't call these
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 699b938358bf..5e0f74f2f8ca 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -42,6 +42,11 @@ do { \
* call_srcu() function, with this wrapper supplying the pointer to the
* corresponding srcu_struct.
*
+ * Note that call_rcu_hurry() should be used instead of call_rcu()
+ * because in kernels built with CONFIG_RCU_LAZY=y the delay between the
+ * invocation of call_rcu() and that of the corresponding RCU callback
+ * can be multiple seconds.
+ *
* The first argument tells Tiny RCU's _wait_rcu_gp() not to
* bother waiting for RCU. The reason for this is because anywhere
* synchronize_rcu_mult() can be called is automatically already a full
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 8fc0b3ebce44..c9182a47736e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1287,6 +1287,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
void regcache_cache_only(struct regmap *map, bool enable);
void regcache_cache_bypass(struct regmap *map, bool enable);
void regcache_mark_dirty(struct regmap *map);
+bool regcache_reg_cached(struct regmap *map, unsigned int reg);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
index f90df9ee703e..d58ff273157e 100644
--- a/include/linux/regulator/db8500-prcmu.h
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -35,10 +35,4 @@ enum db8500_regulator_id {
DB8500_NUM_REGULATORS
};
-/*
- * Exported interface for CPUIdle only. This function is called with all
- * interrupts turned off.
- */
-int power_state_active_is_enabled(void);
-
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index c6ef7d68eb9a..4b7eceb3828b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -292,11 +292,12 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @linear_range_selectors: A constant table of voltage range selectors.
- * If pickable ranges are used each range must
- * have corresponding selector here.
+ * @linear_range_selectors_bitfield: A constant table of voltage range
+ * selectors as bitfield values. If
+ * pickable ranges are used each range
+ * must have corresponding selector here.
* @n_linear_ranges: Number of entries in the @linear_ranges (and in
- * linear_range_selectors if used) table(s).
+ * linear_range_selectors_bitfield if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
* @curr_table: Current limit mapping table (if table based mapping)
*
@@ -384,7 +385,7 @@ struct regulator_desc {
int min_dropout_uV;
const struct linear_range *linear_ranges;
- const unsigned int *linear_range_selectors;
+ const unsigned int *linear_range_selectors_bitfield;
int n_linear_ranges;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 609bde814cb0..177b3f3676ef 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -75,14 +75,14 @@ struct user_event_mm;
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
- * We have two separate sets of flags: task->state
+ * We have two separate sets of flags: task->__state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
-/* Used in tsk->state: */
+/* Used in tsk->__state: */
#define TASK_RUNNING 0x00000000
#define TASK_INTERRUPTIBLE 0x00000001
#define TASK_UNINTERRUPTIBLE 0x00000002
@@ -92,7 +92,7 @@ struct user_event_mm;
#define EXIT_DEAD 0x00000010
#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* Used in tsk->state again: */
+/* Used in tsk->__state again: */
#define TASK_PARKED 0x00000040
#define TASK_DEAD 0x00000080
#define TASK_WAKEKILL 0x00000100
@@ -173,7 +173,7 @@ struct user_event_mm;
#endif
/*
- * set_current_state() includes a barrier so that the write of current->state
+ * set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
@@ -196,9 +196,9 @@ struct user_event_mm;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* where wake_up_state()/try_to_wake_up() executes a full memory barrier before
- * accessing p->state.
+ * accessing p->__state.
*
- * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
@@ -549,13 +549,18 @@ struct sched_entity {
/* For load-balancing: */
struct load_weight load;
struct rb_node run_node;
+ u64 deadline;
+ u64 min_deadline;
+
struct list_head group_node;
unsigned int on_rq;
u64 exec_start;
u64 sum_exec_runtime;
- u64 vruntime;
u64 prev_sum_exec_runtime;
+ u64 vruntime;
+ s64 vlag;
+ u64 slice;
u64 nr_migrations;
@@ -2433,9 +2438,11 @@ extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
unsigned long uaddr);
+extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
+static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index dd35ce28bb90..a23af225c898 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -118,11 +118,47 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
}
extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
- if (refcount_dec_and_test(&t->usage))
+ if (!refcount_dec_and_test(&t->usage))
+ return;
+
+ /*
+ * In !RT, it is always safe to call __put_task_struct().
+ * Under RT, we can only call it in preemptible context.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
+
+ lock_map_acquire_try(&put_task_map);
__put_task_struct(t);
+ lock_map_release(&put_task_map);
+ return;
+ }
+
+ /*
+ * under PREEMPT_RT, we can't call put_task_struct
+ * in atomic context because it will indirectly
+ * acquire sleeping locks.
+ *
+ * call_rcu() will schedule delayed_put_task_struct_rcu()
+ * to be called in process context.
+ *
+ * __put_task_struct() is called when
+ * refcount_dec_and_test(&t->usage) succeeds.
+ *
+ * This means that it can't "conflict" with
+ * put_task_struct_rcu_user() which abuses ->rcu the same
+ * way; rcu_users has a reference so task->usage can't be
+ * zero after rcu_users 1 -> 0 transition.
+ *
+ * delayed_free_task() also uses ->rcu, but it is only called
+ * when it fails to fork a process. Therefore, there is no
+ * way it can conflict with put_task_struct().
+ */
+ call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
diff --git a/include/linux/security.h b/include/linux/security.h
index 994cf099d9ac..b2c38bfe5647 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -293,6 +293,7 @@ int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
void security_bprm_committing_creds(struct linux_binprm *bprm);
void security_bprm_committed_creds(struct linux_binprm *bprm);
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
@@ -629,6 +630,11 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}
+static inline int security_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ return 0;
+}
static inline int security_fs_context_dup(struct fs_context *fc,
struct fs_context *src_fc)
{
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index bd023dd38ae6..386ab580b839 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -249,18 +249,19 @@ static inline void seq_show_option(struct seq_file *m, const char *name,
/**
* seq_show_option_n - display mount options with appropriate escapes
- * where @value must be a specific length.
+ * where @value must be a specific length (i.e.
+ * not NUL-terminated).
* @m: the seq_file handle
* @name: the mount option name
* @value: the mount option name's value, cannot be NULL
- * @length: the length of @value to display
+ * @length: the exact length of @value to display, must be constant expression
*
* This is a macro since this uses "length" to define the size of the
* stack buffer.
*/
#define seq_show_option_n(m, name, value, length) { \
char val_buf[length + 1]; \
- strncpy(val_buf, value, length); \
+ memcpy(val_buf, value, length); \
val_buf[length] = '\0'; \
seq_show_option(m, name, val_buf); \
}
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9029abd29b1c..6b0c626620f5 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -13,6 +13,10 @@
/* inode in-kernel data */
+#ifdef CONFIG_TMPFS_QUOTA
+#define SHMEM_MAXQUOTAS 2
+#endif
+
struct shmem_inode_info {
spinlock_t lock;
unsigned int seals; /* shmem seals */
@@ -27,6 +31,10 @@ struct shmem_inode_info {
atomic_t stop_eviction; /* hold when working on inode */
struct timespec64 i_crtime; /* file creation time */
unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
+#ifdef CONFIG_TMPFS_QUOTA
+ struct dquot *i_dquot[MAXQUOTAS];
+#endif
+ struct offset_ctx dir_offsets; /* stable entry offsets */
struct inode vfs_inode;
};
@@ -35,11 +43,18 @@ struct shmem_inode_info {
(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
+struct shmem_quota_limits {
+ qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
+ qsize_t usrquota_ihardlimit; /* Default user quota inode hard limit */
+ qsize_t grpquota_bhardlimit; /* Default group quota block hard limit */
+ qsize_t grpquota_ihardlimit; /* Default group quota inode hard limit */
+};
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
unsigned long max_inodes; /* How many inodes are allowed */
- unsigned long free_inodes; /* How many are left for allocation */
+ unsigned long free_ispace; /* How much ispace left for allocation */
raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
umode_t mode; /* Mount mode for root directory */
unsigned char huge; /* Whether to try for hugepages */
@@ -53,6 +68,7 @@ struct shmem_sb_info {
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shinkable inodes */
unsigned long shrinklist_len; /* Length of shrinklist */
+ struct shmem_quota_limits qlimits; /* Default quota limits */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
@@ -172,4 +188,17 @@ extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
#endif /* CONFIG_SHMEM */
#endif /* CONFIG_USERFAULTFD */
+/*
+ * Used space is stored as unsigned 64-bit value in bytes but
+ * quota core supports only signed 64-bit values so use that
+ * as a limit
+ */
+#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
+#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL
+
+#ifdef CONFIG_TMPFS_QUOTA
+extern const struct dquot_operations shmem_quota_operations;
+extern struct quota_format_type shmem_quota_format;
+#endif /* CONFIG_TMPFS_QUOTA */
+
#endif
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index 821a19135bb6..29e06905bc1f 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -35,6 +35,7 @@ enum geni_se_protocol_type {
GENI_SE_UART,
GENI_SE_I2C,
GENI_SE_I3C,
+ GENI_SE_SPI_SLAVE,
};
struct geni_wrapper;
@@ -73,12 +74,14 @@ struct geni_se {
/* Common SE registers */
#define GENI_FORCE_DEFAULT_REG 0x20
+#define GENI_OUTPUT_CTRL 0x24
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
+#define SE_GENI_CFG_SEQ_START 0x84
#define SE_GENI_DMA_MODE_EN 0x258
#define SE_GENI_M_CMD0 0x600
#define SE_GENI_M_CMD_CTRL_REG 0x604
@@ -111,6 +114,9 @@ struct geni_se {
/* GENI_FORCE_DEFAULT_REG fields */
#define FORCE_DEFAULT BIT(0)
+/* GENI_OUTPUT_CTRL fields */
+#define GENI_IO_MUX_0_EN BIT(0)
+
/* GENI_STATUS fields */
#define M_GENI_CMD_ACTIVE BIT(0)
#define S_GENI_CMD_ACTIVE BIT(12)
@@ -130,6 +136,9 @@ struct geni_se {
/* GENI_CLK_SEL fields */
#define CLK_SEL_MSK GENMASK(2, 0)
+/* SE_GENI_CFG_SEQ_START fields */
+#define START_TRIGGER BIT(0)
+
/* SE_GENI_DMA_MODE_EN */
#define GENI_DMA_MODE_EN BIT(0)
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index f523ceabd059..8923387a7405 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -6,6 +6,8 @@
#include <linux/bug.h>
#include <linux/lockdep_types.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
@@ -370,6 +372,7 @@ struct sdw_dpn_prop {
* @clock_reg_supported: the Peripheral implements the clock base and scale
* registers introduced with the SoundWire 1.2 specification. SDCA devices
* do not need to set this boolean property as the registers are required.
+ * @use_domain_irq: call actual IRQ handler on slave, as well as callback
*/
struct sdw_slave_prop {
u32 mipi_revision;
@@ -394,6 +397,7 @@ struct sdw_slave_prop {
u8 scp_int1_mask;
u32 quirks;
bool clock_reg_supported;
+ bool use_domain_irq;
};
#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
@@ -641,6 +645,7 @@ struct sdw_slave_ops {
* struct sdw_slave - SoundWire Slave
* @id: MIPI device ID
* @dev: Linux device
+ * @irq: IRQ number
* @status: Status reported by the Slave
* @bus: Bus handle
* @prop: Slave properties
@@ -670,6 +675,7 @@ struct sdw_slave_ops {
struct sdw_slave {
struct sdw_slave_id id;
struct device dev;
+ int irq;
enum sdw_slave_status status;
struct sdw_bus *bus;
struct sdw_slave_prop prop;
@@ -885,6 +891,7 @@ struct sdw_master_ops {
* is used to compute and program bus bandwidth, clock, frame shape,
* transport and port parameters
* @debugfs: Bus debugfs
+ * @domain: IRQ domain
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
* @bank_switch_timeout: Bank switch timeout computed
@@ -920,6 +927,8 @@ struct sdw_bus {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
#endif
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 4658e7801b42..0916cb9bcb0a 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -19,7 +19,7 @@ struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
u8 dma_burst_size;
- bool is_slave;
+ bool is_target;
/* DMA engine specific config */
bool (*dma_filter)(struct dma_chan *chan, void *param);
@@ -31,7 +31,7 @@ struct pxa2xx_spi_controller {
};
/*
- * The controller specific data for SPI slave devices
+ * The controller specific data for SPI target devices
* (resides in spi_board_info.controller_data),
* copied to spi_device.platform_data ... mostly for
* DMA tuning.
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index dc2a0cbd210d..f950d280461b 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -3,8 +3,8 @@
#define __SPI_SH_MSIOF_H__
enum {
- MSIOF_SPI_MASTER,
- MSIOF_SPI_SLAVE,
+ MSIOF_SPI_HOST,
+ MSIOF_SPI_TARGET,
};
struct sh_msiof_spi_info {
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 32c94eae8926..7f8b478fdeb3 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,18 +6,19 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/completion.h>
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
-#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
-#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
#include <uapi/linux/spi/spi.h>
-#include <linux/acpi.h>
-#include <linux/u64_stats_sync.h>
struct dma_chan;
struct software_node;
@@ -36,7 +37,7 @@ extern struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
- * @syncp: seqcount to protect members in this struct for per-cpu udate
+ * @syncp: seqcount to protect members in this struct for per-cpu update
* on 32-bit systems
*
* @messages: number of spi-messages handled
@@ -55,7 +56,7 @@ extern struct bus_type spi_bus_type;
* @bytes_rx: number of bytes received from device
*
* @transfer_bytes_histo:
- * transfer bytes histogramm
+ * transfer bytes histogram
*
* @transfers_split_maxsize:
* number of transfers that have been split because of
@@ -156,7 +157,7 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* the device will bind to the named driver and only the named driver.
* Do not set directly, because core frees it; use driver_set_override() to
* set or clear it.
- * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
+ * @cs_gpiod: GPIO descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
* @word_delay: delay to be inserted between consecutive
* words of a transfer
@@ -212,7 +213,7 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- struct gpio_desc *cs_gpiod; /* Chip select gpio desc */
+ struct gpio_desc *cs_gpiod; /* Chip select GPIO descriptor */
struct spi_delay word_delay; /* Inter-word delay */
/* CS delays */
struct spi_delay cs_setup;
@@ -223,7 +224,7 @@ struct spi_device {
struct spi_statistics __percpu *pcpu_statistics;
/*
- * likely need more hooks for more protocol options affecting how
+ * Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
@@ -299,11 +300,11 @@ static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_d
/**
* struct spi_driver - Host side "protocol" driver
* @id_table: List of SPI devices supported by this driver
- * @probe: Binds this driver to the spi device. Drivers can verify
+ * @probe: Binds this driver to the SPI device. Drivers can verify
* that the device is actually present, and may need to configure
* characteristics (such as bits_per_word) which weren't needed for
* the initial configuration done during system setup.
- * @remove: Unbinds this driver from the spi device
+ * @remove: Unbinds this driver from the SPI device
* @shutdown: Standard shutdown callback used during system state
* transitions such as powerdown/halt and kexec
* @driver: SPI device drivers should initialize the name and owner
@@ -415,7 +416,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @queued: whether this controller is providing an internal message queue
* @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
+ * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
* @cur_msg: the currently in-flight message
* @cur_msg_completion: a completion for the current in-flight message
@@ -473,7 +474,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @target_abort: abort the ongoing transfer request on an SPI target controller
- * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+ * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
@@ -500,7 +501,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
* @irq_flags: Interrupt enable state during PTP system timestamping
- * @fallback: fallback to pio if dma transfer return failure with
+ * @fallback: fallback to PIO if DMA transfer return failure with
* SPI_TRANS_FAIL_NO_START.
* @queue_empty: signal green light for opportunistically skipping the queue
* for spi_sync transfers.
@@ -522,15 +523,17 @@ struct spi_controller {
struct list_head list;
- /* Other than negative (== assign one dynamically), bus_num is fully
- * board-specific. usually that simplifies to being SOC-specific.
- * example: one SOC has three SPI controllers, numbered 0..2,
- * and one board's schematics might show it using SPI-2. software
+ /*
+ * Other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. Usually that simplifies to being SoC-specific.
+ * example: one SoC has three SPI controllers, numbered 0..2,
+ * and one board's schematics might show it using SPI-2. Software
* would normally use bus_num=2 for that controller.
*/
s16 bus_num;
- /* chipselects will be integral to many controllers; some others
+ /*
+ * Chipselects will be integral to many controllers; some others
* might use board-specific GPIOs.
*/
u16 num_chipselect;
@@ -562,8 +565,7 @@ struct spi_controller {
#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
-
-#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select slave */
/* Flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
@@ -576,8 +578,8 @@ struct spi_controller {
};
/*
- * on some hardware transfer / message size may be constrained
- * the limit may depend on device transfer settings
+ * On some hardware, transfer / message size may be constrained;
+ * the limit may depend on device transfer settings.
*/
size_t (*max_transfer_size)(struct spi_device *spi);
size_t (*max_message_size)(struct spi_device *spi);
@@ -595,7 +597,8 @@ struct spi_controller {
/* Flag indicating that the SPI bus is locked for exclusive use */
bool bus_lock_flag;
- /* Setup mode and clock, etc (spi driver may call many times).
+ /*
+ * Setup mode and clock, etc (SPI driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
* device are active. DO NOT UPDATE SHARED REGISTERS in ways
@@ -613,18 +616,19 @@ struct spi_controller {
*/
int (*set_cs_timing)(struct spi_device *spi);
- /* Bidirectional bulk transfers
+ /*
+ * Bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
* just to add the message to the queue.
* + For now there's no remove-from-queue operation, or
* any other request management
- * + To a given spi_device, message queueing is pure fifo
+ * + To a given spi_device, message queueing is pure FIFO
*
* + The controller's main job is to process its message queue,
* selecting a chip (for masters), then transferring data
* + If there are multiple spi_device children, the i/o queue
- * arbitration algorithm is unspecified (round robin, fifo,
+ * arbitration algorithm is unspecified (round robin, FIFO,
* priority, reservations, preemption, etc)
*
* + Chipselect stays active during the entire message
@@ -705,7 +709,7 @@ struct spi_controller {
const struct spi_controller_mem_ops *mem_ops;
const struct spi_controller_mem_caps *mem_caps;
- /* gpio chip select */
+ /* GPIO chip select */
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
s8 unused_native_cs;
@@ -789,7 +793,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off);
-/* The spi driver core manages memory for the spi_controller classdev */
+/* The SPI driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
@@ -878,13 +882,13 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
void *res);
/**
- * struct spi_res - spi resource management structure
+ * struct spi_res - SPI resource management structure
* @entry: list entry
* @release: release code called prior to freeing this resource
* @data: extra data allocated for the specific use-case
*
- * this is based on ideas from devres, but focused on life-cycle
- * management during spi_message processing
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
*/
struct spi_res {
struct list_head entry;
@@ -902,7 +906,7 @@ struct spi_res {
*
* The spi_messages themselves consist of a series of read+write transfer
* segments. Those segments always read the same number of bits as they
- * write; but one or the other is easily ignored by passing a null buffer
+ * write; but one or the other is easily ignored by passing a NULL buffer
* pointer. (This is unlike most types of I/O API, because SPI hardware
* is full duplex.)
*
@@ -913,8 +917,8 @@ struct spi_res {
/**
* struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe memory), or NULL
- * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_buf: data to be written (DMA-safe memory), or NULL
+ * @rx_buf: data to be read (DMA-safe memory), or NULL
* @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
* @tx_nbits: number of bits used for writing. If 0 the default
@@ -937,7 +941,7 @@ struct spi_res {
* @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
- * transfer this transfer. Set to 0 if the spi bus driver does
+ * transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
@@ -966,16 +970,16 @@ struct spi_res {
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
* @timestamped: true if the transfer has been timestamped
- * @error: Error status logged by spi controller driver.
+ * @error: Error status logged by SPI controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
* In some cases, they may also want to provide DMA addresses for
* the data being transferred; that may reduce overhead, when the
- * underlying driver uses dma.
+ * underlying driver uses DMA.
*
- * If the transmit buffer is null, zeroes will be shifted out
- * while filling @rx_buf. If the receive buffer is null, the data
+ * If the transmit buffer is NULL, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is NULL, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
@@ -1009,7 +1013,7 @@ struct spi_res {
* Some devices need protocol transactions to be built from a series of
* spi_message submissions, where the content of one message is determined
* by the results of previous messages and where the whole transaction
- * ends when the chipselect goes intactive.
+ * ends when the chipselect goes inactive.
*
* When SPI can transfer in 1x,2x or 4x. It can get this transfer information
* from device through @tx_nbits and @rx_nbits. In Bi-direction, these
@@ -1023,10 +1027,11 @@ struct spi_res {
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
- /* It's ok if tx_buf == rx_buf (right?)
- * for MicroWire, one buffer must be null
- * buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping
+ /*
+ * It's okay if tx_buf == rx_buf (right?).
+ * For MicroWire, one buffer must be NULL.
+ * Buffers must work with dma_*map_single() calls, unless
+ * spi_message.is_dma_mapped reports a pre-existing mapping.
*/
const void *tx_buf;
void *rx_buf;
@@ -1046,9 +1051,9 @@ struct spi_transfer {
unsigned tx_nbits:3;
unsigned rx_nbits:3;
unsigned timestamped:1;
-#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
-#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
-#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
+#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
u8 bits_per_word;
struct spi_delay delay;
struct spi_delay cs_change_delay;
@@ -1069,7 +1074,7 @@ struct spi_transfer {
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
+ * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual
* addresses for each transfer buffer
* @complete: called to report transaction completions
* @context: the argument to complete() when it's called
@@ -1079,8 +1084,10 @@ struct spi_transfer {
* @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
- * @resources: for resource management when the spi message is processed
+ * @resources: for resource management when the SPI message is processed
* @prepared: spi_prepare_message was called for this message
+ * @t: for use with spi_message_alloc() when message and transfers have
+ * been allocated together
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1106,7 +1113,8 @@ struct spi_message {
/* spi_prepare_message() was called for this message */
bool prepared;
- /* REVISIT: we might want a flag affecting the behavior of the
+ /*
+ * REVISIT: we might want a flag affecting the behavior of the
* last transfer ... allowing things like "read 16 bit length L"
* immediately followed by "read L bytes". Basically imposing
* a specific message scheduling algorithm.
@@ -1124,15 +1132,19 @@ struct spi_message {
unsigned frame_length;
unsigned actual_length;
- /* For optional use by whatever driver currently owns the
+ /*
+ * For optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_controller controller driver.
*/
struct list_head queue;
void *state;
- /* List of spi_res reources when the spi message is processed */
+ /* List of spi_res resources when the SPI message is processed */
struct list_head resources;
+
+ /* For embedding transfers into the memory of the message */
+ struct spi_transfer t[];
};
static inline void spi_message_init_no_memset(struct spi_message *m)
@@ -1168,7 +1180,7 @@ spi_transfer_delay_exec(struct spi_transfer *t)
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
- * @xfers: An array of spi transfers
+ * @xfers: An array of SPI transfers
* @num_xfers: Number of items in the xfer array
*
* This function initializes the given spi_message and adds each spi_transfer in
@@ -1185,24 +1197,21 @@ struct spi_transfer *xfers, unsigned int num_xfers)
spi_message_add_tail(&xfers[i], m);
}
-/* It's fine to embed message and transaction structures in other data
+/*
+ * It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
-
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
struct spi_message *m;
- m = kzalloc(sizeof(struct spi_message)
- + ntrans * sizeof(struct spi_transfer),
- flags);
+ m = kzalloc(struct_size(m, t, ntrans), flags);
if (m) {
unsigned i;
- struct spi_transfer *t = (struct spi_transfer *)(m + 1);
spi_message_init_no_memset(m);
- for (i = 0; i < ntrans; i++, t++)
- spi_message_add_tail(t, m);
+ for (i = 0; i < ntrans; i++)
+ spi_message_add_tail(&m->t[i], m);
}
return m;
}
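With the flexible-array member @t and struct_size() above, the embedded transfers can be reached directly instead of via pointer arithmetic past the message. A hedged caller-side sketch; the my_send_cmd() helper and its two-transfer command/response layout are illustrative assumptions, only spi_message_alloc(), spi_message_free(), spi_sync() and the new m->t[] member come from the header:

	#include <linux/spi/spi.h>

	static int my_send_cmd(struct spi_device *spi, const u8 *cmd, size_t cmd_len,
			       u8 *resp, size_t resp_len)
	{
		struct spi_message *m = spi_message_alloc(2, GFP_KERNEL);
		int ret;

		if (!m)
			return -ENOMEM;

		/* spi_message_alloc() has already linked m->t[0..1] onto
		 * m->transfers; only the buffers remain to be filled in. */
		m->t[0].tx_buf = cmd;
		m->t[0].len = cmd_len;
		m->t[1].rx_buf = resp;
		m->t[1].len = resp_len;

		ret = spi_sync(spi, m);
		spi_message_free(m);
		return ret;
	}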
@@ -1291,7 +1300,7 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* replacements that have occurred
* so that they can get reverted
* @release: some extra release code to get executed prior to
- * relasing this structure
+ * releasing this structure
* @extradata: pointer to some extra data if requested or NULL
* @replaced_transfers: transfers that have been replaced and which need
* to get restored
@@ -1301,9 +1310,9 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* @inserted_transfers: array of spi_transfers of array-size @inserted,
* that have been replacing replaced_transfers
*
- * note: that @extradata will point to @inserted_transfers[@inserted]
+ * Note: that @extradata will point to @inserted_transfers[@inserted]
* if some extra allocation is requested, so alignment will be the same
- * as for spi_transfers
+ * as for spi_transfers.
*/
struct spi_replaced_transfers {
spi_replaced_release_t release;
@@ -1329,7 +1338,8 @@ extern int spi_split_transfers_maxwords(struct spi_controller *ctlr,
/*---------------------------------------------------------------------------*/
-/* All these synchronous SPI transfer routines are utilities layered
+/*
+ * All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
*/
@@ -1472,7 +1482,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
*
* Callable only from contexts that can sleep.
*
- * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * Return: the (unsigned) sixteen bit number returned by the device in CPU
* endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
@@ -1500,7 +1510,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* As a rule, SPI devices can't be probed. Instead, board init code
* provides a table listing the devices which are present, with enough
* information to bind and set up the device's driver. There's basic
- * support for nonstatic configurations too; enough to handle adding
+ * support for non-static configurations too; enough to handle adding
* parport adapters, or microcontrollers acting as USB-to-SPI bridges.
*/
@@ -1537,12 +1547,13 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* are active in some dynamic board configuration models.
*/
struct spi_board_info {
- /* The device name and module name are coupled, like platform_bus;
+ /*
+ * The device name and module name are coupled, like platform_bus;
* "modalias" is normally the driver name.
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
- * irq is copied too
+ * IRQ is copied too.
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
@@ -1554,7 +1565,8 @@ struct spi_board_info {
u32 max_speed_hz;
- /* bus_num is board specific and matches the bus_num of some
+ /*
+ * bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
* chip_select reflects how this chip is wired to that master;
@@ -1563,12 +1575,14 @@ struct spi_board_info {
u16 bus_num;
u16 chip_select;
- /* mode becomes spi_device.mode, and is essential for chips
+ /*
+ * mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u32 mode;
- /* ... may need additional spi_device chip config data here.
+ /*
+ * ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
* needed to behave without being bound to a driver:
* - quirks like clock rate mattering when not selected
@@ -1585,7 +1599,8 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
-/* If you're hotplugging an adapter with devices (parport, usb, etc)
+/*
+ * If you're hotplugging an adapter with devices (parport, USB, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_controller().
@@ -1623,10 +1638,6 @@ spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
#define spi_master spi_controller
#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
-#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
-#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
-#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
#define spi_master_set_devdata(_ctlr, _data) \
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index ebd72491af99..447133171d95 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -48,6 +48,10 @@ void srcu_drive_gp(struct work_struct *wp);
#define DEFINE_STATIC_SRCU(name) \
static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
+// Dummy structure for srcu_notifier_head.
+struct srcu_usage { };
+#define __SRCU_USAGE_INIT(name) { }
+
void synchronize_srcu(struct srcu_struct *ssp);
/*
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index b2ccd827bb80..ce89cc3e4913 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -223,7 +223,20 @@ struct dwmac4_addrs {
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
- int interface;
+ /* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media
+ * ^ ^
+ * mac_interface phy_interface
+ *
+ * mac_interface is the MAC-side interface, which may be the same
+ * as phy_interface if there is no intervening PCS. If there is a
+ * PCS, then mac_interface describes the interface mode between the
+ * MAC and PCS, and phy_interface describes the interface mode
+ * between the PCS and PHY.
+ */
+ phy_interface_t mac_interface;
+ /* phy_interface is the PHY-side interface - the interface used by
+ * an attached PHY.
+ */
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
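To make the mac_interface/phy_interface split concrete, a hedged sketch of how a platform glue driver might fill the two fields; the my_glue_set_interfaces() helper and the specific GMII/SGMII modes are illustrative assumptions, only the field semantics come from the comment above:

	#include <linux/phy.h>
	#include <linux/stmmac.h>

	static void my_glue_set_interfaces(struct plat_stmmacenet_data *plat_dat,
					   bool has_pcs)
	{
		if (has_pcs) {
			/* Assumed example: MAC <-> PCS runs GMII, PCS <-> PHY runs SGMII. */
			plat_dat->mac_interface = PHY_INTERFACE_MODE_GMII;
			plat_dat->phy_interface = PHY_INTERFACE_MODE_SGMII;
		} else {
			/* No PCS in the path: both fields describe the same link. */
			plat_dat->mac_interface = plat_dat->phy_interface;
		}
	}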
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 6a8c22b8c2a5..d324419482a0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -146,7 +146,7 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
-extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q, int wake_flags);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 03e3d0121d5e..c0cb22cd607d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -284,22 +284,6 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
#endif
/*
- * Called before coming back to user-mode. Returning to user-mode with an
- * address limit different than USER_DS can allow to overwrite kernel memory.
- */
-static inline void addr_limit_user_check(void)
-{
-#ifdef TIF_FSCHECK
- if (!test_thread_flag(TIF_FSCHECK))
- return;
-#endif
-
-#ifdef TIF_FSCHECK
- clear_thread_flag(TIF_FSCHECK);
-#endif
-}
-
-/*
* These syscall function prototypes are kept in the same order as
* include/uapi/asm-generic/unistd.h. Architecture specific entries go below,
* followed by deprecated or obsolete system calls.
@@ -438,8 +422,10 @@ asmlinkage long sys_chdir(const char __user *filename);
asmlinkage long sys_fchdir(unsigned int fd);
asmlinkage long sys_chroot(const char __user *filename);
asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
-asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
+asmlinkage long sys_fchmodat(int dfd, const char __user *filename,
umode_t mode);
+asmlinkage long sys_fchmodat2(int dfd, const char __user *filename,
+ umode_t mode, unsigned int flags);
asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
gid_t group, int flag);
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index dee66ade89a0..b449a46766f5 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -81,11 +81,13 @@ struct thermal_zone_device_ops {
* @temperature: temperature value in millicelsius
* @hysteresis: relative hysteresis in millicelsius
* @type: trip point type
+ * @priv: pointer to driver data associated with this trip
*/
struct thermal_trip {
int temperature;
int hysteresis;
enum thermal_trip_type type;
+ void *priv;
};
struct thermal_cooling_device_ops {
@@ -287,6 +289,9 @@ int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
const struct thermal_trip *trip);
+int for_each_thermal_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
@@ -323,6 +328,10 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
+void thermal_zone_device_exec(struct thermal_zone_device *tz,
+ void (*cb)(struct thermal_zone_device *,
+ unsigned long),
+ unsigned long data);
struct thermal_cooling_device *thermal_cooling_device_register(const char *,
void *, const struct thermal_cooling_device_ops *);
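A hedged sketch of the new trip iterator: only the for_each_thermal_trip() prototype and the struct thermal_trip fields come from the patch; the counting callback, the my_count_passive() wrapper, and the assumption that a non-zero callback return stops the walk are illustrative.

	#include <linux/thermal.h>

	static int count_passive_trips(struct thermal_trip *trip, void *data)
	{
		int *count = data;

		if (trip->type == THERMAL_TRIP_PASSIVE)
			(*count)++;

		return 0;	/* non-zero is assumed to stop the walk */
	}

	static int my_count_passive(struct thermal_zone_device *tz)
	{
		int passive = 0;
		int ret = for_each_thermal_trip(tz, count_passive_trips, &passive);

		return ret ? ret : passive;
	}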
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 7038104463e4..bb466eec01e4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -108,12 +108,15 @@ bool torture_must_stop(void);
bool torture_must_stop_irq(void);
void torture_kthread_stopping(char *title);
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
- char *f, struct task_struct **tp);
+ char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp));
void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_create_kthread(n, arg, tp) \
_torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
- "Failed to create " #n, &(tp))
+ "Failed to create " #n, &(tp), NULL)
+#define torture_create_kthread_cb(n, arg, tp, cbf) \
+ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+ "Failed to create " #n, &(tp), cbf)
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5b85cf18c350..c1a0a19d80fb 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -59,6 +59,17 @@ int trace_raw_output_prep(struct trace_iterator *iter,
extern __printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
+/* Used to find the offset and length of dynamic fields in trace events */
+struct trace_dynamic_info {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ u16 offset;
+ u16 len;
+#else
+ u16 len;
+ u16 offset;
+#endif
+};
+
/*
* The trace entry - the most basic unit of tracing. This is what
* is printed in the end as a single line in the trace output, such as:
diff --git a/include/linux/uio.h b/include/linux/uio.h
index ff81e5ccaef2..42bce38a8e87 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -163,7 +163,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
return ret;
}
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset,
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
size_t bytes, struct iov_iter *i);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
@@ -184,6 +184,13 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
{
return copy_page_to_iter(&folio->page, offset, bytes, i);
}
+
+static inline size_t copy_folio_from_iter_atomic(struct folio *folio,
+ size_t offset, size_t bytes, struct iov_iter *i)
+{
+ return copy_page_from_iter_atomic(&folio->page, offset, bytes, i);
+}
+
size_t copy_page_to_iter_nofault(struct page *page, unsigned offset,
size_t bytes, struct iov_iter *i);
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
index 20d88b1defc3..287e9d83fb8b 100644
--- a/include/linux/usb/r8152.h
+++ b/include/linux/usb/r8152.h
@@ -29,6 +29,7 @@
#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
+#define VENDOR_ID_DLINK 0x2001
#if IS_REACHABLE(CONFIG_USB_RTL8152)
extern u8 rtl8152_get_version(struct usb_interface *intf);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a0307b516b09..5ec7739400f4 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -210,6 +210,7 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
}
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
unsigned int mode, void *key, wait_queue_entry_t *bookmark);
@@ -237,6 +238,8 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
+#define wake_up_poll_on_current_cpu(x, m) \
+ __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fba937999fbf..083387c00f0c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -375,11 +375,6 @@ void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
-void folio_account_redirty(struct folio *folio);
-static inline void account_page_redirty(struct page *page)
-{
- folio_account_redirty(page_folio(page));
-}
bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
bool redirty_page_for_writepage(struct writeback_control *, struct page *);
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d591ef59aa98..d20051865800 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -114,13 +114,15 @@ struct simple_xattr {
};
void simple_xattrs_init(struct simple_xattrs *xattrs);
-void simple_xattrs_free(struct simple_xattrs *xattrs);
+void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
+size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
+void simple_xattr_free(struct simple_xattr *xattr);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
-int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
- const void *value, size_t size, int flags,
- ssize_t *removed_size);
+struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
+ const char *name, const void *value,
+ size_t size, int flags);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size);
void simple_xattr_add(struct simple_xattrs *xattrs,
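The simple_xattr_set() signature change shifts ownership of any displaced entry to the caller. A hedged sketch of the assumed calling convention; the my_store_xattr() wrapper is illustrative, and the IS_ERR()/NULL handling is an assumption about the new return semantics rather than something spelled out in this header:

	static int my_store_xattr(struct simple_xattrs *xattrs, const char *name,
				  const void *value, size_t size, int flags)
	{
		struct simple_xattr *old;

		old = simple_xattr_set(xattrs, name, value, size, flags);
		if (IS_ERR(old))
			return PTR_ERR(old);

		/* A non-NULL, non-error return is assumed to be the replaced
		 * entry, which the caller now owns and frees. */
		if (old)
			simple_xattr_free(old);

		return 0;
	}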
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 34c50f7273d1..a9ef43b33748 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6055,6 +6055,7 @@ void wiphy_delayed_work_flush(struct wiphy *wiphy,
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
* @cqm_config: (private) nl80211 RSSI monitor state
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
@@ -6125,7 +6126,8 @@ struct wireless_dev {
} wext;
#endif
- struct cfg80211_cqm_config *cqm_config;
+ struct wiphy_work cqm_rssi_work;
+ struct cfg80211_cqm_config __rcu *cqm_config;
struct list_head pmsr_list;
spinlock_t pmsr_lock;
@@ -7290,7 +7292,7 @@ struct cfg80211_rx_assoc_resp {
int uapsd_queues;
const u8 *ap_mld_addr;
struct {
- const u8 *addr;
+ u8 addr[ETH_ALEN] __aligned(2);
struct cfg80211_bss *bss;
u16 status;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
diff --git a/include/net/devlink.h b/include/net/devlink.h
index f7fec0791acc..29fd1b4ee654 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -1583,6 +1583,24 @@ void devlink_free(struct devlink *devlink);
* Should be used by device drivers to set
* the admin state of a function managed
* by the devlink port.
+ * @port_fn_ipsec_crypto_get: Callback used to get port function's ipsec_crypto
+ * capability. Should be used by device drivers
+ * to report the current state of ipsec_crypto
+ * capability of a function managed by the devlink
+ * port.
+ * @port_fn_ipsec_crypto_set: Callback used to set port function's ipsec_crypto
+ * capability. Should be used by device drivers to
+ * enable/disable ipsec_crypto capability of a
+ * function managed by the devlink port.
+ * @port_fn_ipsec_packet_get: Callback used to get port function's ipsec_packet
+ * capability. Should be used by device drivers
+ * to report the current state of ipsec_packet
+ * capability of a function managed by the devlink
+ * port.
+ * @port_fn_ipsec_packet_set: Callback used to set port function's ipsec_packet
+ * capability. Should be used by device drivers to
+ * enable/disable ipsec_packet capability of a
+ * function managed by the devlink port.
*
* Note: Driver should return -EOPNOTSUPP if it doesn't support
* port function (@port_fn_*) handling for a particular port.
@@ -1620,6 +1638,18 @@ struct devlink_port_ops {
int (*port_fn_state_set)(struct devlink_port *port,
enum devlink_port_fn_state state,
struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_crypto_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_crypto_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_packet_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ int (*port_fn_ipsec_packet_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
};
void devlink_port_init(struct devlink *devlink,
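A hedged driver-side sketch of wiring up the new ipsec_crypto callbacks; only the devlink_port_ops member names and signatures come from the change above, while struct my_port, its embedded dl_port and the enabled flag are illustrative assumptions:

	struct my_port {
		struct devlink_port dl_port;
		bool ipsec_crypto_enabled;
	};

	static int my_port_fn_ipsec_crypto_get(struct devlink_port *devlink_port,
						bool *is_enable,
						struct netlink_ext_ack *extack)
	{
		struct my_port *p = container_of(devlink_port, struct my_port, dl_port);

		*is_enable = p->ipsec_crypto_enabled;
		return 0;
	}

	static int my_port_fn_ipsec_crypto_set(struct devlink_port *devlink_port,
						bool enable,
						struct netlink_ext_ack *extack)
	{
		struct my_port *p = container_of(devlink_port, struct my_port, dl_port);

		p->ipsec_crypto_enabled = enable;
		return 0;
	}

	static const struct devlink_port_ops my_port_ops = {
		.port_fn_ipsec_crypto_get = my_port_fn_ipsec_crypto_get,
		.port_fn_ipsec_crypto_set = my_port_fn_ipsec_crypto_set,
	};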
diff --git a/include/net/tls.h b/include/net/tls.h
index 06fca9160346..a2b44578dcb7 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -51,16 +51,6 @@
struct tls_rec;
-struct tls_cipher_size_desc {
- unsigned int iv;
- unsigned int key;
- unsigned int salt;
- unsigned int tag;
- unsigned int rec_seq;
-};
-
-extern const struct tls_cipher_size_desc tls_cipher_size_desc[];
-
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index a8206f5332e9..b2db2c2f1c57 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -38,7 +38,6 @@ struct find_free_extent_ctl;
__print_symbolic(type, \
{ BTRFS_TREE_BLOCK_REF_KEY, "TREE_BLOCK_REF" }, \
{ BTRFS_EXTENT_DATA_REF_KEY, "EXTENT_DATA_REF" }, \
- { BTRFS_EXTENT_REF_V0_KEY, "EXTENT_REF_V0" }, \
{ BTRFS_SHARED_BLOCK_REF_KEY, "SHARED_BLOCK_REF" }, \
{ BTRFS_SHARED_DATA_REF_KEY, "SHARED_DATA_REF" })
@@ -2482,7 +2481,7 @@ DECLARE_EVENT_CLASS(btrfs_raid56_bio,
__entry->offset, __entry->opf, __entry->physical, __entry->len)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
+DEFINE_EVENT(btrfs_raid56_bio, raid56_read,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
@@ -2490,32 +2489,7 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial,
TP_ARGS(rbio, bio, trace_info)
);
-DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
-
- TP_ARGS(rbio, bio, trace_info)
-);
-
-
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
-
- TP_ARGS(rbio, bio, trace_info)
-);
-
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read,
- TP_PROTO(const struct btrfs_raid_bio *rbio,
- const struct bio *bio,
- const struct raid56_bio_trace_info *trace_info),
-
- TP_ARGS(rbio, bio, trace_info)
-);
-
-DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover,
+DEFINE_EVENT(btrfs_raid56_bio, raid56_write,
TP_PROTO(const struct btrfs_raid_bio *rbio,
const struct bio *bio,
const struct raid56_bio_trace_info *trace_info),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index 71dbe8bfa7db..e18684b02c3d 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -80,11 +80,11 @@ TRACE_EVENT(erofs_fill_inode,
__entry->blkaddr, __entry->ofs)
);
-TRACE_EVENT(erofs_readpage,
+TRACE_EVENT(erofs_read_folio,
- TP_PROTO(struct page *page, bool raw),
+ TP_PROTO(struct folio *folio, bool raw),
- TP_ARGS(page, raw),
+ TP_ARGS(folio, raw),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -96,11 +96,11 @@ TRACE_EVENT(erofs_readpage,
),
TP_fast_assign(
- __entry->dev = page->mapping->host->i_sb->s_dev;
- __entry->nid = EROFS_I(page->mapping->host)->nid;
- __entry->dir = S_ISDIR(page->mapping->host->i_mode);
- __entry->index = page->index;
- __entry->uptodate = PageUptodate(page);
+ __entry->dev = folio->mapping->host->i_sb->s_dev;
+ __entry->nid = EROFS_I(folio->mapping->host)->nid;
+ __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
+ __entry->index = folio->index;
+ __entry->uptodate = folio_test_uptodate(folio);
__entry->raw = raw;
),
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index c0248a8fa79c..e63d4a24d879 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -167,7 +167,7 @@ TRACE_EVENT(spi_message_done,
);
/*
- * consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
+ * Consider a buffer valid if non-NULL and if it doesn't match the dummy buffer
* that only exists to work with controllers that have SPI_CONTROLLER_MUST_TX or
* SPI_CONTROLLER_MUST_RX.
*/
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index fd6c1cb585db..abe087c53b4b 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -820,8 +820,11 @@ __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+
#undef __NR_syscalls
-#define __NR_syscalls 452
+#define __NR_syscalls 453
/*
* 32 bit systems traditionally used different
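A hedged user-space sketch of the new syscall; only the number (452) and the four-argument prototype come from the patch. The AT_SYMLINK_NOFOLLOW flag, the mode and the target path are assumptions about intended usage:

	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_fchmodat2
	#define __NR_fchmodat2 452
	#endif

	int main(void)
	{
		/* Assumed usage: change the mode of a path relative to AT_FDCWD
		 * without following a trailing symlink. */
		if (syscall(__NR_fchmodat2, AT_FDCWD, "some-file", 0600,
			    AT_SYMLINK_NOFOLLOW) != 0)
			return 1;
		return 0;
	}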
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index ab38d0f411fa..fc3c32186d7e 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -220,7 +220,11 @@
#define BTRFS_EXTENT_DATA_REF_KEY 178
-#define BTRFS_EXTENT_REF_V0_KEY 180
+/*
+ * Obsolete key. Definition removed in 6.6, value may be reused in the future.
+ *
+ * #define BTRFS_EXTENT_REF_V0_KEY 180
+ */
#define BTRFS_SHARED_BLOCK_REF_KEY 182
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 3782d4219ac9..03875e078be8 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -661,6 +661,8 @@ enum devlink_resource_unit {
enum devlink_port_fn_attr_cap {
DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT,
DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT,
/* Add new caps above */
__DEVLINK_PORT_FN_ATTR_CAPS_MAX,
@@ -669,6 +671,8 @@ enum devlink_port_fn_attr_cap {
#define DEVLINK_PORT_FN_CAP_ROCE _BITUL(DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT)
#define DEVLINK_PORT_FN_CAP_MIGRATABLE \
_BITUL(DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT)
+#define DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_CRYPTO_BIT)
+#define DEVLINK_PORT_FN_CAP_IPSEC_PACKET _BITUL(DEVLINK_PORT_FN_ATTR_CAP_IPSEC_PACKET_BIT)
enum devlink_port_function_attr {
DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 0c8cf359ea5b..e0e159138331 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -443,7 +443,6 @@ typedef struct elf64_shdr {
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
-#define NT_RISCV_VECTOR 0x900 /* RISC-V vector registers */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 8eb0d7b758d2..bb242fdcfe6b 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -100,8 +100,9 @@ enum fsconfig_command {
FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */
FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */
FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */
- FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */
+ FSCONFIG_CMD_CREATE = 6, /* Create new or reuse existing superblock */
FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */
+ FSCONFIG_CMD_CREATE_EXCL = 8, /* Create new superblock, fail if reusing existing superblock */
};
/*
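A hedged user-space sketch of the new exclusive-create command; only FSCONFIG_CMD_CREATE_EXCL itself comes from the patch. The raw syscall() calls, the "ext4" source and the minimal error handling are assumptions (fsopen()/fsconfig() may not have libc wrappers on every system):

	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/mount.h>

	int main(void)
	{
		int fsfd = syscall(SYS_fsopen, "ext4", 0);

		if (fsfd < 0)
			return 1;

		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);

		/* Unlike FSCONFIG_CMD_CREATE, this is expected to fail rather
		 * than silently reuse an already-mounted ext4 superblock. */
		if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE_EXCL, NULL, NULL, 0) < 0) {
			close(fsfd);
			return 1;
		}

		close(fsfd);
		return 0;
	}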
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 37675437b768..39c6a250dd1b 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1339,7 +1339,8 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0x8 available */
+/* 5-0x7 available */
+#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h
new file mode 100644
index 000000000000..b3845a9ff5fd
--- /dev/null
+++ b/include/uapi/linux/psp-dbc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD Dynamic Boost Control (DBC)
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_DBC_USER_H__
+#define __PSP_DBC_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * DOC: AMD Dynamic Boost Control (DBC) interface
+ */
+
+#define DBC_NONCE_SIZE 16
+#define DBC_SIG_SIZE 32
+#define DBC_UID_SIZE 16
+
+/**
+ * struct dbc_user_nonce - Nonce exchange structure (input/output).
+ * @auth_needed: Whether the PSP should authenticate this request (input).
+ * 0: no authentication, PSP will return single use nonce.
+ * 1: authentication: PSP will return multi-use nonce.
+ * @nonce: 16 byte value used for future authentication (output).
+ * @signature: Optional 32 byte signature created by software using a
+ * previous nonce (input).
+ */
+struct dbc_user_nonce {
+ __u32 auth_needed;
+ __u8 nonce[DBC_NONCE_SIZE];
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_setuid - UID exchange structure (input).
+ * @uid: 16 byte value representing software identity
+ * @signature: 32 byte signature created by software using a previous nonce
+ */
+struct dbc_user_setuid {
+ __u8 uid[DBC_UID_SIZE];
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_param - Parameter exchange structure (input/output).
+ * @msg_index: Message indicating what parameter to set or get (input)
+ * @param: 4 byte parameter, units are message specific. (input/output)
+ * @signature: 32 byte signature.
+ * - When sending a message this is to be created by software
+ * using a previous nonce (input)
+ * - For interpreting results, this signature is updated by the
+ * PSP to allow software to validate the authenticity of the
+ * results.
+ */
+struct dbc_user_param {
+ __u32 msg_index;
+ __u32 param;
+ __u8 signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * Dynamic Boost Control (DBC) IOC
+ *
+ * possible return codes for all DBC IOCTLs:
+ * 0: success
+ * -EINVAL: invalid input
+ * -E2BIG: excess data passed
+ * -EFAULT: failed to copy to/from userspace
+ * -EBUSY: mailbox in recovery or in use
+ * -ENODEV: driver not bound with PSP device
+ * -EACCES: request isn't authorized
+ * -EINVAL: invalid parameter
+ * -ETIMEDOUT: request timed out
+ * -EAGAIN: invalid request for state machine
+ * -ENOENT: not implemented
+ * -ENFILE: overflow
+ * -EPERM: invalid signature
+ * -EIO: unknown error
+ */
+#define DBC_IOC_TYPE 'D'
+
+/**
+ * DBCIOCNONCE - Fetch a nonce from the PSP for authenticating commands.
+ * If a nonce is fetched without authentication it can only
+ * be utilized for one command.
+ * If a nonce is fetched with authentication it can be used
+ * for multiple requests.
+ */
+#define DBCIOCNONCE _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce)
+
+/**
+ * DBCIOCUID - Set the user ID (UID) of a calling process.
+ * The user ID is 8 bytes long. It must be programmed using a
+ * 32 byte signature built using the nonce fetched from
+ * DBCIOCNONCE.
+ * The UID can only be set once until the system is rebooted.
+ */
+#define DBCIOCUID _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid)
+
+/**
+ * DBCIOCPARAM - Set or get a parameter from the PSP.
+ * This request will only work after DBCIOCUID has successfully
+ * set the UID of the calling process.
+ * Whether the parameter is set or get is controlled by the
+ * message ID in the request.
+ * This command must be sent using a 32 byte signature built
+ * using the nonce fetched from DBCIOCNONCE.
+ * When the command succeeds, the 32 byte signature will be
+ * updated by the PSP for software to authenticate the results.
+ */
+#define DBCIOCPARAM _IOWR(DBC_IOC_TYPE, 0x3, struct dbc_user_param)
+
+/**
+ * enum dbc_cmd_msg - Messages utilized by DBCIOCPARAM
+ * @PARAM_GET_FMAX_CAP: Get frequency cap (MHz)
+ * @PARAM_SET_FMAX_CAP: Set frequency cap (MHz)
+ * @PARAM_GET_PWR_CAP: Get socket power cap (mW)
+ * @PARAM_SET_PWR_CAP: Set socket power cap (mW)
+ * @PARAM_GET_GFX_MODE: Get graphics mode (0/1)
+ * @PARAM_SET_GFX_MODE: Set graphics mode (0/1)
+ * @PARAM_GET_CURR_TEMP: Get current temperature (degrees C)
+ * @PARAM_GET_FMAX_MAX: Get maximum allowed value for frequency (MHz)
+ * @PARAM_GET_FMAX_MIN: Get minimum allowed value for frequency (MHz)
+ * @PARAM_GET_SOC_PWR_MAX: Get maximum allowed value for SoC power (mW)
+ * @PARAM_GET_SOC_PWR_MIN: Get minimum allowed value for SoC power (mW)
+ * @PARAM_GET_SOC_PWR_CUR: Get current value for SoC Power (mW)
+ */
+enum dbc_cmd_msg {
+ PARAM_GET_FMAX_CAP = 0x3,
+ PARAM_SET_FMAX_CAP = 0x4,
+ PARAM_GET_PWR_CAP = 0x5,
+ PARAM_SET_PWR_CAP = 0x6,
+ PARAM_GET_GFX_MODE = 0x7,
+ PARAM_SET_GFX_MODE = 0x8,
+ PARAM_GET_CURR_TEMP = 0x9,
+ PARAM_GET_FMAX_MAX = 0xA,
+ PARAM_GET_FMAX_MIN = 0xB,
+ PARAM_GET_SOC_PWR_MAX = 0xC,
+ PARAM_GET_SOC_PWR_MIN = 0xD,
+ PARAM_GET_SOC_PWR_CUR = 0xE,
+};
+
+#endif /* __PSP_DBC_USER_H__ */
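A hedged user-space sketch of the nonce handshake documented above; the DBCIOCNONCE ioctl and struct dbc_user_nonce come from this header, while the /dev/dbc node name is an assumption about how the PSP driver exposes the interface:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/psp-dbc.h>

	int main(void)
	{
		struct dbc_user_nonce req;
		int fd = open("/dev/dbc", O_RDWR);	/* assumed device node */

		if (fd < 0)
			return 1;

		memset(&req, 0, sizeof(req));
		req.auth_needed = 0;	/* unauthenticated: single-use nonce */

		if (ioctl(fd, DBCIOCNONCE, &req) == 0)
			printf("received a %d-byte nonce\n", DBC_NONCE_SIZE);

		close(fd);
		return 0;
	}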
diff --git a/include/uapi/linux/quota.h b/include/uapi/linux/quota.h
index f17c9636a859..52090105b828 100644
--- a/include/uapi/linux/quota.h
+++ b/include/uapi/linux/quota.h
@@ -77,6 +77,7 @@
#define QFMT_VFS_V0 2
#define QFMT_OCFS2 3
#define QFMT_VFS_V1 4
+#define QFMT_SHMEM 5
/* Size of block in which space limits are passed through the quota
* interface */
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0fdc6ef02b94..dbfc9b37fcae 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -115,6 +115,8 @@ struct seccomp_notif_resp {
__u32 flags;
};
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+
/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
@@ -150,4 +152,6 @@ struct seccomp_notif_addfd {
#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
struct seccomp_notif_addfd)
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+
#endif /* _UAPI_LINUX_SECCOMP_H */
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 7837ba4fe728..7c3fc3980881 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -45,3 +45,7 @@
TYPE NAME[]; \
}
#endif
+
+#ifndef __counted_by
+#define __counted_by(m)
+#endif
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index d2029556083e..375718ba4ab6 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -98,6 +98,18 @@ struct privcmd_mmap_resource {
__u64 addr;
};
+/* For privcmd_irqfd::flags */
+#define PRIVCMD_IRQFD_FLAG_DEASSIGN (1 << 0)
+
+struct privcmd_irqfd {
+ void __user *dm_op;
+ __u32 size; /* Size of structure pointed to by dm_op */
+ __u32 fd;
+ __u32 flags;
+ domid_t dom;
+ __u8 pad[2];
+};
+
/*
* @cmd: IOCTL_PRIVCMD_HYPERCALL
* @arg: &privcmd_hypercall_t
@@ -125,5 +137,7 @@ struct privcmd_mmap_resource {
_IOC(_IOC_NONE, 'P', 6, sizeof(domid_t))
#define IOCTL_PRIVCMD_MMAP_RESOURCE \
_IOC(_IOC_NONE, 'P', 7, sizeof(struct privcmd_mmap_resource))
+#define IOCTL_PRIVCMD_IRQFD \
+ _IOC(_IOC_NONE, 'P', 8, sizeof(struct privcmd_irqfd))
#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
diff --git a/include/xen/events.h b/include/xen/events.h
index 95970a2f7695..95d5e28de324 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -75,7 +75,6 @@ void evtchn_put(evtchn_port_t evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq);
-int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
static inline void notify_remote_via_evtchn(evtchn_port_t port)
{
diff --git a/init/Kconfig b/init/Kconfig
index f7f65af4ee12..5e7d4885d1bf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
config PSI
bool "Pressure stall information tracking"
+ select KERNFS
help
Collect metrics that indicate how overcommitted the CPU, memory,
and IO capacity are in the system.
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 1aa015883519..5dfd30b13f48 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
+#include <linux/ktime.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
@@ -71,12 +72,37 @@ static int __init rootwait_setup(char *str)
{
if (*str)
return 0;
- root_wait = 1;
+ root_wait = -1;
return 1;
}
__setup("rootwait", rootwait_setup);
+static int __init rootwait_timeout_setup(char *str)
+{
+ int sec;
+
+ if (kstrtoint(str, 0, &sec) || sec < 0) {
+ pr_warn("ignoring invalid rootwait value\n");
+ goto ignore;
+ }
+
+ if (check_mul_overflow(sec, MSEC_PER_SEC, &root_wait)) {
+ pr_warn("ignoring excessive rootwait value\n");
+ goto ignore;
+ }
+
+ return 1;
+
+ignore:
+ /* Fallback to indefinite wait */
+ root_wait = -1;
+
+ return 1;
+}
+
+__setup("rootwait=", rootwait_timeout_setup);
+
static char * __initdata root_mount_data;
static int __init root_data_setup(char *str)
{
@@ -384,14 +410,22 @@ void __init mount_root(char *root_device_name)
/* wait for any asynchronous scanning to complete */
static void __init wait_for_root(char *root_device_name)
{
+ ktime_t end;
+
if (ROOT_DEV != 0)
return;
pr_info("Waiting for root device %s...\n", root_device_name);
+ end = ktime_add_ms(ktime_get_raw(), root_wait);
+
while (!driver_probe_done() ||
- early_lookup_bdev(root_device_name, &ROOT_DEV) < 0)
+ early_lookup_bdev(root_device_name, &ROOT_DEV) < 0) {
msleep(5);
+ if (root_wait > 0 && ktime_after(ktime_get_raw(), end))
+ break;
+ }
+
async_synchronize_full();
}
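For example, booting with rootwait=10 would poll for the root device for at most ten seconds before breaking out of the wait loop, while a bare rootwait (or an invalid/overflowing value, per the fallback above) keeps the historical behaviour of waiting indefinitely.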
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1bce2208b65c..b3435033fadf 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -105,6 +105,7 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
} else {
rw->kiocb.ki_ioprio = get_current_ioprio();
}
+ rw->kiocb.dio_complete = NULL;
rw->addr = READ_ONCE(sqe->addr);
rw->len = READ_ONCE(sqe->len);
@@ -220,17 +221,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
}
#endif
-static void kiocb_end_write(struct io_kiocb *req)
+static void io_req_end_write(struct io_kiocb *req)
{
- /*
- * Tell lockdep we inherited freeze protection from submission
- * thread.
- */
if (req->flags & REQ_F_ISREG) {
- struct super_block *sb = file_inode(req->file)->i_sb;
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
- __sb_writers_acquired(sb, SB_FREEZE_WRITE);
- sb_end_write(sb);
+ kiocb_end_write(&rw->kiocb);
}
}
@@ -243,7 +239,7 @@ static void io_req_io_end(struct io_kiocb *req)
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
if (rw->kiocb.ki_flags & IOCB_WRITE) {
- kiocb_end_write(req);
+ io_req_end_write(req);
fsnotify_modify(req->file);
} else {
fsnotify_access(req->file);
@@ -285,6 +281,15 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
+ struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+ struct kiocb *kiocb = &rw->kiocb;
+
+ if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
+ long res = kiocb->dio_complete(rw->kiocb.private);
+
+ io_req_set_res(req, io_fixup_rw_res(req, res), 0);
+ }
+
io_req_io_end(req);
if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
@@ -300,9 +305,11 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
struct io_kiocb *req = cmd_to_io_kiocb(rw);
- if (__io_complete_rw_common(req, res))
- return;
- io_req_set_res(req, io_fixup_rw_res(req, res), 0);
+ if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
+ if (__io_complete_rw_common(req, res))
+ return;
+ io_req_set_res(req, io_fixup_rw_res(req, res), 0);
+ }
req->io_task_work.func = io_req_rw_complete;
__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}
@@ -313,7 +320,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
struct io_kiocb *req = cmd_to_io_kiocb(rw);
if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
+ io_req_end_write(req);
if (unlikely(res != req->cqe.res)) {
if (res == -EAGAIN && io_rw_should_reissue(req)) {
req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
@@ -902,19 +909,18 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
+ if (req->flags & REQ_F_ISREG)
+ kiocb_start_write(kiocb);
+ kiocb->ki_flags |= IOCB_WRITE;
+
/*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * io_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
+ * For non-polled IO, set IOCB_DIO_CALLER_COMP, stating that our handler
+ * groks deferring the completion to task context. This isn't
+ * necessary or useful for polled IO as that can always complete
+ * directly.
*/
- if (req->flags & REQ_F_ISREG) {
- sb_start_write(file_inode(req->file)->i_sb);
- __sb_writers_release(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE);
- }
- kiocb->ki_flags |= IOCB_WRITE;
+ if (!(kiocb->ki_flags & IOCB_HIPRI))
+ kiocb->ki_flags |= IOCB_DIO_CALLER_COMP;
if (likely(req->file->f_op->write_iter))
ret2 = call_write_iter(req->file, kiocb, &s->iter);
@@ -961,7 +967,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
io->bytes_done += ret2;
if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
+ io_req_end_write(req);
return ret ? ret : -EAGAIN;
}
done:
@@ -972,7 +978,7 @@ copy_iov:
ret = io_setup_async_rw(req, iovec, s, false);
if (!ret) {
if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
+ io_req_end_write(req);
return -EAGAIN;
}
return ret;
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 71881bddad25..ba8215ed663a 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -302,7 +302,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
if (S_ISREG(mode)) {
struct mqueue_inode_info *info;
@@ -596,7 +596,7 @@ static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
put_ipc_ns(ipc_ns);
dir->i_size += DIRENT_SIZE;
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
d_instantiate(dentry, inode);
dget(dentry);
@@ -618,7 +618,7 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
+ dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
dir->i_size -= DIRENT_SIZE;
drop_nlink(inode);
dput(dentry);
@@ -635,7 +635,8 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
size_t count, loff_t *off)
{
- struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
+ struct inode *inode = file_inode(filp);
+ struct mqueue_inode_info *info = MQUEUE_I(inode);
char buffer[FILENT_SIZE];
ssize_t ret;
@@ -656,7 +657,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
if (ret <= 0)
return ret;
- file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
+ inode->i_atime = inode_set_ctime_current(inode);
return ret;
}
@@ -1162,8 +1163,7 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
goto out_unlock;
__do_notify(info);
}
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
}
out_unlock:
spin_unlock(&info->lock);
@@ -1257,8 +1257,7 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
msg_ptr = msg_get(info);
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
/* There is now free space in queue. */
pipelined_receive(&wake_q, info);
@@ -1396,7 +1395,7 @@ retry:
if (notification == NULL) {
if (info->notify_owner == task_tgid(current)) {
remove_notification(info);
- inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode_set_ctime_current(inode);
}
} else if (info->notify_owner != NULL) {
ret = -EBUSY;
@@ -1422,7 +1421,7 @@ retry:
info->notify_owner = get_pid(task_tgid(current));
info->notify_user_ns = get_user_ns(current_user_ns());
- inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode_set_ctime_current(inode);
}
spin_unlock(&info->lock);
out_fput:
@@ -1485,7 +1484,7 @@ static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
f.file->f_flags &= ~O_NONBLOCK;
spin_unlock(&f.file->f_lock);
- inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode_set_ctime_current(inode);
}
spin_unlock(&info->lock);
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 4174f76133df..99d0625b6c82 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -118,9 +118,8 @@ static struct inode *bpf_get_inode(struct super_block *sb,
return ERR_PTR(-ENOSPC);
inode->i_ino = get_next_ino();
- inode->i_atime = current_time(inode);
+ inode->i_atime = inode_set_ctime_current(inode);
inode->i_mtime = inode->i_atime;
- inode->i_ctime = inode->i_atime;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
@@ -148,8 +147,7 @@ static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
d_instantiate(dentry, inode);
dget(dentry);
- dir->i_mtime = current_time(dir);
- dir->i_ctime = dir->i_mtime;
+ dir->i_mtime = inode_set_ctime_current(dir);
}
static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index f55a40db065f..5fa95f86cb4d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3685,6 +3685,36 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
return ret;
}
+static int __maybe_unused cgroup_local_stat_show(struct seq_file *seq,
+ struct cgroup *cgrp, int ssid)
+{
+ struct cgroup_subsys *ss = cgroup_subsys[ssid];
+ struct cgroup_subsys_state *css;
+ int ret;
+
+ if (!ss->css_local_stat_show)
+ return 0;
+
+ css = cgroup_tryget_css(cgrp, ss);
+ if (!css)
+ return 0;
+
+ ret = ss->css_local_stat_show(seq, css);
+ css_put(css);
+ return ret;
+}
+
+static int cpu_local_stat_show(struct seq_file *seq, void *v)
+{
+ struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
+ int ret = 0;
+
+#ifdef CONFIG_CGROUP_SCHED
+ ret = cgroup_local_stat_show(seq, cgrp, cpu_cgrp_id);
+#endif
+ return ret;
+}
+
#ifdef CONFIG_PSI
static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
{
@@ -5235,6 +5265,10 @@ static struct cftype cgroup_base_files[] = {
.name = "cpu.stat",
.seq_show = cpu_stat_show,
},
+ {
+ .name = "cpu.stat.local",
+ .seq_show = cpu_local_stat_show,
+ },
{ } /* terminate */
};
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 88a7ede322bd..f6811c857102 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -592,7 +592,10 @@ static void lockdep_release_cpus_lock(void)
void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
+
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+static unsigned int cpu_smt_max_threads __ro_after_init;
+unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
void __init cpu_smt_disable(bool force)
{
@@ -606,16 +609,33 @@ void __init cpu_smt_disable(bool force)
pr_info("SMT: disabled\n");
cpu_smt_control = CPU_SMT_DISABLED;
}
+ cpu_smt_num_threads = 1;
}
/*
* The decision whether SMT is supported can only be done after the full
* CPU identification. Called from architecture code.
*/
-void __init cpu_smt_check_topology(void)
+void __init cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads)
{
- if (!topology_smt_supported())
+ WARN_ON(!num_threads || (num_threads > max_threads));
+
+ if (max_threads == 1)
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+
+ cpu_smt_max_threads = max_threads;
+
+ /*
+ * If SMT has been disabled via the kernel command line or SMT is
+ * not supported, set cpu_smt_num_threads to 1 for consistency.
+ * If enabled, take the architecture requested number of threads
+ * to bring up into account.
+ */
+ if (cpu_smt_control != CPU_SMT_ENABLED)
+ cpu_smt_num_threads = 1;
+ else if (num_threads < cpu_smt_num_threads)
+ cpu_smt_num_threads = num_threads;
}
static int __init smt_cmdline_disable(char *str)
@@ -625,9 +645,23 @@ static int __init smt_cmdline_disable(char *str)
}
early_param("nosmt", smt_cmdline_disable);
+/*
+ * For architectures supporting partial SMT states, check if the thread is allowed.
+ * Otherwise this has already been checked through cpu_smt_max_threads when
+ * setting the SMT level.
+ */
+static inline bool cpu_smt_thread_allowed(unsigned int cpu)
+{
+#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
+ return topology_smt_thread_allowed(cpu);
+#else
+ return true;
+#endif
+}
+
static inline bool cpu_smt_allowed(unsigned int cpu)
{
- if (cpu_smt_control == CPU_SMT_ENABLED)
+ if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
return true;
if (topology_is_primary_thread(cpu))
@@ -642,7 +676,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu)
return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}
-/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
+/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
@@ -650,22 +684,8 @@ bool cpu_smt_possible(void)
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
-static inline bool cpuhp_smt_aware(void)
-{
- return topology_smt_supported();
-}
-
-static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
-{
- return cpu_primary_thread_mask;
-}
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
-static inline bool cpuhp_smt_aware(void) { return false; }
-static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
-{
- return cpu_present_mask;
-}
#endif
static inline enum cpuhp_state
@@ -1793,6 +1813,16 @@ static int __init parallel_bringup_parse_param(char *arg)
}
early_param("cpuhp.parallel", parallel_bringup_parse_param);
+static inline bool cpuhp_smt_aware(void)
+{
+ return cpu_smt_max_threads > 1;
+}
+
+static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+{
+ return cpu_primary_thread_mask;
+}
+
/*
* On architectures which have enabled parallel bringup this invokes all BP
* prepare states for each of the to be onlined APs first. The last state
@@ -2626,6 +2656,12 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
for_each_online_cpu(cpu) {
if (topology_is_primary_thread(cpu))
continue;
+ /*
+ * Disable can be called with CPU_SMT_ENABLED when changing
+ * from a higher to lower number of SMT threads per core.
+ */
+ if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
+ continue;
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
if (ret)
break;
@@ -2660,6 +2696,8 @@ int cpuhp_smt_enable(void)
/* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
continue;
+ if (!cpu_smt_thread_allowed(cpu))
+ continue;
ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
if (ret)
break;
@@ -2838,20 +2876,19 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
#ifdef CONFIG_HOTPLUG_SMT
+static bool cpu_smt_num_threads_valid(unsigned int threads)
+{
+ if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
+ return threads >= 1 && threads <= cpu_smt_max_threads;
+ return threads == 1 || threads == cpu_smt_max_threads;
+}
+
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int ctrlval, ret;
-
- if (sysfs_streq(buf, "on"))
- ctrlval = CPU_SMT_ENABLED;
- else if (sysfs_streq(buf, "off"))
- ctrlval = CPU_SMT_DISABLED;
- else if (sysfs_streq(buf, "forceoff"))
- ctrlval = CPU_SMT_FORCE_DISABLED;
- else
- return -EINVAL;
+ int ctrlval, ret, num_threads, orig_threads;
+ bool force_off;
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
return -EPERM;
@@ -2859,21 +2896,39 @@ __store_smt_control(struct device *dev, struct device_attribute *attr,
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return -ENODEV;
+ if (sysfs_streq(buf, "on")) {
+ ctrlval = CPU_SMT_ENABLED;
+ num_threads = cpu_smt_max_threads;
+ } else if (sysfs_streq(buf, "off")) {
+ ctrlval = CPU_SMT_DISABLED;
+ num_threads = 1;
+ } else if (sysfs_streq(buf, "forceoff")) {
+ ctrlval = CPU_SMT_FORCE_DISABLED;
+ num_threads = 1;
+ } else if (kstrtoint(buf, 10, &num_threads) == 0) {
+ if (num_threads == 1)
+ ctrlval = CPU_SMT_DISABLED;
+ else if (cpu_smt_num_threads_valid(num_threads))
+ ctrlval = CPU_SMT_ENABLED;
+ else
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
- if (ctrlval != cpu_smt_control) {
- switch (ctrlval) {
- case CPU_SMT_ENABLED:
- ret = cpuhp_smt_enable();
- break;
- case CPU_SMT_DISABLED:
- case CPU_SMT_FORCE_DISABLED:
- ret = cpuhp_smt_disable(ctrlval);
- break;
- }
- }
+ orig_threads = cpu_smt_num_threads;
+ cpu_smt_num_threads = num_threads;
+
+ force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
+
+ if (num_threads > orig_threads)
+ ret = cpuhp_smt_enable();
+ else if (num_threads < orig_threads || force_off)
+ ret = cpuhp_smt_disable(ctrlval);
unlock_device_hotplug();
return ret ? ret : count;
@@ -2901,6 +2956,17 @@ static ssize_t control_show(struct device *dev,
{
const char *state = smt_states[cpu_smt_control];
+#ifdef CONFIG_HOTPLUG_SMT
+ /*
+ * If SMT is enabled but not all threads are enabled then show the
+ * number of threads. If all threads are enabled show "on". Otherwise
+ * show the state name.
+ */
+ if (cpu_smt_control == CPU_SMT_ENABLED &&
+ cpu_smt_num_threads != cpu_smt_max_threads)
+ return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
+#endif
+
return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
}
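After this series, /sys/devices/system/cpu/smt/control accepts a per-core thread count in addition to "on", "off" and "forceoff", and reads back the count whenever SMT is enabled with fewer than the maximum threads. A hedged userspace sketch of driving the knob (set_smt_control() is an illustrative helper; the kernel rejects values that cpu_smt_num_threads_valid() does not allow):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Write "on", "off", "forceoff" or a thread count such as "2" to the SMT knob. */
    static int set_smt_control(const char *val)
    {
            int fd = open("/sys/devices/system/cpu/smt/control", O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, val, strlen(val));          /* kernel validates the value */
            close(fd);
            return n < 0 ? -1 : 0;
    }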
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index be61332c66b5..d7ee4bc3f2ba 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -205,8 +205,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
arch_exit_to_user_mode_prepare(regs, ti_work);
- /* Ensure that the address limit is intact and no locks are held */
- addr_limit_user_check();
+ /* Ensure that kernel state is sane for a return to userspace */
kmap_assert_nomap();
lockdep_assert_irqs_disabled();
lockdep_sys_exit();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 78ae7b6f90fd..93015cb64d4e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8249,7 +8249,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
unsigned int size;
memset(comm, 0, sizeof(comm));
- strlcpy(comm, comm_event->task->comm, sizeof(comm));
+ strscpy(comm, comm_event->task->comm, sizeof(comm));
size = ALIGN(strlen(comm)+1, sizeof(u64));
comm_event->comm = comm;
@@ -8704,7 +8704,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
}
cpy_name:
- strlcpy(tmp, name, sizeof(tmp));
+ strscpy(tmp, name, sizeof(tmp));
name = tmp;
got_name:
/*
@@ -9128,7 +9128,7 @@ void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
goto err;
- strlcpy(name, sym, KSYM_NAME_LEN);
+ strscpy(name, sym, KSYM_NAME_LEN);
name_len = strlen(name) + 1;
while (!IS_ALIGNED(name_len, sizeof(u64)))
name[name_len++] = '\0';
@@ -9595,16 +9595,16 @@ u64 perf_swevent_set_period(struct perf_event *event)
hwc->last_period = hwc->sample_period;
-again:
- old = val = local64_read(&hwc->period_left);
- if (val < 0)
- return 0;
+ old = local64_read(&hwc->period_left);
+ do {
+ val = old;
+ if (val < 0)
+ return 0;
- nr = div64_u64(period + val, period);
- offset = nr * period;
- val -= offset;
- if (local64_cmpxchg(&hwc->period_left, old, val) != old)
- goto again;
+ nr = div64_u64(period + val, period);
+ offset = nr * period;
+ val -= offset;
+ } while (!local64_try_cmpxchg(&hwc->period_left, &old, val));
return nr;
}
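The perf changes above replace the read/cmpxchg/retry pattern with local64_try_cmpxchg(), which refreshes the expected value on failure instead of re-reading it at the top of the loop. The same idiom in portable C11 atomics, as a userspace analogue rather than the kernel primitive (consume_period() is an illustrative name):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Subtract 'offset' from *v unless it is already negative; returns the previous value. */
    static int64_t consume_period(_Atomic int64_t *v, int64_t offset)
    {
            int64_t old = atomic_load(v);
            int64_t new;

            do {
                    if (old < 0)
                            return old;               /* nothing left to consume */
                    new = old - offset;
                    /* On failure, atomic_compare_exchange_weak() refreshes 'old'. */
            } while (!atomic_compare_exchange_weak(v, &old, new));

            return old;
    }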
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a0433f37b024..fb1e180b5f0a 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -191,9 +191,10 @@ __perf_output_begin(struct perf_output_handle *handle,
perf_output_get_handle(handle);
+ offset = local_read(&rb->head);
do {
+ head = offset;
tail = READ_ONCE(rb->user_page->data_tail);
- offset = head = local_read(&rb->head);
if (!rb->overwrite) {
if (unlikely(!ring_buffer_has_space(head, tail,
perf_data_size(rb),
@@ -217,7 +218,7 @@ __perf_output_begin(struct perf_output_handle *handle,
head += size;
else
head -= size;
- } while (local_cmpxchg(&rb->head, offset, head) != offset);
+ } while (!local_try_cmpxchg(&rb->head, &offset, head));
if (backward) {
offset = head;
diff --git a/kernel/fork.c b/kernel/fork.c
index d2e12b6d2b18..f81149739eb9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -985,6 +985,14 @@ void __put_task_struct(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(__put_task_struct);
+void __put_task_struct_rcu_cb(struct rcu_head *rhp)
+{
+ struct task_struct *task = container_of(rhp, struct task_struct, rcu);
+
+ __put_task_struct(task);
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);
+
void __init __weak arch_task_cache_init(void) { }
/*
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ee8c0acf39df..dc94e0bf2c94 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -473,11 +473,12 @@ void handle_nested_irq(unsigned int irq)
action = desc->action;
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
- goto out_unlock;
+ raw_spin_unlock_irq(&desc->lock);
+ return;
}
kstat_incr_irqs_this_cpu(desc);
- irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ atomic_inc(&desc->threads_active);
raw_spin_unlock_irq(&desc->lock);
action_ret = IRQ_NONE;
@@ -487,11 +488,7 @@ void handle_nested_irq(unsigned int irq)
if (!irq_settings_no_debug(desc))
note_interrupt(desc, action_ret);
- raw_spin_lock_irq(&desc->lock);
- irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
-
-out_unlock:
- raw_spin_unlock_irq(&desc->lock);
+ wake_threads_waitq(desc);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index bdd35bb9c735..bcc7f21db9ee 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -108,8 +108,6 @@ extern int __irq_get_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
-extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);
@@ -121,6 +119,8 @@ void irq_resend_init(struct irq_desc *desc);
bool irq_wait_for_poll(struct irq_desc *desc);
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
+void wake_threads_waitq(struct irq_desc *desc);
+
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d2742af0f0fd..d309ba84e08a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -108,6 +108,16 @@ bool synchronize_hardirq(unsigned int irq)
}
EXPORT_SYMBOL(synchronize_hardirq);
+static void __synchronize_irq(struct irq_desc *desc)
+{
+ __synchronize_hardirq(desc, true);
+ /*
+ * We made sure that no hardirq handler is running. Now verify that no
+ * threaded handlers are active.
+ */
+ wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
+}
+
/**
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
* @irq: interrupt number to wait for
@@ -127,16 +137,8 @@ void synchronize_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- if (desc) {
- __synchronize_hardirq(desc, true);
- /*
- * We made sure that no hardirq handler is
- * running. Now verify that no threaded handlers are
- * active.
- */
- wait_event(desc->wait_for_threads,
- !atomic_read(&desc->threads_active));
- }
+ if (desc)
+ __synchronize_irq(desc);
}
EXPORT_SYMBOL(synchronize_irq);
@@ -1216,7 +1218,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
return ret;
}
-static void wake_threads_waitq(struct irq_desc *desc)
+void wake_threads_waitq(struct irq_desc *desc)
{
if (atomic_dec_and_test(&desc->threads_active))
wake_up(&desc->wait_for_threads);
@@ -1944,7 +1946,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
* supports it also make sure that there is no (not yet serviced)
* interrupt in flight at the hardware level.
*/
- __synchronize_hardirq(desc, true);
+ __synchronize_irq(desc);
#ifdef CONFIG_DEBUG_SHIRQ
/*
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index edec335c0a7a..5f2c66860ac6 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -68,11 +68,16 @@ static int irq_sw_resend(struct irq_desc *desc)
*/
if (!desc->parent_irq)
return -EINVAL;
+
+ desc = irq_to_desc(desc->parent_irq);
+ if (!desc)
+ return -EINVAL;
}
/* Add to resend_list and activate the softirq: */
raw_spin_lock(&irq_resend_lock);
- hlist_add_head(&desc->resend_node, &irq_resend_list);
+ if (hlist_unhashed(&desc->resend_node))
+ hlist_add_head(&desc->resend_node, &irq_resend_list);
raw_spin_unlock(&irq_resend_lock);
tasklet_schedule(&resend_tasklet);
return 0;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 016d997131d4..18edd57b5fe8 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -163,12 +163,12 @@ unsigned long kallsyms_sym_address(int idx)
return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}
-static bool cleanup_symbol_name(char *s)
+static void cleanup_symbol_name(char *s)
{
char *res;
if (!IS_ENABLED(CONFIG_LTO_CLANG))
- return false;
+ return;
/*
* LLVM appends various suffixes for local functions and variables that
@@ -178,26 +178,21 @@ static bool cleanup_symbol_name(char *s)
* - foo.llvm.[0-9a-f]+
*/
res = strstr(s, ".llvm.");
- if (res) {
+ if (res)
*res = '\0';
- return true;
- }
- return false;
+ return;
}
static int compare_symbol_name(const char *name, char *namebuf)
{
- int ret;
-
- ret = strcmp(name, namebuf);
- if (!ret)
- return ret;
-
- if (cleanup_symbol_name(namebuf) && !strcmp(name, namebuf))
- return 0;
-
- return ret;
+ /* The kallsyms_seqs_of_names array is sorted based on names after
+ * cleanup_symbol_name() (see scripts/kallsyms.c) when Clang LTO is enabled.
+ * To ensure correct bisection in kallsyms_lookup_names(), do
+ * cleanup_symbol_name(namebuf) before comparing name and namebuf.
+ */
+ cleanup_symbol_name(namebuf);
+ return strcmp(name, namebuf);
}
static unsigned int get_symbol_seq(int index)
diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c
index a2e3745d15c4..e05ddc33a752 100644
--- a/kernel/kallsyms_selftest.c
+++ b/kernel/kallsyms_selftest.c
@@ -196,7 +196,7 @@ static bool match_cleanup_name(const char *s, const char *name)
if (!IS_ENABLED(CONFIG_LTO_CLANG))
return false;
- p = strchr(s, '.');
+ p = strstr(s, ".llvm.");
if (!p)
return false;
@@ -344,27 +344,6 @@ static int test_kallsyms_basic_function(void)
goto failed;
}
- /*
- * The first '.' may be the initial letter, in which case the
- * entire symbol name will be truncated to an empty string in
- * cleanup_symbol_name(). Do not test these symbols.
- *
- * For example:
- * cat /proc/kallsyms | awk '{print $3}' | grep -E "^\." | head
- * .E_read_words
- * .E_leading_bytes
- * .E_trailing_bytes
- * .E_write_words
- * .E_copy
- * .str.292.llvm.12122243386960820698
- * .str.24.llvm.12122243386960820698
- * .str.29.llvm.12122243386960820698
- * .str.75.llvm.12122243386960820698
- * .str.99.llvm.12122243386960820698
- */
- if (IS_ENABLED(CONFIG_LTO_CLANG) && !namebuf[0])
- continue;
-
lookup_addr = kallsyms_lookup_name(namebuf);
memset(stat, 0, sizeof(*stat));
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 949d3deae506..270c7f80ce84 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -45,6 +45,7 @@ torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, rt_boost, 2,
"Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
+torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
@@ -809,7 +810,8 @@ static int lock_torture_writer(void *arg)
bool skip_main_lock;
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
- set_user_nice(current, MAX_NICE);
+ if (!rt_task(current))
+ set_user_nice(current, MAX_NICE);
do {
if ((torture_random(&rand) & 0xfffff) == 0)
@@ -1015,8 +1017,7 @@ static void lock_torture_cleanup(void)
if (writer_tasks) {
for (i = 0; i < cxt.nrealwriters_stress; i++)
- torture_stop_kthread(lock_torture_writer,
- writer_tasks[i]);
+ torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
kfree(writer_tasks);
writer_tasks = NULL;
}
@@ -1244,8 +1245,9 @@ static int __init lock_torture_init(void)
goto create_reader;
/* Create writer. */
- firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
- writer_tasks[i]);
+ firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
+ writer_tasks[i],
+ writer_fifo ? sched_set_fifo : NULL);
if (torture_init_error(firsterr))
goto unwind;
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 6afc249ce697..6a0184e9c234 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -486,6 +486,16 @@ gotlock:
}
/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
+ */
+#include <asm/qspinlock_paravirt.h>
+
+/*
* PV versions of the unlock fastpath and slowpath functions to be used
* instead of queued_spin_unlock().
*/
@@ -533,16 +543,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
pv_kick(node->cpu);
}
-/*
- * Include the architecture specific callee-save thunk of the
- * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
- * function close to each other sharing consecutive instruction cachelines.
- * Alternatively, architecture specific version of __pv_queued_spin_unlock()
- * can be defined.
- */
-#include <asm/qspinlock_paravirt.h>
-
#ifndef __pv_queued_spin_unlock
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 80d9c6d77a45..15781acaac1c 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -30,7 +30,7 @@
static struct kmem_cache *nsproxy_cachep;
struct nsproxy init_nsproxy = {
- .count = ATOMIC_INIT(1),
+ .count = REFCOUNT_INIT(1),
.uts_ns = &init_uts_ns,
#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
.ipc_ns = &init_ipc_ns,
@@ -55,7 +55,7 @@ static inline struct nsproxy *create_nsproxy(void)
nsproxy = kmem_cache_alloc(nsproxy_cachep, GFP_KERNEL);
if (nsproxy)
- atomic_set(&nsproxy->count, 1);
+ refcount_set(&nsproxy->count, 1);
return nsproxy;
}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 782d3b41c1f3..4244b069442e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -220,6 +220,11 @@ static struct pm_qos_constraints cpu_latency_constraints = {
.type = PM_QOS_MIN,
};
+static inline bool cpu_latency_qos_value_invalid(s32 value)
+{
+ return value < 0 && value != PM_QOS_DEFAULT_VALUE;
+}
+
/**
* cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
*/
@@ -263,7 +268,7 @@ static void cpu_latency_qos_apply(struct pm_qos_request *req,
*/
void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
{
- if (!req)
+ if (!req || cpu_latency_qos_value_invalid(value))
return;
if (cpu_latency_qos_request_active(req)) {
@@ -289,7 +294,7 @@ EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
*/
void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
{
- if (!req)
+ if (!req || cpu_latency_qos_value_invalid(new_value))
return;
if (!cpu_latency_qos_request_active(req)) {
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0415d5ecb977..87e9f7e2bdc0 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -404,6 +404,7 @@ struct bm_position {
struct mem_zone_bm_rtree *zone;
struct rtree_node *node;
unsigned long node_pfn;
+ unsigned long cur_pfn;
int node_bit;
};
@@ -589,6 +590,7 @@ static void memory_bm_position_reset(struct memory_bitmap *bm)
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
struct rtree_node, list);
bm->cur.node_pfn = 0;
+ bm->cur.cur_pfn = BM_END_OF_MAP;
bm->cur.node_bit = 0;
}
@@ -799,6 +801,7 @@ node_found:
bm->cur.zone = zone;
bm->cur.node = node;
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
+ bm->cur.cur_pfn = pfn;
/* Set return values */
*addr = node->data;
@@ -850,6 +853,11 @@ static void memory_bm_clear_current(struct memory_bitmap *bm)
clear_bit(bit, bm->cur.node->data);
}
+static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
+{
+ return bm->cur.cur_pfn;
+}
+
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
@@ -929,10 +937,12 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
if (bit < bits) {
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
bm->cur.node_bit = bit + 1;
+ bm->cur.cur_pfn = pfn;
return pfn;
}
} while (rtree_next_node(bm));
+ bm->cur.cur_pfn = BM_END_OF_MAP;
return BM_END_OF_MAP;
}
@@ -1423,14 +1433,19 @@ static unsigned int count_data_pages(void)
/*
* This is needed, because copy_page and memcpy are not usable for copying
- * task structs.
+ * task structs. Returns true if the page was filled with only zeros,
+ * otherwise false.
*/
-static inline void do_copy_page(long *dst, long *src)
+static inline bool do_copy_page(long *dst, long *src)
{
+ long z = 0;
int n;
- for (n = PAGE_SIZE / sizeof(long); n; n--)
+ for (n = PAGE_SIZE / sizeof(long); n; n--) {
+ z |= *src;
*dst++ = *src++;
+ }
+ return !z;
}
/**
@@ -1439,17 +1454,21 @@ static inline void do_copy_page(long *dst, long *src)
* Check if the page we are going to copy is marked as present in the kernel
* page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
* CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
- * always returns 'true'.
+ * always returns 'true'. Returns true if the page was entirely composed of
+ * zeros, otherwise it will return false.
*/
-static void safe_copy_page(void *dst, struct page *s_page)
+static bool safe_copy_page(void *dst, struct page *s_page)
{
+ bool zeros_only;
+
if (kernel_page_present(s_page)) {
- do_copy_page(dst, page_address(s_page));
+ zeros_only = do_copy_page(dst, page_address(s_page));
} else {
hibernate_map_page(s_page);
- do_copy_page(dst, page_address(s_page));
+ zeros_only = do_copy_page(dst, page_address(s_page));
hibernate_unmap_page(s_page);
}
+ return zeros_only;
}
#ifdef CONFIG_HIGHMEM
@@ -1459,17 +1478,18 @@ static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}
-static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
struct page *s_page, *d_page;
void *src, *dst;
+ bool zeros_only;
s_page = pfn_to_page(src_pfn);
d_page = pfn_to_page(dst_pfn);
if (PageHighMem(s_page)) {
src = kmap_atomic(s_page);
dst = kmap_atomic(d_page);
- do_copy_page(dst, src);
+ zeros_only = do_copy_page(dst, src);
kunmap_atomic(dst);
kunmap_atomic(src);
} else {
@@ -1478,30 +1498,39 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
* The page pointed to by src may contain some kernel
* data modified by kmap_atomic()
*/
- safe_copy_page(buffer, s_page);
+ zeros_only = safe_copy_page(buffer, s_page);
dst = kmap_atomic(d_page);
copy_page(dst, buffer);
kunmap_atomic(dst);
} else {
- safe_copy_page(page_address(d_page), s_page);
+ zeros_only = safe_copy_page(page_address(d_page), s_page);
}
}
+ return zeros_only;
}
#else
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
-static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
+static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
- safe_copy_page(page_address(pfn_to_page(dst_pfn)),
+ return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
-static void copy_data_pages(struct memory_bitmap *copy_bm,
- struct memory_bitmap *orig_bm)
+/*
+ * copy_data_pages() copies all saveable pages into pages pulled from the copy_bm.
+ * If a page was entirely filled with zeros it will be marked in the zero_bm.
+ *
+ * Returns the number of pages copied.
+ */
+static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
+ struct memory_bitmap *orig_bm,
+ struct memory_bitmap *zero_bm)
{
+ unsigned long copied_pages = 0;
struct zone *zone;
- unsigned long pfn;
+ unsigned long pfn, copy_pfn;
for_each_populated_zone(zone) {
unsigned long max_zone_pfn;
@@ -1514,18 +1543,29 @@ static void copy_data_pages(struct memory_bitmap *copy_bm,
}
memory_bm_position_reset(orig_bm);
memory_bm_position_reset(copy_bm);
+ copy_pfn = memory_bm_next_pfn(copy_bm);
for(;;) {
pfn = memory_bm_next_pfn(orig_bm);
if (unlikely(pfn == BM_END_OF_MAP))
break;
- copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
+ if (copy_data_page(copy_pfn, pfn)) {
+ memory_bm_set_bit(zero_bm, pfn);
+ /* Use this copy_pfn for a page that is not full of zeros */
+ continue;
+ }
+ copied_pages++;
+ copy_pfn = memory_bm_next_pfn(copy_bm);
}
+ return copied_pages;
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
+/* Number of zero pages */
+static unsigned int nr_zero_pages;
+
/*
* Numbers of normal and highmem page frames allocated for hibernation image
* before suspending devices.
@@ -1546,6 +1586,9 @@ static struct memory_bitmap orig_bm;
*/
static struct memory_bitmap copy_bm;
+/* Memory bitmap which tracks which saveable pages were zero filled. */
+static struct memory_bitmap zero_bm;
+
/**
* swsusp_free - Free pages allocated for hibernation image.
*
@@ -1590,6 +1633,7 @@ loop:
out:
nr_copy_pages = 0;
nr_meta_pages = 0;
+ nr_zero_pages = 0;
restore_pblist = NULL;
buffer = NULL;
alloc_normal = 0;
@@ -1808,8 +1852,15 @@ int hibernate_preallocate_memory(void)
goto err_out;
}
+ error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
+ if (error) {
+ pr_err("Cannot allocate zero bitmap\n");
+ goto err_out;
+ }
+
alloc_normal = 0;
alloc_highmem = 0;
+ nr_zero_pages = 0;
/* Count the number of saveable data pages. */
save_highmem = count_highmem_pages();
@@ -2089,19 +2140,19 @@ asmlinkage __visible int swsusp_save(void)
* Kill them.
*/
drain_local_pages(NULL);
- copy_data_pages(&copy_bm, &orig_bm);
+ nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);
/*
* End of critical section. From now on, we can write to memory,
* but we should not touch disk. This specially means we must _not_
* touch swap space! Except we must write out our image of course.
*/
-
nr_pages += nr_highmem;
- nr_copy_pages = nr_pages;
+ /* We don't actually copy the zero pages */
+ nr_zero_pages = nr_pages - nr_copy_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
- pr_info("Image created (%d pages copied)\n", nr_pages);
+ pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
return 0;
}
@@ -2146,15 +2197,22 @@ static int init_header(struct swsusp_info *info)
return init_header_complete(info);
}
+#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
+#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
+
/**
* pack_pfns - Prepare PFNs for saving.
* @bm: Memory bitmap.
* @buf: Memory buffer to store the PFNs in.
+ * @zero_bm: Memory bitmap containing PFNs of zero pages.
*
* PFNs corresponding to set bits in @bm are stored in the area of memory
- * pointed to by @buf (1 page at a time).
+ * pointed to by @buf (1 page at a time). Pages which were filled with only
+ * zeros will have the highest bit set in the packed format to distinguish
+ * them from PFNs which will be contained in the image file.
*/
-static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
+ struct memory_bitmap *zero_bm)
{
int j;
@@ -2162,6 +2220,8 @@ static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
+ if (memory_bm_test_bit(zero_bm, buf[j]))
+ buf[j] |= ENCODED_PFN_ZERO_FLAG;
}
}
@@ -2203,7 +2263,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
memory_bm_position_reset(&copy_bm);
} else if (handle->cur <= nr_meta_pages) {
clear_page(buffer);
- pack_pfns(buffer, &orig_bm);
+ pack_pfns(buffer, &orig_bm, &zero_bm);
} else {
struct page *page;
@@ -2299,24 +2359,35 @@ static int load_header(struct swsusp_info *info)
* unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
* @bm: Memory bitmap.
* @buf: Area of memory containing the PFNs.
+ * @zero_bm: Memory bitmap with the zero PFNs marked.
*
* For each element of the array pointed to by @buf (1 page at a time), set the
- * corresponding bit in @bm.
+ * corresponding bit in @bm. If the page was originally populated with only
+ * zeros then a corresponding bit will also be set in @zero_bm.
*/
-static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
+static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
+ struct memory_bitmap *zero_bm)
{
+ unsigned long decoded_pfn;
+ bool zero;
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
- if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) {
- memory_bm_set_bit(bm, buf[j]);
+ zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
+ decoded_pfn = buf[j] & ENCODED_PFN_MASK;
+ if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
+ memory_bm_set_bit(bm, decoded_pfn);
+ if (zero) {
+ memory_bm_set_bit(zero_bm, decoded_pfn);
+ nr_zero_pages++;
+ }
} else {
- if (!pfn_valid(buf[j]))
+ if (!pfn_valid(decoded_pfn))
pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
- (unsigned long long)PFN_PHYS(buf[j]));
+ (unsigned long long)PFN_PHYS(decoded_pfn));
return -EFAULT;
}
}
@@ -2538,6 +2609,7 @@ static inline void free_highmem_data(void) {}
* prepare_image - Make room for loading hibernation image.
* @new_bm: Uninitialized memory bitmap structure.
* @bm: Memory bitmap with unsafe pages marked.
+ * @zero_bm: Memory bitmap containing the zero pages.
*
* Use @bm to mark the pages that will be overwritten in the process of
* restoring the system memory state from the suspend image ("unsafe" pages)
@@ -2548,10 +2620,15 @@ static inline void free_highmem_data(void) {}
* pages will be used for just yet. Instead, we mark them all as allocated and
* create a lists of "safe" pages to be used later. On systems with high
* memory a list of "safe" highmem pages is created too.
+ *
+ * Because it was not known which pages were unsafe when @zero_bm was created,
+ * make a copy of it and recreate it within safe pages.
*/
-static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
+static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
+ struct memory_bitmap *zero_bm)
{
unsigned int nr_pages, nr_highmem;
+ struct memory_bitmap tmp;
struct linked_page *lp;
int error;
@@ -2568,6 +2645,24 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
duplicate_memory_bitmap(new_bm, bm);
memory_bm_free(bm, PG_UNSAFE_KEEP);
+
+ /* Make a copy of zero_bm so it can be created in safe pages */
+ error = memory_bm_create(&tmp, GFP_ATOMIC, PG_ANY);
+ if (error)
+ goto Free;
+
+ duplicate_memory_bitmap(&tmp, zero_bm);
+ memory_bm_free(zero_bm, PG_UNSAFE_KEEP);
+
+ /* Recreate zero_bm in safe pages */
+ error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
+ if (error)
+ goto Free;
+
+ duplicate_memory_bitmap(zero_bm, &tmp);
+ memory_bm_free(&tmp, PG_UNSAFE_KEEP);
+ /* At this point zero_bm is in safe pages and it can be used for restoring. */
+
if (nr_highmem > 0) {
error = prepare_highmem_image(bm, &nr_highmem);
if (error)
@@ -2582,7 +2677,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
*
* nr_copy_pages cannot be less than allocated_unsafe_pages too.
*/
- nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
+ nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
while (nr_pages > 0) {
lp = get_image_page(GFP_ATOMIC, PG_SAFE);
@@ -2595,7 +2690,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
nr_pages--;
}
/* Preallocate memory for the image */
- nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
+ nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
while (nr_pages > 0) {
lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
if (!lp) {
@@ -2683,8 +2778,9 @@ int snapshot_write_next(struct snapshot_handle *handle)
static struct chain_allocator ca;
int error = 0;
+next:
/* Check if we have already loaded the entire image */
- if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
return 0;
handle->sync_read = 1;
@@ -2709,19 +2805,26 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (error)
return error;
+ error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
+ if (error)
+ return error;
+
+ nr_zero_pages = 0;
+
hibernate_restore_protection_begin();
} else if (handle->cur <= nr_meta_pages + 1) {
- error = unpack_orig_pfns(buffer, &copy_bm);
+ error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
if (error)
return error;
if (handle->cur == nr_meta_pages + 1) {
- error = prepare_image(&orig_bm, &copy_bm);
+ error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
if (error)
return error;
chain_init(&ca, GFP_ATOMIC, PG_SAFE);
memory_bm_position_reset(&orig_bm);
+ memory_bm_position_reset(&zero_bm);
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
handle->sync_read = 0;
@@ -2738,6 +2841,14 @@ int snapshot_write_next(struct snapshot_handle *handle)
handle->sync_read = 0;
}
handle->cur++;
+
+ /* Zero pages were not included in the image, memset it and move on. */
+ if (handle->cur > nr_meta_pages + 1 &&
+ memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
+ memset(handle->buffer, 0, PAGE_SIZE);
+ goto next;
+ }
+
return PAGE_SIZE;
}
@@ -2754,7 +2865,7 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
copy_last_highmem_page();
hibernate_restore_protect_page(handle->buffer);
/* Do that only if we have loaded the image entirely */
- if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
+ if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
memory_bm_recycle(&orig_bm);
free_highmem_data();
}
@@ -2763,7 +2874,7 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
int snapshot_image_loaded(struct snapshot_handle *handle)
{
return !(!nr_copy_pages || !last_highmem_page_copied() ||
- handle->cur <= nr_meta_pages + nr_copy_pages);
+ handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
}
#ifdef CONFIG_HIGHMEM
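Zero-filled pages are no longer stored in the hibernation image; their PFNs are written with the top bit set so restore can recreate them with memset(). A small standalone sketch of the encode/decode convention used by pack_pfns() and unpack_orig_pfns() (encode_pfn() is an illustrative helper):

    #include <stdbool.h>
    #include <stdio.h>

    #define BITS_PER_LONG          (sizeof(unsigned long) * 8)
    #define ENCODED_PFN_ZERO_FLAG  ((unsigned long)1 << (BITS_PER_LONG - 1))
    #define ENCODED_PFN_MASK       (~ENCODED_PFN_ZERO_FLAG)

    static unsigned long encode_pfn(unsigned long pfn, bool zero)
    {
            return zero ? (pfn | ENCODED_PFN_ZERO_FLAG) : pfn;
    }

    int main(void)
    {
            unsigned long e = encode_pfn(0x1234, true);

            printf("pfn=%#lx zero=%d\n", e & ENCODED_PFN_MASK,
                   !!(e & ENCODED_PFN_ZERO_FLAG));    /* pfn=0x1234 zero=1 */
            return 0;
    }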
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index f95cfb5bf2ee..98e13be411af 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -509,6 +509,14 @@ static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
#endif /* #else #ifdef CONFIG_TINY_RCU */
+#ifdef CONFIG_TASKS_RCU
+struct task_struct *get_rcu_tasks_gp_kthread(void);
+#endif // # ifdef CONFIG_TASKS_RCU
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+struct task_struct *get_rcu_tasks_rude_gp_kthread(void);
+#endif // # ifdef CONFIG_TASKS_RUDE_RCU
+
#define RCU_SCHEDULER_INACTIVE 0
#define RCU_SCHEDULER_INIT 1
#define RCU_SCHEDULER_RUNNING 2
diff --git a/kernel/rcu/rcuscale.c b/kernel/rcu/rcuscale.c
index d1221731c7cf..ffdb30495e3c 100644
--- a/kernel/rcu/rcuscale.c
+++ b/kernel/rcu/rcuscale.c
@@ -84,15 +84,17 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
#endif
torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
-torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
+torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
+torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
"Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
+torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");
@@ -139,6 +141,7 @@ struct rcu_scale_ops {
void (*gp_barrier)(void);
void (*sync)(void);
void (*exp_sync)(void);
+ struct task_struct *(*rso_gp_kthread)(void);
const char *name;
};
@@ -295,6 +298,7 @@ static struct rcu_scale_ops tasks_ops = {
.gp_barrier = rcu_barrier_tasks,
.sync = synchronize_rcu_tasks,
.exp_sync = synchronize_rcu_tasks,
+ .rso_gp_kthread = get_rcu_tasks_gp_kthread,
.name = "tasks"
};
@@ -306,6 +310,44 @@ static struct rcu_scale_ops tasks_ops = {
#endif // #else // #ifdef CONFIG_TASKS_RCU
+#ifdef CONFIG_TASKS_RUDE_RCU
+
+/*
+ * Definitions for RCU-tasks-rude scalability testing.
+ */
+
+static int tasks_rude_scale_read_lock(void)
+{
+ return 0;
+}
+
+static void tasks_rude_scale_read_unlock(int idx)
+{
+}
+
+static struct rcu_scale_ops tasks_rude_ops = {
+ .ptype = RCU_TASKS_RUDE_FLAVOR,
+ .init = rcu_sync_scale_init,
+ .readlock = tasks_rude_scale_read_lock,
+ .readunlock = tasks_rude_scale_read_unlock,
+ .get_gp_seq = rcu_no_completed,
+ .gp_diff = rcu_seq_diff,
+ .async = call_rcu_tasks_rude,
+ .gp_barrier = rcu_barrier_tasks_rude,
+ .sync = synchronize_rcu_tasks_rude,
+ .exp_sync = synchronize_rcu_tasks_rude,
+ .rso_gp_kthread = get_rcu_tasks_rude_gp_kthread,
+ .name = "tasks-rude"
+};
+
+#define TASKS_RUDE_OPS &tasks_rude_ops,
+
+#else // #ifdef CONFIG_TASKS_RUDE_RCU
+
+#define TASKS_RUDE_OPS
+
+#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU
+
#ifdef CONFIG_TASKS_TRACE_RCU
/*
@@ -334,6 +376,7 @@ static struct rcu_scale_ops tasks_tracing_ops = {
.gp_barrier = rcu_barrier_tasks_trace,
.sync = synchronize_rcu_tasks_trace,
.exp_sync = synchronize_rcu_tasks_trace,
+ .rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
.name = "tasks-tracing"
};
@@ -410,10 +453,12 @@ rcu_scale_writer(void *arg)
{
int i = 0;
int i_max;
+ unsigned long jdone;
long me = (long)arg;
struct rcu_head *rhp = NULL;
bool started = false, done = false, alldone = false;
u64 t;
+ DEFINE_TORTURE_RANDOM(tr);
u64 *wdp;
u64 *wdpp = writer_durations[me];
@@ -424,7 +469,7 @@ rcu_scale_writer(void *arg)
sched_set_fifo_low(current);
if (holdoff)
- schedule_timeout_uninterruptible(holdoff * HZ);
+ schedule_timeout_idle(holdoff * HZ);
/*
* Wait until rcu_end_inkernel_boot() is called for normal GP tests
@@ -445,9 +490,12 @@ rcu_scale_writer(void *arg)
}
}
+ jdone = jiffies + minruntime * HZ;
do {
if (writer_holdoff)
udelay(writer_holdoff);
+ if (writer_holdoff_jiffies)
+ schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
wdp = &wdpp[i];
*wdp = ktime_get_mono_fast_ns();
if (gp_async) {
@@ -475,7 +523,7 @@ retry:
if (!started &&
atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
started = true;
- if (!done && i >= MIN_MEAS) {
+ if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
done = true;
sched_set_normal(current, 0);
pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
@@ -518,8 +566,8 @@ static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
pr_alert("%s" SCALE_FLAG
- "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
- scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
+ "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
+ scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}
/*
@@ -556,6 +604,8 @@ static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
+static struct task_struct *kthread_tp;
+static u64 kthread_stime;
struct kfree_obj {
char kfree_obj[8];
@@ -701,6 +751,10 @@ kfree_scale_init(void)
unsigned long jif_start;
unsigned long orig_jif;
+ pr_alert("%s" SCALE_FLAG
+ "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
+ scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);
+
// Also, do a quick self-test to ensure laziness is as much as
// expected.
if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
@@ -797,6 +851,18 @@ rcu_scale_cleanup(void)
if (gp_exp && gp_async)
SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
+ // If built-in, just report all of the GP kthread's CPU time.
+ if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
+ kthread_tp = cur_ops->rso_gp_kthread();
+ if (kthread_tp) {
+ u32 ns;
+ u64 us;
+
+ kthread_stime = kthread_tp->stime - kthread_stime;
+ us = div_u64_rem(kthread_stime, 1000, &ns);
+ pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
+ show_rcu_gp_kthreads();
+ }
if (kfree_rcu_test) {
kfree_scale_cleanup();
return;
@@ -885,7 +951,7 @@ rcu_scale_init(void)
long i;
int firsterr = 0;
static struct rcu_scale_ops *scale_ops[] = {
- &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_TRACING_OPS
+ &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
};
if (!torture_init_begin(scale_type, verbose))
@@ -910,6 +976,11 @@ rcu_scale_init(void)
if (cur_ops->init)
cur_ops->init();
+ if (cur_ops->rso_gp_kthread) {
+ kthread_tp = cur_ops->rso_gp_kthread();
+ if (kthread_tp)
+ kthread_stime = kthread_tp->stime;
+ }
if (kfree_rcu_test)
return kfree_scale_init();
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 147551c23baf..ade42d6a9d9b 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1581,6 +1581,7 @@ rcu_torture_writer(void *arg)
rcu_access_pointer(rcu_torture_current) !=
&rcu_tortures[i]) {
tracing_off();
+ show_rcu_gp_kthreads();
WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
rcu_ftrace_dump(DUMP_ALL);
}
@@ -1876,7 +1877,7 @@ static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
int mask = rcutorture_extend_mask_max();
- unsigned long randmask1 = torture_random(trsp) >> 8;
+ unsigned long randmask1 = torture_random(trsp);
unsigned long randmask2 = randmask1 >> 3;
unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
@@ -1935,7 +1936,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
if (!((mask - 1) & mask))
return rtrsp; /* Current RCU reader not extendable. */
/* Bias towards larger numbers of loops. */
- i = (torture_random(trsp) >> 3);
+ i = torture_random(trsp);
i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
for (j = 0; j < i; j++) {
mask = rcutorture_extend_mask(*readstate, trsp);
@@ -2136,7 +2137,7 @@ static int rcu_nocb_toggle(void *arg)
toggle_fuzz = NSEC_PER_USEC;
do {
r = torture_random(&rand);
- cpu = (r >> 4) % (maxcpu + 1);
+ cpu = (r >> 1) % (maxcpu + 1);
if (r & 0x1) {
rcu_nocb_cpu_offload(cpu);
atomic_long_inc(&n_nocb_offload);
diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c
index 1970ce5f22d4..91a0fd0d4d9a 100644
--- a/kernel/rcu/refscale.c
+++ b/kernel/rcu/refscale.c
@@ -528,6 +528,38 @@ static struct ref_scale_ops clock_ops = {
.name = "clock"
};
+static void ref_jiffies_section(const int nloops)
+{
+ u64 x = 0;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--)
+ x += jiffies;
+ preempt_enable();
+ stopopts = x;
+}
+
+static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
+{
+ u64 x = 0;
+ int i;
+
+ preempt_disable();
+ for (i = nloops; i >= 0; i--) {
+ x += jiffies;
+ un_delay(udl, ndl);
+ }
+ preempt_enable();
+ stopopts = x;
+}
+
+static struct ref_scale_ops jiffies_ops = {
+ .readsection = ref_jiffies_section,
+ .delaysection = ref_jiffies_delay_section,
+ .name = "jiffies"
+};
+
////////////////////////////////////////////////////////////////////////
//
// Methods leveraging SLAB_TYPESAFE_BY_RCU.
@@ -1047,7 +1079,7 @@ ref_scale_init(void)
int firsterr = 0;
static struct ref_scale_ops *scale_ops[] = {
&rcu_ops, &srcu_ops, RCU_TRACE_OPS RCU_TASKS_OPS &refcnt_ops, &rwlock_ops,
- &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
+ &rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops, &jiffies_ops,
&typesafe_ref_ops, &typesafe_lock_ops, &typesafe_seqlock_ops,
};
@@ -1107,12 +1139,11 @@ ref_scale_init(void)
VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);
for (i = 0; i < nreaders; i++) {
+ init_waitqueue_head(&reader_tasks[i].wq);
firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
reader_tasks[i].task);
if (torture_init_error(firsterr))
goto unwind;
-
- init_waitqueue_head(&(reader_tasks[i].wq));
}
// Main Task
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index b770add3f843..8d65f7d576a3 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -25,6 +25,8 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
* @cblist: Callback list.
* @lock: Lock protecting per-CPU callback list.
* @rtp_jiffies: Jiffies counter value for statistics.
+ * @lazy_timer: Timer to unlazify callbacks.
+ * @urgent_gp: Number of additional non-lazy grace periods.
* @rtp_n_lock_retries: Rough lock-contention statistic.
* @rtp_work: Work queue for invoking callbacks.
* @rtp_irq_work: IRQ work queue for deferred wakeups.
@@ -38,6 +40,8 @@ struct rcu_tasks_percpu {
raw_spinlock_t __private lock;
unsigned long rtp_jiffies;
unsigned long rtp_n_lock_retries;
+ struct timer_list lazy_timer;
+ unsigned int urgent_gp;
struct work_struct rtp_work;
struct irq_work rtp_irq_work;
struct rcu_head barrier_q_head;
@@ -51,7 +55,6 @@ struct rcu_tasks_percpu {
* @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
* @cbs_gbl_lock: Lock protecting callback list.
* @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
- * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
* @gp_func: This flavor's grace-period-wait function.
* @gp_state: Grace period's most recent state transition (debugging).
* @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
@@ -61,6 +64,8 @@ struct rcu_tasks_percpu {
* @tasks_gp_seq: Number of grace periods completed since boot.
* @n_ipis: Number of IPIs sent to encourage grace periods to end.
* @n_ipis_fails: Number of IPI-send failures.
+ * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
+ * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
* @pregp_func: This flavor's pre-grace-period function (optional).
* @pertask_func: This flavor's per-task scan function (optional).
* @postscan_func: This flavor's post-task scan function (optional).
@@ -92,6 +97,7 @@ struct rcu_tasks {
unsigned long n_ipis;
unsigned long n_ipis_fails;
struct task_struct *kthread_ptr;
+ unsigned long lazy_jiffies;
rcu_tasks_gp_func_t gp_func;
pregp_func_t pregp_func;
pertask_func_t pertask_func;
@@ -127,6 +133,7 @@ static struct rcu_tasks rt_name = \
.gp_func = gp, \
.call_func = call, \
.rtpcpu = &rt_name ## __percpu, \
+ .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \
.name = n, \
.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
.percpu_enqueue_lim = 1, \
@@ -139,9 +146,7 @@ static struct rcu_tasks rt_name = \
#ifdef CONFIG_TASKS_RCU
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
-#endif
-#ifdef CONFIG_TASKS_RCU
/* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
@@ -171,6 +176,8 @@ static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
+static int rcu_task_lazy_lim __read_mostly = 32;
+module_param(rcu_task_lazy_lim, int, 0444);
/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT 0
@@ -229,7 +236,7 @@ static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
#endif /* #ifndef CONFIG_TINY_RCU */
// Initialize per-CPU callback lists for the specified flavor of
-// Tasks RCU.
+// Tasks RCU. Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
int cpu;
@@ -237,7 +244,6 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
int lim;
int shift;
- raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rcu_task_enqueue_lim < 0) {
rcu_task_enqueue_lim = 1;
rcu_task_cb_adjust = true;
@@ -260,22 +266,48 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
WARN_ON_ONCE(!rtpcp);
if (cpu)
raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
- raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
+ local_irq_save(flags); // serialize initialization
if (rcu_segcblist_empty(&rtpcp->cblist))
rcu_segcblist_init(&rtpcp->cblist);
+ local_irq_restore(flags);
INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
rtpcp->cpu = cpu;
rtpcp->rtpp = rtp;
if (!rtpcp->rtp_blkd_tasks.next)
INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
- raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
}
- raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
}
+// Compute wakeup time for lazy callback timer.
+static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
+{
+ return jiffies + rtp->lazy_jiffies;
+}
+
+// Timer handler that unlazifies lazy callbacks.
+static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
+{
+ unsigned long flags;
+ bool needwake = false;
+ struct rcu_tasks *rtp;
+ struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
+
+ rtp = rtpcp->rtpp;
+ raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
+ if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
+ if (!rtpcp->urgent_gp)
+ rtpcp->urgent_gp = 1;
+ needwake = true;
+ mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
+ }
+ raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
+ if (needwake)
+ rcuwait_wake_up(&rtp->cbs_wait);
+}
+
// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
@@ -292,6 +324,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
{
int chosen_cpu;
unsigned long flags;
+ bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
int ideal_cpu;
unsigned long j;
bool needadjust = false;
@@ -316,12 +349,19 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
needadjust = true; // Defer adjustment to avoid deadlock.
}
- if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
- raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
- cblist_init_generic(rtp);
- raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
+ // Queuing callbacks before initialization not yet supported.
+ if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
+ rcu_segcblist_init(&rtpcp->cblist);
+ needwake = (func == wakeme_after_rcu) ||
+ (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
+ if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
+ if (rtp->lazy_jiffies)
+ mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
+ else
+ needwake = rcu_segcblist_empty(&rtpcp->cblist);
}
- needwake = rcu_segcblist_empty(&rtpcp->cblist);
+ if (needwake)
+ rtpcp->urgent_gp = 3;
rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
if (unlikely(needadjust)) {
@@ -415,9 +455,14 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
}
rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
- if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
+ if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
+ if (rtp->lazy_jiffies)
+ rtpcp->urgent_gp--;
needgpcb |= 0x3;
- if (!rcu_segcblist_empty(&rtpcp->cblist))
+ } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
+ rtpcp->urgent_gp = 0;
+ }
+ if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
needgpcb |= 0x1;
raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}
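To make the laziness bookkeeping above concrete (a worked reading of the constants in this patch, not additional semantics):

	lazy_jiffies   defaults to DIV_ROUND_UP(HZ, 4) jiffies   (250 jiffies at HZ=1000, ~250 ms)
	forced wakeup  when the enqueued callback is wakeme_after_rcu (a synchronous waiter),
	               or once rcu_task_lazy_lim (32) callbacks are already queued on that CPU
	urgent_gp      set to 3 on a forced wakeup, set to 1 by the lazy timer, and decremented
	               once per rcu_tasks_need_gpcb() scan as long as lazy_jiffies is non-zero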
@@ -525,10 +570,12 @@ static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
if (unlikely(midboot)) {
needgpcb = 0x2;
} else {
+ mutex_unlock(&rtp->tasks_gp_mutex);
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
rcuwait_wait_event(&rtp->cbs_wait,
(needgpcb = rcu_tasks_need_gpcb(rtp)),
TASK_IDLE);
+ mutex_lock(&rtp->tasks_gp_mutex);
}
if (needgpcb & 0x2) {
@@ -549,11 +596,19 @@ static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
+ int cpu;
struct rcu_tasks *rtp = arg;
+ for_each_possible_cpu(cpu) {
+ struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+
+ timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
+ rtpcp->urgent_gp = 1;
+ }
+
/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
housekeeping_affine(current, HK_TYPE_RCU);
- WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
+ smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
/*
* Each pass through the following loop makes one check for
@@ -635,16 +690,22 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
int cpu;
bool havecbs = false;
+ bool haveurgent = false;
+ bool haveurgentcbs = false;
for_each_possible_cpu(cpu) {
struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
- if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
+ if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
havecbs = true;
+ if (data_race(rtpcp->urgent_gp))
+ haveurgent = true;
+ if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
+ haveurgentcbs = true;
+ if (havecbs && haveurgent && haveurgentcbs)
break;
- }
}
- pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
+ pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
rtp->kname,
tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
jiffies - data_race(rtp->gp_jiffies),
@@ -652,6 +713,9 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
".k"[!!data_race(rtp->kthread_ptr)],
".C"[havecbs],
+ ".u"[haveurgent],
+ ".U"[haveurgentcbs],
+ rtp->lazy_jiffies,
s);
}
#endif // #ifndef CONFIG_TINY_RCU
@@ -1020,11 +1084,16 @@ void rcu_barrier_tasks(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
+int rcu_tasks_lazy_ms = -1;
+module_param(rcu_tasks_lazy_ms, int, 0444);
+
static int __init rcu_spawn_tasks_kthread(void)
{
cblist_init_generic(&rcu_tasks);
rcu_tasks.gp_sleep = HZ / 10;
rcu_tasks.init_fract = HZ / 10;
+ if (rcu_tasks_lazy_ms >= 0)
+ rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
rcu_tasks.pregp_func = rcu_tasks_pregp_step;
rcu_tasks.pertask_func = rcu_tasks_pertask;
rcu_tasks.postscan_func = rcu_tasks_postscan;
@@ -1042,6 +1111,12 @@ void show_rcu_tasks_classic_gp_kthread(void)
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
+struct task_struct *get_rcu_tasks_gp_kthread(void)
+{
+ return rcu_tasks.kthread_ptr;
+}
+EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
+
/*
* Contribute to protect against tasklist scan blind spot while the
* task is exiting and may be removed from the tasklist. See
@@ -1173,10 +1248,15 @@ void rcu_barrier_tasks_rude(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
+int rcu_tasks_rude_lazy_ms = -1;
+module_param(rcu_tasks_rude_lazy_ms, int, 0444);
+
static int __init rcu_spawn_tasks_rude_kthread(void)
{
cblist_init_generic(&rcu_tasks_rude);
rcu_tasks_rude.gp_sleep = HZ / 10;
+ if (rcu_tasks_rude_lazy_ms >= 0)
+ rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
return 0;
}
@@ -1188,6 +1268,13 @@ void show_rcu_tasks_rude_gp_kthread(void)
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
+
+struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
+{
+ return rcu_tasks_rude.kthread_ptr;
+}
+EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
+
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
////////////////////////////////////////////////////////////////////////
@@ -1793,6 +1880,9 @@ void rcu_barrier_tasks_trace(void)
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
+int rcu_tasks_trace_lazy_ms = -1;
+module_param(rcu_tasks_trace_lazy_ms, int, 0444);
+
static int __init rcu_spawn_tasks_trace_kthread(void)
{
cblist_init_generic(&rcu_tasks_trace);
@@ -1807,6 +1897,8 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
if (rcu_tasks_trace.init_fract <= 0)
rcu_tasks_trace.init_fract = 1;
}
+ if (rcu_tasks_trace_lazy_ms >= 0)
+ rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
@@ -1830,6 +1922,12 @@ void show_rcu_tasks_trace_gp_kthread(void)
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
+{
+ return rcu_tasks_trace.kthread_ptr;
+}
+EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
+
#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
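Because tasks.h is compiled into kernel/rcu/update.c, the three new module parameters would normally take the "rcupdate." prefix on the kernel command line. A hedged usage sketch (the prefix is an assumption; only the parameter names appear in this diff); negative values, the default, keep the built-in DIV_ROUND_UP(HZ, 4) setting:

	rcupdate.rcu_tasks_lazy_ms=0           # make RCU Tasks callbacks non-lazy
	rcupdate.rcu_tasks_rude_lazy_ms=0
	rcupdate.rcu_tasks_trace_lazy_ms=1000  # allow up to one second of laziness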
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1449cb69a0e0..cb1caefa8bd0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -632,7 +632,7 @@ void __rcu_irq_enter_check_tick(void)
// prevents self-deadlock. So we can safely recheck under the lock.
// Note that the nohz_full state currently cannot change.
raw_spin_lock_rcu_node(rdp->mynode);
- if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
+ if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
// A nohz_full CPU is in the kernel and RCU needs a
// quiescent state. Turn on the tick!
WRITE_ONCE(rdp->rcu_forced_tick, true);
@@ -677,12 +677,16 @@ static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
}
/**
- * rcu_is_watching - see if RCU thinks that the current CPU is not idle
+ * rcu_is_watching - RCU read-side critical sections permitted on current CPU?
*
- * Return true if RCU is watching the running CPU, which means that this
- * CPU can safely enter RCU read-side critical sections. In other words,
- * if the current CPU is not in its idle loop or is in an interrupt or
- * NMI handler, return true.
+ * Return @true if RCU is watching the running CPU and @false otherwise.
+ * A @true return means that this CPU can safely enter RCU read-side
+ * critical sections.
+ *
+ * Although calls to rcu_is_watching() from most parts of the kernel
+ * will return @true, there are important exceptions. For example, if the
+ * current CPU is deep within its idle loop, in kernel entry/exit code,
+ * or offline, rcu_is_watching() will return @false.
*
* Make notrace because it can be called by the internal functions of
* ftrace, and making this notrace removes unnecessary recursion calls.
diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 43229d2b0c44..5598212d1f27 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -77,9 +77,9 @@ __setup("rcu_nocbs", rcu_nocb_setup);
static int __init parse_rcu_nocb_poll(char *arg)
{
rcu_nocb_poll = true;
- return 0;
+ return 1;
}
-early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+__setup("rcu_nocb_poll", parse_rcu_nocb_poll);
/*
* Don't bother bypassing ->cblist if the call_rcu() rate is low.
diff --git a/kernel/scftorture.c b/kernel/scftorture.c
index 5d113aa59e77..59032aaccd18 100644
--- a/kernel/scftorture.c
+++ b/kernel/scftorture.c
@@ -171,7 +171,8 @@ static void scf_torture_stats_print(void)
scfs.n_all_wait += scf_stats_p[i].n_all_wait;
}
if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
- atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
+ atomic_read(&n_mb_out_errs) ||
+ (!IS_ENABLED(CONFIG_KASAN) && atomic_read(&n_alloc_errs)))
bangstr = "!!! ";
pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
@@ -312,6 +313,7 @@ static void scf_handler_1(void *scfc_in)
// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
+ bool allocfail = false;
uintptr_t cpu;
int ret = 0;
struct scf_check *scfcp = NULL;
@@ -323,8 +325,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
preempt_disable();
if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
- if (WARN_ON_ONCE(!scfcp)) {
+ if (!scfcp) {
+ WARN_ON_ONCE(!IS_ENABLED(CONFIG_KASAN));
atomic_inc(&n_alloc_errs);
+ allocfail = true;
} else {
scfcp->scfc_cpu = -1;
scfcp->scfc_wait = scfsp->scfs_wait;
@@ -431,7 +435,9 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
cpus_read_unlock();
else
preempt_enable();
- if (!(torture_random(trsp) & 0xfff))
+ if (allocfail)
+ schedule_timeout_idle((1 + longwait) * HZ); // Let no-wait handlers complete.
+ else if (!(torture_random(trsp) & 0xfff))
schedule_timeout_uninterruptible(1);
}
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index d57a5c1c1cd9..3561ab533dd4 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -13,6 +13,23 @@
 * Waiting for completion is typically a sync point, but not an exclusion point.
*/
+static void complete_with_flags(struct completion *x, int wake_flags)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
+
+ if (x->done != UINT_MAX)
+ x->done++;
+ swake_up_locked(&x->wait, wake_flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+}
+
+void complete_on_current_cpu(struct completion *x)
+{
+ return complete_with_flags(x, WF_CURRENT_CPU);
+}
+
/**
* complete: - signals a single thread waiting on this completion
* @x: holds the state of this particular completion
@@ -27,14 +44,7 @@
*/
void complete(struct completion *x)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&x->wait.lock, flags);
-
- if (x->done != UINT_MAX)
- x->done++;
- swake_up_locked(&x->wait);
- raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+ complete_with_flags(x, 0);
}
EXPORT_SYMBOL(complete);
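complete_with_flags() simply threads a wake flag through the existing swait wakeup; a minimal usage sketch of the new entry point, assuming the usual <linux/completion.h> declarations plus a prototype for complete_on_current_cpu():

static DECLARE_COMPLETION(work_done);

static void producer(void)
{
	/* ... publish results ... */
	complete_on_current_cpu(&work_done);	/* wake the waiter with WF_CURRENT_CPU */
}

static void consumer(void)
{
	wait_for_completion(&work_done);	/* ideally resumes on the producer's CPU */
	/* ... consume results ... */
}

The WF_CURRENT_CPU hint is also why default_wake_function()'s debug mask is widened later in this merge.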
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c52c2eba7c73..2299a5cfbfb9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1097,25 +1097,22 @@ int get_nohz_timer_target(void)
hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
- rcu_read_lock();
+ guard(rcu)();
+
for_each_domain(cpu, sd) {
for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
if (cpu == i)
continue;
- if (!idle_cpu(i)) {
- cpu = i;
- goto unlock;
- }
+ if (!idle_cpu(i))
+ return i;
}
}
if (default_cpu == -1)
default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
- cpu = default_cpu;
-unlock:
- rcu_read_unlock();
- return cpu;
+
+ return default_cpu;
}
/*
@@ -1194,6 +1191,20 @@ static void nohz_csd_func(void *info)
#endif /* CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
+static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
+{
+ if (rq->nr_running != 1)
+ return false;
+
+ if (p->sched_class != &fair_sched_class)
+ return false;
+
+ if (!task_on_rq_queued(p))
+ return false;
+
+ return true;
+}
+
bool sched_can_stop_tick(struct rq *rq)
{
int fifo_nr_running;
@@ -1229,6 +1240,18 @@ bool sched_can_stop_tick(struct rq *rq)
if (rq->nr_running > 1)
return false;
+ /*
+ * If there is one task and it has CFS runtime bandwidth constraints
+ * and it's on the cpu now, we don't want to stop the tick.
+ * This check prevents clearing the bit if a newly enqueued task here is
+ * dequeued by migrating while the constrained task continues to run.
+ * E.g. going from 2->1 without going through pick_next_task().
+ */
+ if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
+ if (cfs_task_bw_constrained(rq->curr))
+ return false;
+ }
+
return true;
}
#endif /* CONFIG_NO_HZ_FULL */
@@ -1804,7 +1827,8 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
int old_min, old_max, old_min_rt;
int result;
- mutex_lock(&uclamp_mutex);
+ guard(mutex)(&uclamp_mutex);
+
old_min = sysctl_sched_uclamp_util_min;
old_max = sysctl_sched_uclamp_util_max;
old_min_rt = sysctl_sched_uclamp_util_min_rt_default;
@@ -1813,7 +1837,7 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
if (result)
goto undo;
if (!write)
- goto done;
+ return 0;
if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max ||
sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE ||
@@ -1849,16 +1873,12 @@ static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
* Otherwise, keep it simple and do just a lazy update at each next
* task enqueue time.
*/
-
- goto done;
+ return 0;
undo:
sysctl_sched_uclamp_util_min = old_min;
sysctl_sched_uclamp_util_max = old_max;
sysctl_sched_uclamp_util_min_rt_default = old_min_rt;
-done:
- mutex_unlock(&uclamp_mutex);
-
return result;
}
#endif
@@ -3413,7 +3433,6 @@ static int migrate_swap_stop(void *data)
{
struct migration_swap_arg *arg = data;
struct rq *src_rq, *dst_rq;
- int ret = -EAGAIN;
if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
return -EAGAIN;
@@ -3421,33 +3440,25 @@ static int migrate_swap_stop(void *data)
src_rq = cpu_rq(arg->src_cpu);
dst_rq = cpu_rq(arg->dst_cpu);
- double_raw_lock(&arg->src_task->pi_lock,
- &arg->dst_task->pi_lock);
- double_rq_lock(src_rq, dst_rq);
+ guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock);
+ guard(double_rq_lock)(src_rq, dst_rq);
if (task_cpu(arg->dst_task) != arg->dst_cpu)
- goto unlock;
+ return -EAGAIN;
if (task_cpu(arg->src_task) != arg->src_cpu)
- goto unlock;
+ return -EAGAIN;
if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
- goto unlock;
+ return -EAGAIN;
if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
- goto unlock;
+ return -EAGAIN;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
__migrate_swap_task(arg->dst_task, arg->src_cpu);
- ret = 0;
-
-unlock:
- double_rq_unlock(src_rq, dst_rq);
- raw_spin_unlock(&arg->dst_task->pi_lock);
- raw_spin_unlock(&arg->src_task->pi_lock);
-
- return ret;
+ return 0;
}
/*
@@ -3722,14 +3733,14 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
struct sched_domain *sd;
__schedstat_inc(p->stats.nr_wakeups_remote);
- rcu_read_lock();
+
+ guard(rcu)();
for_each_domain(rq->cpu, sd) {
if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
__schedstat_inc(sd->ttwu_wake_remote);
break;
}
}
- rcu_read_unlock();
}
if (wake_flags & WF_MIGRATED)
@@ -3928,21 +3939,13 @@ static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags
void wake_up_if_idle(int cpu)
{
struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
-
- rcu_read_lock();
- if (!is_idle_task(rcu_dereference(rq->curr)))
- goto out;
-
- rq_lock_irqsave(rq, &rf);
- if (is_idle_task(rq->curr))
- resched_curr(rq);
- /* Else CPU is not idle, do nothing here: */
- rq_unlock_irqrestore(rq, &rf);
-
-out:
- rcu_read_unlock();
+ guard(rcu)();
+ if (is_idle_task(rcu_dereference(rq->curr))) {
+ guard(rq_lock_irqsave)(rq);
+ if (is_idle_task(rq->curr))
+ resched_curr(rq);
+ }
}
bool cpus_share_cache(int this_cpu, int that_cpu)
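The conversions above rely on the scope-based guards from <linux/cleanup.h>: the unlock runs automatically when the guard variable goes out of scope, which is what allows the early returns that replaced the goto/unlock labels. A minimal sketch, assuming only the mutex and rcu guards that already exist in mainline:

static DEFINE_MUTEX(example_mutex);

static int example_guarded(bool bail_early)
{
	guard(mutex)(&example_mutex);	/* mutex_unlock() runs on every return below */

	if (bail_early)
		return -EAGAIN;		/* no 'goto unlock' label needed */

	scoped_guard (rcu) {		/* rcu_read_lock()/rcu_read_unlock() bound to this block */
		/* ... RCU-protected reads ... */
	}

	return 0;
}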
@@ -4193,13 +4196,11 @@ bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
* Return: %true if @p->state changes (an actual wakeup was done),
* %false otherwise.
*/
-static int
-try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
- unsigned long flags;
+ guard(preempt)();
int cpu, success = 0;
- preempt_disable();
if (p == current) {
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
@@ -4226,129 +4227,127 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* reordered with p->state check below. This pairs with smp_store_mb()
* in set_current_state() that the waiting thread does.
*/
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- smp_mb__after_spinlock();
- if (!ttwu_state_match(p, state, &success))
- goto unlock;
+ scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
+ smp_mb__after_spinlock();
+ if (!ttwu_state_match(p, state, &success))
+ break;
- trace_sched_waking(p);
+ trace_sched_waking(p);
- /*
- * Ensure we load p->on_rq _after_ p->state, otherwise it would
- * be possible to, falsely, observe p->on_rq == 0 and get stuck
- * in smp_cond_load_acquire() below.
- *
- * sched_ttwu_pending() try_to_wake_up()
- * STORE p->on_rq = 1 LOAD p->state
- * UNLOCK rq->lock
- *
- * __schedule() (switch to task 'p')
- * LOCK rq->lock smp_rmb();
- * smp_mb__after_spinlock();
- * UNLOCK rq->lock
- *
- * [task p]
- * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
- *
- * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
- * __schedule(). See the comment for smp_mb__after_spinlock().
- *
- * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
- */
- smp_rmb();
- if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
- goto unlock;
+ /*
+ * Ensure we load p->on_rq _after_ p->state, otherwise it would
+ * be possible to, falsely, observe p->on_rq == 0 and get stuck
+ * in smp_cond_load_acquire() below.
+ *
+ * sched_ttwu_pending() try_to_wake_up()
+ * STORE p->on_rq = 1 LOAD p->state
+ * UNLOCK rq->lock
+ *
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * UNLOCK rq->lock
+ *
+ * [task p]
+ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
+ *
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
+ *
+ * A similar smp_rmb() lives in try_invoke_on_locked_down_task().
+ */
+ smp_rmb();
+ if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
+ break;
#ifdef CONFIG_SMP
- /*
- * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
- * possible to, falsely, observe p->on_cpu == 0.
- *
- * One must be running (->on_cpu == 1) in order to remove oneself
- * from the runqueue.
- *
- * __schedule() (switch to task 'p') try_to_wake_up()
- * STORE p->on_cpu = 1 LOAD p->on_rq
- * UNLOCK rq->lock
- *
- * __schedule() (put 'p' to sleep)
- * LOCK rq->lock smp_rmb();
- * smp_mb__after_spinlock();
- * STORE p->on_rq = 0 LOAD p->on_cpu
- *
- * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
- * __schedule(). See the comment for smp_mb__after_spinlock().
- *
- * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
- * schedule()'s deactivate_task() has 'happened' and p will no longer
- * care about its own p->state. See the comment in __schedule().
- */
- smp_acquire__after_ctrl_dep();
+ /*
+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+ * possible to, falsely, observe p->on_cpu == 0.
+ *
+ * One must be running (->on_cpu == 1) in order to remove oneself
+ * from the runqueue.
+ *
+ * __schedule() (switch to task 'p') try_to_wake_up()
+ * STORE p->on_cpu = 1 LOAD p->on_rq
+ * UNLOCK rq->lock
+ *
+ * __schedule() (put 'p' to sleep)
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * STORE p->on_rq = 0 LOAD p->on_cpu
+ *
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
+ *
+ * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
+ * schedule()'s deactivate_task() has 'happened' and p will no longer
+ * care about its own p->state. See the comment in __schedule().
+ */
+ smp_acquire__after_ctrl_dep();
- /*
- * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
- * == 0), which means we need to do an enqueue, change p->state to
- * TASK_WAKING such that we can unlock p->pi_lock before doing the
- * enqueue, such as ttwu_queue_wakelist().
- */
- WRITE_ONCE(p->__state, TASK_WAKING);
+ /*
+ * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
+ * == 0), which means we need to do an enqueue, change p->state to
+ * TASK_WAKING such that we can unlock p->pi_lock before doing the
+ * enqueue, such as ttwu_queue_wakelist().
+ */
+ WRITE_ONCE(p->__state, TASK_WAKING);
- /*
- * If the owning (remote) CPU is still in the middle of schedule() with
- * this task as prev, consider queueing p on the remote CPU's wake_list,
- * which potentially sends an IPI instead of spinning on p->on_cpu to
- * let the waker make forward progress. This is safe because IRQs are
- * disabled and the IPI will deliver after on_cpu is cleared.
- *
- * Ensure we load task_cpu(p) after p->on_cpu:
- *
- * set_task_cpu(p, cpu);
- * STORE p->cpu = @cpu
- * __schedule() (switch to task 'p')
- * LOCK rq->lock
- * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
- * STORE p->on_cpu = 1 LOAD p->cpu
- *
- * to ensure we observe the correct CPU on which the task is currently
- * scheduling.
- */
- if (smp_load_acquire(&p->on_cpu) &&
- ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
- goto unlock;
+ /*
+ * If the owning (remote) CPU is still in the middle of schedule() with
+ * this task as prev, consider queueing p on the remote CPU's wake_list,
+ * which potentially sends an IPI instead of spinning on p->on_cpu to
+ * let the waker make forward progress. This is safe because IRQs are
+ * disabled and the IPI will deliver after on_cpu is cleared.
+ *
+ * Ensure we load task_cpu(p) after p->on_cpu:
+ *
+ * set_task_cpu(p, cpu);
+ * STORE p->cpu = @cpu
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock
+ * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
+ * STORE p->on_cpu = 1 LOAD p->cpu
+ *
+ * to ensure we observe the correct CPU on which the task is currently
+ * scheduling.
+ */
+ if (smp_load_acquire(&p->on_cpu) &&
+ ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
+ break;
- /*
- * If the owning (remote) CPU is still in the middle of schedule() with
- * this task as prev, wait until it's done referencing the task.
- *
- * Pairs with the smp_store_release() in finish_task().
- *
- * This ensures that tasks getting woken will be fully ordered against
- * their previous state and preserve Program Order.
- */
- smp_cond_load_acquire(&p->on_cpu, !VAL);
+ /*
+ * If the owning (remote) CPU is still in the middle of schedule() with
+ * this task as prev, wait until it's done referencing the task.
+ *
+ * Pairs with the smp_store_release() in finish_task().
+ *
+ * This ensures that tasks getting woken will be fully ordered against
+ * their previous state and preserve Program Order.
+ */
+ smp_cond_load_acquire(&p->on_cpu, !VAL);
- cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
- if (task_cpu(p) != cpu) {
- if (p->in_iowait) {
- delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
- }
+ cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
+ if (task_cpu(p) != cpu) {
+ if (p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
- wake_flags |= WF_MIGRATED;
- psi_ttwu_dequeue(p);
- set_task_cpu(p, cpu);
- }
+ wake_flags |= WF_MIGRATED;
+ psi_ttwu_dequeue(p);
+ set_task_cpu(p, cpu);
+ }
#else
- cpu = task_cpu(p);
+ cpu = task_cpu(p);
#endif /* CONFIG_SMP */
- ttwu_queue(p, cpu, wake_flags);
-unlock:
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ ttwu_queue(p, cpu, wake_flags);
+ }
out:
if (success)
ttwu_stat(p, task_cpu(p), wake_flags);
- preempt_enable();
return success;
}
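One subtlety in the try_to_wake_up() conversion above: scoped_guard() expands to a single-iteration for-loop, so 'break' leaves the guarded scope, releasing p->pi_lock and restoring interrupts, and execution continues after the closing brace on the ttwu_stat() path. A hedged stand-alone sketch of that pattern (names below are illustrative):

static DEFINE_RAW_SPINLOCK(example_lock);

static int example_scoped(bool nothing_to_do)
{
	int ret = 0;

	scoped_guard (raw_spinlock_irqsave, &example_lock) {
		if (nothing_to_do)
			break;		/* unlock and irq-restore happen here */
		ret = 1;		/* ... work while holding the lock ... */
	}

	return ret;			/* reached either way, lock already released */
}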
@@ -4501,6 +4500,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+ p->se.vlag = 0;
+ p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -5496,23 +5497,20 @@ unsigned int nr_iowait(void)
void sched_exec(void)
{
struct task_struct *p = current;
- unsigned long flags;
+ struct migration_arg arg;
int dest_cpu;
- raw_spin_lock_irqsave(&p->pi_lock, flags);
- dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
- if (dest_cpu == smp_processor_id())
- goto unlock;
+ scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
+ dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
+ if (dest_cpu == smp_processor_id())
+ return;
- if (likely(cpu_active(dest_cpu))) {
- struct migration_arg arg = { p, dest_cpu };
+ if (unlikely(!cpu_active(dest_cpu)))
+ return;
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
- return;
+ arg = (struct migration_arg){ p, dest_cpu };
}
-unlock:
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
}
#endif
@@ -5722,9 +5720,6 @@ static void sched_tick_remote(struct work_struct *work)
struct tick_work *twork = container_of(dwork, struct tick_work, work);
int cpu = twork->cpu;
struct rq *rq = cpu_rq(cpu);
- struct task_struct *curr;
- struct rq_flags rf;
- u64 delta;
int os;
/*
@@ -5734,30 +5729,26 @@ static void sched_tick_remote(struct work_struct *work)
* statistics and checks timeslices in a time-independent way, regardless
* of when exactly it is running.
*/
- if (!tick_nohz_tick_stopped_cpu(cpu))
- goto out_requeue;
+ if (tick_nohz_tick_stopped_cpu(cpu)) {
+ guard(rq_lock_irq)(rq);
+ struct task_struct *curr = rq->curr;
- rq_lock_irq(rq, &rf);
- curr = rq->curr;
- if (cpu_is_offline(cpu))
- goto out_unlock;
+ if (cpu_online(cpu)) {
+ update_rq_clock(rq);
- update_rq_clock(rq);
+ if (!is_idle_task(curr)) {
+ /*
+ * Make sure the next tick runs within a
+ * reasonable amount of time.
+ */
+ u64 delta = rq_clock_task(rq) - curr->se.exec_start;
+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ }
+ curr->sched_class->task_tick(rq, curr, 0);
- if (!is_idle_task(curr)) {
- /*
- * Make sure the next tick runs within a reasonable
- * amount of time.
- */
- delta = rq_clock_task(rq) - curr->se.exec_start;
- WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ calc_load_nohz_remote(rq);
+ }
}
- curr->sched_class->task_tick(rq, curr, 0);
-
- calc_load_nohz_remote(rq);
-out_unlock:
- rq_unlock_irq(rq, &rf);
-out_requeue:
/*
* Run the remote tick once per second (1Hz). This arbitrary
@@ -6306,19 +6297,19 @@ static bool try_steal_cookie(int this, int that)
unsigned long cookie;
bool success = false;
- local_irq_disable();
- double_rq_lock(dst, src);
+ guard(irq)();
+ guard(double_rq_lock)(dst, src);
cookie = dst->core->core_cookie;
if (!cookie)
- goto unlock;
+ return false;
if (dst->curr != dst->idle)
- goto unlock;
+ return false;
p = sched_core_find(src, cookie);
if (!p)
- goto unlock;
+ return false;
do {
if (p == src->core_pick || p == src->curr)
@@ -6330,9 +6321,10 @@ static bool try_steal_cookie(int this, int that)
if (p->core_occupation > dst->idle->core_occupation)
goto next;
/*
- * sched_core_find() and sched_core_next() will ensure that task @p
- * is not throttled now, we also need to check whether the runqueue
- * of the destination CPU is being throttled.
+ * sched_core_find() and sched_core_next() will ensure
+ * that task @p is not throttled now; we also need to
+ * check whether the runqueue of the destination CPU is
+ * being throttled.
*/
if (sched_task_is_throttled(p, this))
goto next;
@@ -6350,10 +6342,6 @@ next:
p = sched_core_next(p, cookie);
} while (p);
-unlock:
- double_rq_unlock(dst, src);
- local_irq_enable();
-
return success;
}
@@ -6411,20 +6399,24 @@ static void queue_core_balance(struct rq *rq)
queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
}
+DEFINE_LOCK_GUARD_1(core_lock, int,
+ sched_core_lock(*_T->lock, &_T->flags),
+ sched_core_unlock(*_T->lock, &_T->flags),
+ unsigned long flags)
+
static void sched_core_cpu_starting(unsigned int cpu)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
- unsigned long flags;
int t;
- sched_core_lock(cpu, &flags);
+ guard(core_lock)(&cpu);
WARN_ON_ONCE(rq->core != rq);
/* if we're the first, we'll be our own leader */
if (cpumask_weight(smt_mask) == 1)
- goto unlock;
+ return;
/* find the leader */
for_each_cpu(t, smt_mask) {
@@ -6438,7 +6430,7 @@ static void sched_core_cpu_starting(unsigned int cpu)
}
if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
- goto unlock;
+ return;
/* install and validate core_rq */
for_each_cpu(t, smt_mask) {
@@ -6449,29 +6441,25 @@ static void sched_core_cpu_starting(unsigned int cpu)
WARN_ON_ONCE(rq->core != core_rq);
}
-
-unlock:
- sched_core_unlock(cpu, &flags);
}
static void sched_core_cpu_deactivate(unsigned int cpu)
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
- unsigned long flags;
int t;
- sched_core_lock(cpu, &flags);
+ guard(core_lock)(&cpu);
/* if we're the last man standing, nothing to do */
if (cpumask_weight(smt_mask) == 1) {
WARN_ON_ONCE(rq->core != rq);
- goto unlock;
+ return;
}
/* if we're not the leader, nothing to do */
if (rq->core != rq)
- goto unlock;
+ return;
/* find a new leader */
for_each_cpu(t, smt_mask) {
@@ -6482,7 +6470,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
}
if (WARN_ON_ONCE(!core_rq)) /* impossible */
- goto unlock;
+ return;
/* copy the shared state to the new leader */
core_rq->core_task_seq = rq->core_task_seq;
@@ -6504,9 +6492,6 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
rq = cpu_rq(t);
rq->core = core_rq;
}
-
-unlock:
- sched_core_unlock(cpu, &flags);
}
static inline void sched_core_cpu_dying(unsigned int cpu)
@@ -7030,7 +7015,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
void *key)
{
- WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU));
return try_to_wake_up(curr->private, mode, wake_flags);
}
EXPORT_SYMBOL(default_wake_function);
@@ -7383,6 +7368,19 @@ struct task_struct *idle_task(int cpu)
return cpu_rq(cpu)->idle;
}
+#ifdef CONFIG_SCHED_CORE
+int sched_core_idle_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (sched_core_enabled(rq) && rq->curr == rq->idle)
+ return 1;
+
+ return idle_cpu(cpu);
+}
+
+#endif
+
#ifdef CONFIG_SMP
/*
* This function computes an effective utilization for the given CPU, to be
@@ -9940,7 +9938,7 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
- init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
+ init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL);
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
root_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -11074,11 +11072,16 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
/*
* Ensure max(child_quota) <= parent_quota. On cgroup2,
- * always take the min. On cgroup1, only inherit when no
- * limit is set:
+ * always take the non-RUNTIME_INF min. On cgroup1, only
+ * inherit when no limit is set. In both cases this is used
+ * by the scheduler to determine if a given CFS task has a
+ * bandwidth constraint at some higher level.
*/
if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) {
- quota = min(quota, parent_quota);
+ if (quota == RUNTIME_INF)
+ quota = parent_quota;
+ else if (parent_quota != RUNTIME_INF)
+ quota = min(quota, parent_quota);
} else {
if (quota == RUNTIME_INF)
quota = parent_quota;
@@ -11139,6 +11142,27 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
return 0;
}
+
+static u64 throttled_time_self(struct task_group *tg)
+{
+ int i;
+ u64 total = 0;
+
+ for_each_possible_cpu(i) {
+ total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time);
+ }
+
+ return total;
+}
+
+static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v)
+{
+ struct task_group *tg = css_tg(seq_css(sf));
+
+ seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg));
+
+ return 0;
+}
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -11215,6 +11239,10 @@ static struct cftype cpu_legacy_files[] = {
.name = "stat",
.seq_show = cpu_cfs_stat_show,
},
+ {
+ .name = "stat.local",
+ .seq_show = cpu_cfs_local_stat_show,
+ },
#endif
#ifdef CONFIG_RT_GROUP_SCHED
{
@@ -11271,6 +11299,24 @@ static int cpu_extra_stat_show(struct seq_file *sf,
return 0;
}
+static int cpu_local_stat_show(struct seq_file *sf,
+ struct cgroup_subsys_state *css)
+{
+#ifdef CONFIG_CFS_BANDWIDTH
+ {
+ struct task_group *tg = css_tg(css);
+ u64 throttled_self_usec;
+
+ throttled_self_usec = throttled_time_self(tg);
+ do_div(throttled_self_usec, NSEC_PER_USEC);
+
+ seq_printf(sf, "throttled_usec %llu\n",
+ throttled_self_usec);
+ }
+#endif
+ return 0;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
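throttled_time_self() sums the new per-CPU throttled_clock_self_time counters; the result is exported to cgroup v1 through the "stat.local" cftype above (nanoseconds) and to cgroup v2 through css_local_stat_show() (microseconds). A hedged usage sketch, with mount points and group names assumed:

	cat /sys/fs/cgroup/mygrp/cpu.stat.local        # cgroup v2: throttled_usec <usec>
	cat /sys/fs/cgroup/cpu/mygrp/cpu.stat.local    # cgroup v1: throttled_time <ns>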
@@ -11449,6 +11495,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
.css_extra_stat_show = cpu_extra_stat_show,
+ .css_local_stat_show = cpu_local_stat_show,
#ifdef CONFIG_RT_GROUP_SCHED
.can_attach = cpu_cgroup_can_attach,
#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 066ff1c8ae4e..4c3d0d9f3db6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -347,10 +347,7 @@ static __init int sched_init_debug(void)
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
- debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
- debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
- debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
- debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
+ debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -427,6 +424,7 @@ static void register_sd(struct sched_domain *sd, struct dentry *parent)
#undef SDM
debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
+ debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
}
void update_sched_domain_debugfs(void)
@@ -581,9 +579,13 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
else
SEQ_printf(m, " %c", task_state_to_char(p));
- SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
+ SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
p->comm, task_pid_nr(p),
SPLIT_NS(p->se.vruntime),
+ entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
+ SPLIT_NS(p->se.deadline),
+ SPLIT_NS(p->se.slice),
+ SPLIT_NS(p->se.sum_exec_runtime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
@@ -626,10 +628,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
- s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
- spread, rq0_min_vruntime, spread0;
+ s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, spread;
+ struct sched_entity *last, *first;
struct rq *rq = cpu_rq(cpu);
- struct sched_entity *last;
unsigned long flags;
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -643,26 +644,25 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SPLIT_NS(cfs_rq->exec_clock));
raw_spin_rq_lock_irqsave(rq, flags);
- if (rb_first_cached(&cfs_rq->tasks_timeline))
- MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
+ first = __pick_first_entity(cfs_rq);
+ if (first)
+ left_vruntime = first->vruntime;
last = __pick_last_entity(cfs_rq);
if (last)
- max_vruntime = last->vruntime;
+ right_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
- rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
raw_spin_rq_unlock_irqrestore(rq, flags);
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
- SPLIT_NS(MIN_vruntime));
+
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_vruntime",
+ SPLIT_NS(left_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
SPLIT_NS(min_vruntime));
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
- SPLIT_NS(max_vruntime));
- spread = max_vruntime - MIN_vruntime;
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
- SPLIT_NS(spread));
- spread0 = min_vruntime - rq0_min_vruntime;
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
- SPLIT_NS(spread0));
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
+ SPLIT_NS(avg_vruntime(cfs_rq)));
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
+ SPLIT_NS(right_vruntime));
+ spread = right_vruntime - left_vruntime;
+ SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
cfs_rq->nr_spread_over);
SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
@@ -863,10 +863,7 @@ static void sched_debug_header(struct seq_file *m)
SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
- PN(sysctl_sched_latency);
- PN(sysctl_sched_min_granularity);
- PN(sysctl_sched_idle_min_granularity);
- PN(sysctl_sched_wakeup_granularity);
+ PN(sysctl_sched_base_slice);
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#undef PN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b3e25be58e2b..911d0063763c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -47,6 +47,7 @@
#include <linux/psi.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
+#include <linux/rbtree_augmented.h>
#include <asm/switch_to.h>
@@ -57,22 +58,6 @@
#include "autogroup.h"
/*
- * Targeted preemption latency for CPU-bound tasks:
- *
- * NOTE: this latency value is not the same as the concept of
- * 'timeslice length' - timeslices in CFS are of variable length
- * and have no persistent notion like in traditional, time-slice
- * based scheduling concepts.
- *
- * (to see the precise effective timeslice length of your workload,
- * run vmstat and monitor the context-switches (cs) field)
- *
- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
- */
-unsigned int sysctl_sched_latency = 6000000ULL;
-static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
-
-/*
* The initial- and re-scaling of tunables is configurable
*
* Options are:
@@ -90,21 +75,8 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
*
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 750000ULL;
-static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
-
-/*
- * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
- * Applies only when SCHED_IDLE tasks compete with normal tasks.
- *
- * (default: 0.75 msec)
- */
-unsigned int sysctl_sched_idle_min_granularity = 750000ULL;
-
-/*
- * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
- */
-static unsigned int sched_nr_latency = 8;
+unsigned int sysctl_sched_base_slice = 750000ULL;
+static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
/*
* After fork, child runs first. If set to 0 (default) then
@@ -112,18 +84,6 @@ static unsigned int sched_nr_latency = 8;
*/
unsigned int sysctl_sched_child_runs_first __read_mostly;
-/*
- * SCHED_OTHER wake-up granularity.
- *
- * This option delays the preemption effects of decoupled workloads
- * and reduces their over-scheduling. Synchronous workloads will still
- * have immediate wakeup/sleep latencies.
- *
- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
- */
-unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
-static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
int sched_thermal_decay_shift;
@@ -277,9 +237,7 @@ static void update_sysctl(void)
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
- SET_SYSCTL(sched_min_granularity);
- SET_SYSCTL(sched_latency);
- SET_SYSCTL(sched_wakeup_granularity);
+ SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
@@ -347,6 +305,16 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
return mul_u64_u32_shr(delta_exec, fact, shift);
}
+/*
+ * delta /= w
+ */
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
+{
+ if (unlikely(se->load.weight != NICE_0_LOAD))
+ delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+
+ return delta;
+}
const struct sched_class fair_sched_class;
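calc_delta_fair() is only moved up here, but the EEVDF code below leans on it for lag and slice scaling, so the 'delta /= w' comment is worth spelling out as a worked relation:

	vruntime_delta = delta_exec * NICE_0_LOAD / se->load.weight

	weight == NICE_0_LOAD      ->  vruntime advances exactly with wall-clock execution time
	weight == 2 * NICE_0_LOAD  ->  vruntime advances at half that rate, so heavier entities
	                               accumulate virtual time more slowly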
@@ -601,13 +569,198 @@ static inline bool entity_before(const struct sched_entity *a,
return (s64)(a->vruntime - b->vruntime) < 0;
}
+static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ return (s64)(se->vruntime - cfs_rq->min_vruntime);
+}
+
#define __node_2_se(node) \
rb_entry((node), struct sched_entity, run_node)
+/*
+ * Compute virtual time from the per-task service numbers:
+ *
+ * Fair schedulers conserve lag:
+ *
+ * \Sum lag_i = 0
+ *
+ * Where lag_i is given by:
+ *
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+ * Where S is the ideal service time and V is its virtual time counterpart.
+ * Therefore:
+ *
+ * \Sum lag_i = 0
+ * \Sum w_i * (V - v_i) = 0
+ * \Sum w_i * V - w_i * v_i = 0
+ *
+ * From which we can solve an expression for V in v_i (which we have in
+ * se->vruntime):
+ *
+ * \Sum v_i * w_i \Sum v_i * w_i
+ * V = -------------- = --------------
+ * \Sum w_i W
+ *
+ * Specifically, this is the weighted average of all entity virtual runtimes.
+ *
+ * [[ NOTE: this is only equal to the ideal scheduler under the condition
+ * that join/leave operations happen at lag_i = 0, otherwise the
+ * virtual time has non-contiguous motion equivalent to:
+ *
+ * V +-= lag_i / W
+ *
+ * Also see the comment in place_entity() that deals with this. ]]
+ *
+ * However, since v_i is u64 and the multiplication could easily overflow,
+ * transform it into a relative form that uses smaller quantities:
+ *
+ * Substitute: v_i == (v_i - v0) + v0
+ *
+ * \Sum ((v_i - v0) + v0) * w_i \Sum (v_i - v0) * w_i
+ * V = ---------------------------- = --------------------- + v0
+ * W W
+ *
+ * Which we track using:
+ *
+ * v0 := cfs_rq->min_vruntime
+ * \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
+ * \Sum w_i := cfs_rq->avg_load
+ *
+ * Since min_vruntime is a monotonic increasing variable that closely tracks
+ * the per-task service, these deltas: (v_i - v), will be in the order of the
+ * maximal (virtual) lag induced in the system due to quantisation.
+ *
+ * Also, we use scale_load_down() to reduce the size.
+ *
+ * As measured, the max (key * weight) value was ~44 bits for a kernel build.
+ */
+static void
+avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime += key * weight;
+ cfs_rq->avg_load += weight;
+}
+
+static void
+avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ unsigned long weight = scale_load_down(se->load.weight);
+ s64 key = entity_key(cfs_rq, se);
+
+ cfs_rq->avg_vruntime -= key * weight;
+ cfs_rq->avg_load -= weight;
+}
+
+static inline
+void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
+{
+ /*
+ * v' = v + d ==> avg_vruntime' = avg_vruntime - d*avg_load
+ */
+ cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
+}
+
+u64 avg_vruntime(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+ long load = cfs_rq->avg_load;
+
+ if (curr && curr->on_rq) {
+ unsigned long weight = scale_load_down(curr->load.weight);
+
+ avg += entity_key(cfs_rq, curr) * weight;
+ load += weight;
+ }
+
+ if (load)
+ avg = div_s64(avg, load);
+
+ return cfs_rq->min_vruntime + avg;
+}
+
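A worked instance of the relative form above, with made-up numbers to check the bookkeeping: take v0 = min_vruntime = 100 and two queued entities (v_i, w_i) = (105, 2) and (120, 1), weights already scale_load_down()'d:

	keys:          105 - 100 = 5,      120 - 100 = 20
	avg_vruntime = 5*2 + 20*1 = 30     avg_load = 2 + 1 = 3
	V            = v0 + 30/3  = 110 == (105*2 + 120*1) / 3    (the direct weighted average)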
+/*
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+ * However, since V is approximated by the weighted average of all entities it
+ * is possible -- by addition/removal/reweight to the tree -- to move V around
+ * and end up with a larger lag than we started with.
+ *
+ * Limit this to double the slice length, with a minimum of TICK_NSEC
+ * since that is the timing granularity.
+ *
+ * EEVDF gives the following limit for a steady state system:
+ *
+ * -r_max < lag < max(r_max, q)
+ *
+ * XXX could add max_slice to the augmented data to track this.
+ */
+void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ s64 lag, limit;
+
+ SCHED_WARN_ON(!se->on_rq);
+ lag = avg_vruntime(cfs_rq) - se->vruntime;
+
+ limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+ se->vlag = clamp(lag, -limit, limit);
+}
+
+/*
+ * Entity is eligible once it has received less service than it ought to have,
+ * e.g. lag >= 0.
+ *
+ * lag_i = S - s_i = w_i*(V - v_i)
+ *
+ * lag_i >= 0 -> V >= v_i
+ *
+ * \Sum (v_i - v)*w_i
+ * V = ------------------ + v
+ * \Sum w_i
+ *
+ * lag_i >= 0 -> \Sum (v_i - v)*w_i >= (v_i - v)*(\Sum w_i)
+ *
+ * Note: using 'avg_vruntime() > se->vruntime' is inaccurate due
+ * to the loss in precision caused by the division.
+ */
+int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct sched_entity *curr = cfs_rq->curr;
+ s64 avg = cfs_rq->avg_vruntime;
+ long load = cfs_rq->avg_load;
+
+ if (curr && curr->on_rq) {
+ unsigned long weight = scale_load_down(curr->load.weight);
+
+ avg += entity_key(cfs_rq, curr) * weight;
+ load += weight;
+ }
+
+ return avg >= entity_key(cfs_rq, se) * load;
+}
+
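Continuing the numbers from the avg_vruntime() example: with avg_vruntime = 30 and avg_load = 3, the overflow-free comparison above classifies both entities the same way the lag definition does:

	v_i = 105:  key * \Sum w_i = 5*3  = 15 <= 30  ->  eligible      (V = 110 >= 105)
	v_i = 120:  key * \Sum w_i = 20*3 = 60 >  30  ->  not eligible  (V = 110 <  120)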
+static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
+{
+ u64 min_vruntime = cfs_rq->min_vruntime;
+ /*
+ * open coded max_vruntime() to allow updating avg_vruntime
+ */
+ s64 delta = (s64)(vruntime - min_vruntime);
+ if (delta > 0) {
+ avg_vruntime_update(cfs_rq, delta);
+ min_vruntime = vruntime;
+ }
+ return min_vruntime;
+}
+
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
+ struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
- struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
u64 vruntime = cfs_rq->min_vruntime;
@@ -618,9 +771,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
curr = NULL;
}
- if (leftmost) { /* non-empty tree */
- struct sched_entity *se = __node_2_se(leftmost);
-
+ if (se) {
if (!curr)
vruntime = se->vruntime;
else
@@ -629,7 +780,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
/* ensure we never gain time by being placed backwards. */
u64_u32_store(cfs_rq->min_vruntime,
- max_vruntime(cfs_rq->min_vruntime, vruntime));
+ __update_min_vruntime(cfs_rq, vruntime));
}
static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
@@ -637,17 +788,51 @@ static inline bool __entity_less(struct rb_node *a, const struct rb_node *b)
return entity_before(__node_2_se(a), __node_2_se(b));
}
+#define deadline_gt(field, lse, rse) ({ (s64)((lse)->field - (rse)->field) > 0; })
+
+static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node)
+{
+ if (node) {
+ struct sched_entity *rse = __node_2_se(node);
+ if (deadline_gt(min_deadline, se, rse))
+ se->min_deadline = rse->min_deadline;
+ }
+}
+
+/*
+ * se->min_deadline = min(se->deadline, left->min_deadline, right->min_deadline)
+ */
+static inline bool min_deadline_update(struct sched_entity *se, bool exit)
+{
+ u64 old_min_deadline = se->min_deadline;
+ struct rb_node *node = &se->run_node;
+
+ se->min_deadline = se->deadline;
+ __update_min_deadline(se, node->rb_right);
+ __update_min_deadline(se, node->rb_left);
+
+ return se->min_deadline == old_min_deadline;
+}
+
+RB_DECLARE_CALLBACKS(static, min_deadline_cb, struct sched_entity,
+ run_node, min_deadline, min_deadline_update);
+
/*
* Enqueue an entity into the rb-tree:
*/
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less);
+ avg_vruntime_add(cfs_rq, se);
+ se->min_deadline = se->deadline;
+ rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ __entity_less, &min_deadline_cb);
}
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
+ rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
+ &min_deadline_cb);
+ avg_vruntime_sub(cfs_rq, se);
}
struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
@@ -660,14 +845,88 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
return __node_2_se(left);
}
-static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+/*
+ * Earliest Eligible Virtual Deadline First
+ *
+ * In order to provide latency guarantees for different request sizes
+ * EEVDF selects the best runnable task from two criteria:
+ *
+ * 1) the task must be eligible (must be owed service)
+ *
+ * 2) from those tasks that meet 1), we select the one
+ * with the earliest virtual deadline.
+ *
+ * We can do this in O(log n) time due to an augmented RB-tree. The
+ * tree keeps the entries sorted on service, but also functions as a
+ * heap based on the deadline by keeping:
+ *
+ * se->min_deadline = min(se->deadline, se->{left,right}->min_deadline)
+ *
+ * Which allows an EDF like search on (sub)trees.
+ */
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
{
- struct rb_node *next = rb_next(&se->run_node);
+ struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
+ struct sched_entity *curr = cfs_rq->curr;
+ struct sched_entity *best = NULL;
- if (!next)
- return NULL;
+ if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
+ curr = NULL;
+
+ /*
+ * Once selected, run a task until it either becomes non-eligible or
+ * until it gets a new slice. See the HACK in set_next_entity().
+ */
+ if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
+ return curr;
+
+ while (node) {
+ struct sched_entity *se = __node_2_se(node);
+
+ /*
+ * If this entity is not eligible, try the left subtree.
+ */
+ if (!entity_eligible(cfs_rq, se)) {
+ node = node->rb_left;
+ continue;
+ }
+
+ /*
+ * If this entity has an earlier deadline than the previous
+ * best, take this one. If it also has the earliest deadline
+ * of its subtree, we're done.
+ */
+ if (!best || deadline_gt(deadline, best, se)) {
+ best = se;
+ if (best->deadline == best->min_deadline)
+ break;
+ }
- return __node_2_se(next);
+ /*
+	 * If the earliest deadline in this subtree is in the fully
+ * eligible left half of our space, go there.
+ */
+ if (node->rb_left &&
+ __node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
+ node = node->rb_left;
+ continue;
+ }
+
+ node = node->rb_right;
+ }
+
+ if (!best || (curr && deadline_gt(deadline, best, curr)))
+ best = curr;
+
+ if (unlikely(!best)) {
+ struct sched_entity *left = __pick_first_entity(cfs_rq);
+ if (left) {
+ pr_err("EEVDF scheduling fail, picking leftmost\n");
+ return left;
+ }
+ }
+
+ return best;
}
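/*
 * Editor's sketch (illustrative userspace model, not part of this diff): the
 * descent above on a hand-built three-node "tree". Node names, values and
 * the eligible flag (standing in for entity_eligible()) are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	struct node *left, *right;
	int64_t deadline, min_deadline;
	bool eligible;
	const char *name;
};

static struct node *pick(struct node *node)
{
	struct node *best = NULL;

	while (node) {
		if (!node->eligible) {		/* only the left subtree can still help */
			node = node->left;
			continue;
		}
		if (!best || node->deadline < best->deadline) {
			best = node;
			if (best->deadline == best->min_deadline)
				break;		/* nothing below beats this one */
		}
		if (node->left && node->left->min_deadline == node->min_deadline)
			node = node->left;
		else
			node = node->right;
	}
	return best;
}

int main(void)
{
	/* sorted by vruntime A < B < C; B is the root of the tree */
	struct node a = { .name = "A", .deadline = 50, .min_deadline = 50, .eligible = true  };
	struct node c = { .name = "C", .deadline = 20, .min_deadline = 20, .eligible = false };
	struct node b = { .left = &a, .right = &c, .name = "B",
			  .deadline = 40, .min_deadline = 20, .eligible = true };
	struct node *res = pick(&b);

	/* C has the earliest deadline but is not eligible, so B is picked */
	printf("picked %s\n", res ? res->name : "(none)");
	return 0;
}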
#ifdef CONFIG_SCHED_DEBUG
@@ -684,109 +943,51 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
/**************************************************************
* Scheduling class statistics methods:
*/
-
+#ifdef CONFIG_SMP
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
- sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
- sysctl_sched_min_granularity);
-
#define WRT_SYSCTL(name) \
(normalized_sysctl_##name = sysctl_##name / (factor))
- WRT_SYSCTL(sched_min_granularity);
- WRT_SYSCTL(sched_latency);
- WRT_SYSCTL(sched_wakeup_granularity);
+ WRT_SYSCTL(sched_base_slice);
#undef WRT_SYSCTL
return 0;
}
#endif
+#endif
-/*
- * delta /= w
- */
-static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
-{
- if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
-
- return delta;
-}
-
-/*
- * The idea is to set a period in which each task runs once.
- *
- * When there are too many tasks (sched_nr_latency) we have to stretch
- * this period because otherwise the slices get too small.
- *
- * p = (nr <= nl) ? l : l*nr/nl
- */
-static u64 __sched_period(unsigned long nr_running)
-{
- if (unlikely(nr_running > sched_nr_latency))
- return nr_running * sysctl_sched_min_granularity;
- else
- return sysctl_sched_latency;
-}
-
-static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
/*
- * We calculate the wall-time slice from the period by taking a part
- * proportional to the weight.
- *
- * s = p*P[w/rw]
+ * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i
+ * this is probably good enough.
*/
-static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- unsigned int nr_running = cfs_rq->nr_running;
- struct sched_entity *init_se = se;
- unsigned int min_gran;
- u64 slice;
-
- if (sched_feat(ALT_PERIOD))
- nr_running = rq_of(cfs_rq)->cfs.h_nr_running;
-
- slice = __sched_period(nr_running + !se->on_rq);
-
- for_each_sched_entity(se) {
- struct load_weight *load;
- struct load_weight lw;
- struct cfs_rq *qcfs_rq;
-
- qcfs_rq = cfs_rq_of(se);
- load = &qcfs_rq->load;
-
- if (unlikely(!se->on_rq)) {
- lw = qcfs_rq->load;
+ if ((s64)(se->vruntime - se->deadline) < 0)
+ return;
- update_load_add(&lw, se->load.weight);
- load = &lw;
- }
- slice = __calc_delta(slice, se->load.weight, load);
- }
+ /*
+ * For EEVDF the virtual time slope is determined by w_i (iow.
+ * nice) while the request time r_i is determined by
+ * sysctl_sched_base_slice.
+ */
+ se->slice = sysctl_sched_base_slice;
- if (sched_feat(BASE_SLICE)) {
- if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
- min_gran = sysctl_sched_idle_min_granularity;
- else
- min_gran = sysctl_sched_min_granularity;
+ /*
+ * EEVDF: vd_i = ve_i + r_i / w_i
+ */
+ se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
- slice = max_t(u64, slice, min_gran);
+ /*
+ * The task has consumed its request, reschedule.
+ */
+ if (cfs_rq->nr_running > 1) {
+ resched_curr(rq_of(cfs_rq));
+ clear_buddies(cfs_rq, se);
}
-
- return slice;
-}
-
-/*
- * We calculate the vruntime slice of a to-be-inserted task.
- *
- * vs = s/w
- */
-static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
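/*
 * Editor's note (worked example with illustrative values, not from this
 * patch): calc_delta_fair() scales by NICE_0_LOAD/w_i, so with a base slice
 * r of 3 ms a nice-0 task (w = NICE_0_LOAD) gets a virtual deadline of
 * vruntime + 3 ms, while a task of twice that weight gets vruntime + 1.5 ms.
 * The wall-clock request is 3 ms in both cases (the heavier task's vruntime
 * also advances at half the rate); it is merely expressed as a nearer
 * virtual deadline, which is the w_i slope mentioned above.
 */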
#include "pelt.h"
@@ -921,6 +1122,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
schedstat_add(cfs_rq->exec_clock, delta_exec);
curr->vruntime += calc_delta_fair(delta_exec, curr);
+ update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
if (entity_is_task(curr)) {
@@ -3375,16 +3577,36 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
+ unsigned long old_weight = se->load.weight;
+
if (se->on_rq) {
/* commit outstanding execution time */
if (cfs_rq->curr == se)
update_curr(cfs_rq);
+ else
+ avg_vruntime_sub(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
}
dequeue_load_avg(cfs_rq, se);
update_load_set(&se->load, weight);
+ if (!se->on_rq) {
+ /*
+ * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
+ * we need to scale se->vlag when w_i changes.
+ */
+ se->vlag = div_s64(se->vlag * old_weight, weight);
+ } else {
+ s64 deadline = se->deadline - se->vruntime;
+ /*
+ * When the weight changes, the virtual time slope changes and
+ * we should adjust the relative virtual deadline accordingly.
+ */
+ deadline = div_s64(deadline * old_weight, weight);
+ se->deadline = se->vruntime + deadline;
+ }
+
#ifdef CONFIG_SMP
do {
u32 divider = get_pelt_divider(&se->avg);
@@ -3394,9 +3616,11 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
#endif
enqueue_load_avg(cfs_rq, se);
- if (se->on_rq)
+ if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
-
+ if (cfs_rq->curr != se)
+ avg_vruntime_add(cfs_rq, se);
+ }
}
void reweight_task(struct task_struct *p, int prio)
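/*
 * Editor's note (worked example with illustrative numbers, not from this
 * patch): the rescaling above keeps the weight-independent quantities
 * invariant. For a sleeping entity with vlag = 400us whose weight doubles
 * from 1024 to 2048, lag_i = w_i*vl_i is preserved by halving vlag to 200us.
 * For a queued entity whose relative virtual deadline (deadline - vruntime)
 * was 6 ms, doubling the weight halves it to 3 ms, since vd_i - ve_i = r_i/w_i.
 */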
@@ -4692,159 +4916,125 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
#endif /* CONFIG_SMP */
-static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-#ifdef CONFIG_SCHED_DEBUG
- s64 d = se->vruntime - cfs_rq->min_vruntime;
-
- if (d < 0)
- d = -d;
-
- if (d > 3*sysctl_sched_latency)
- schedstat_inc(cfs_rq->nr_spread_over);
-#endif
-}
-
-static inline bool entity_is_long_sleeper(struct sched_entity *se)
+static void
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- struct cfs_rq *cfs_rq;
- u64 sleep_time;
+ u64 vslice = calc_delta_fair(se->slice, se);
+ u64 vruntime = avg_vruntime(cfs_rq);
+ s64 lag = 0;
- if (se->exec_start == 0)
- return false;
-
- cfs_rq = cfs_rq_of(se);
-
- sleep_time = rq_clock_task(rq_of(cfs_rq));
+ /*
+ * Due to how V is constructed as the weighted average of entities,
+ * adding tasks with positive lag, or removing tasks with negative lag
+	 * will move 'time' backwards; this can screw around with the lag of
+ * other tasks.
+ *
+ * EEVDF: placement strategy #1 / #2
+ */
+ if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
+ struct sched_entity *curr = cfs_rq->curr;
+ unsigned long load;
- /* Happen while migrating because of clock task divergence */
- if (sleep_time <= se->exec_start)
- return false;
+ lag = se->vlag;
- sleep_time -= se->exec_start;
- if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
- return true;
+ /*
+ * If we want to place a task and preserve lag, we have to
+ * consider the effect of the new entity on the weighted
+	 * average and compensate for this; otherwise lag can quickly
+ * evaporate.
+ *
+ * Lag is defined as:
+ *
+ * lag_i = S - s_i = w_i * (V - v_i)
+ *
+ * To avoid the 'w_i' term all over the place, we only track
+ * the virtual lag:
+ *
+ * vl_i = V - v_i <=> v_i = V - vl_i
+ *
+ * And we take V to be the weighted average of all v:
+ *
+ * V = (\Sum w_j*v_j) / W
+ *
+ * Where W is: \Sum w_j
+ *
+ * Then, the weighted average after adding an entity with lag
+ * vl_i is given by:
+ *
+ * V' = (\Sum w_j*v_j + w_i*v_i) / (W + w_i)
+ * = (W*V + w_i*(V - vl_i)) / (W + w_i)
+ * = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
+	 *	      = (V*(W + w_i) - w_i*vl_i) / (W + w_i)
+ * = V - w_i*vl_i / (W + w_i)
+ *
+ * And the actual lag after adding an entity with vl_i is:
+ *
+ * vl'_i = V' - v_i
+ * = V - w_i*vl_i / (W + w_i) - (V - vl_i)
+ * = vl_i - w_i*vl_i / (W + w_i)
+ *
+ * Which is strictly less than vl_i. So in order to preserve lag
+ * we should inflate the lag before placement such that the
+ * effective lag after placement comes out right.
+ *
+ * As such, invert the above relation for vl'_i to get the vl_i
+ * we need to use such that the lag after placement is the lag
+ * we computed before dequeue.
+ *
+ * vl'_i = vl_i - w_i*vl_i / (W + w_i)
+ * = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
+ *
+ * (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
+ * = W*vl_i
+ *
+ * vl_i = (W + w_i)*vl'_i / W
+ */
+ load = cfs_rq->avg_load;
+ if (curr && curr->on_rq)
+ load += scale_load_down(curr->load.weight);
- return false;
-}
+ lag *= load + scale_load_down(se->load.weight);
+ if (WARN_ON_ONCE(!load))
+ load = 1;
+ lag = div_s64(lag, load);
+ }
-static void
-place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
-{
- u64 vruntime = cfs_rq->min_vruntime;
+ se->vruntime = vruntime - lag;
/*
- * The 'current' period is already promised to the current tasks,
- * however the extra weight of the new task will slow them down a
- * little, place the new task so that it fits in the slot that
- * stays open at the end.
+	 * When joining the competition, the existing tasks will be,
+	 * on average, halfway through their slice; as such, start tasks
+ * off with half a slice to ease into the competition.
*/
- if (initial && sched_feat(START_DEBIT))
- vruntime += sched_vslice(cfs_rq, se);
-
- /* sleeps up to a single latency don't count. */
- if (!initial) {
- unsigned long thresh;
+ if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
+ vslice /= 2;
- if (se_is_idle(se))
- thresh = sysctl_sched_min_granularity;
- else
- thresh = sysctl_sched_latency;
-
- /*
- * Halve their sleep time's effect, to allow
- * for a gentler effect of sleepers:
- */
- if (sched_feat(GENTLE_FAIR_SLEEPERS))
- thresh >>= 1;
-
- vruntime -= thresh;
- }
-
- /*
- * Pull vruntime of the entity being placed to the base level of
- * cfs_rq, to prevent boosting it if placed backwards.
- * However, min_vruntime can advance much faster than real time, with
- * the extreme being when an entity with the minimal weight always runs
- * on the cfs_rq. If the waking entity slept for a long time, its
- * vruntime difference from min_vruntime may overflow s64 and their
- * comparison may get inversed, so ignore the entity's original
- * vruntime in that case.
- * The maximal vruntime speedup is given by the ratio of normal to
- * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
- * When placing a migrated waking entity, its exec_start has been set
- * from a different rq. In order to take into account a possible
- * divergence between new and prev rq's clocks task because of irq and
- * stolen time, we take an additional margin.
- * So, cutting off on the sleep time of
- * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
- * should be safe.
- */
- if (entity_is_long_sleeper(se))
- se->vruntime = vruntime;
- else
- se->vruntime = max_vruntime(se->vruntime, vruntime);
+ /*
+ * EEVDF: vd_i = ve_i + r_i/w_i
+ */
+ se->deadline = se->vruntime + vslice;
}
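/*
 * Editor's sketch (illustrative userspace check, not part of this diff):
 * verify the vl_i = (W + w_i)*vl'_i / W inflation derived in the comment
 * above, using made-up weights and lag.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t W = 2048;	/* combined weight of the already queued entities */
	int64_t w = 1024;	/* weight of the entity being placed */
	int64_t want = 300000;	/* lag we want to be left with after placement, ns */

	/* inflate as place_entity() does: lag *= (W + w); lag /= W */
	int64_t lag = want * (W + w) / W;

	/* place at v_i = V - lag; the weighted average then moves to V' */
	int64_t V = 0;		/* measure virtual times relative to V */
	int64_t v_i = V - lag;
	int64_t V_new = (W * V + w * v_i) / (W + w);

	printf("inflated lag      = %lld\n", (long long)lag);		/* 450000 */
	printf("lag after placing = %lld\n", (long long)(V_new - v_i));	/* 300000 */
	return 0;
}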
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
static inline bool cfs_bandwidth_used(void);
-/*
- * MIGRATION
- *
- * dequeue
- * update_curr()
- * update_min_vruntime()
- * vruntime -= min_vruntime
- *
- * enqueue
- * update_curr()
- * update_min_vruntime()
- * vruntime += min_vruntime
- *
- * this way the vruntime transition between RQs is done when both
- * min_vruntime are up-to-date.
- *
- * WAKEUP (remote)
- *
- * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
- * vruntime -= min_vruntime
- *
- * enqueue
- * update_curr()
- * update_min_vruntime()
- * vruntime += min_vruntime
- *
- * this way we don't have the most up-to-date min_vruntime on the originating
- * CPU and an up-to-date min_vruntime on the destination CPU.
- */
-
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
bool curr = cfs_rq->curr == se;
/*
* If we're the current task, we must renormalise before calling
* update_curr().
*/
- if (renorm && curr)
- se->vruntime += cfs_rq->min_vruntime;
+ if (curr)
+ place_entity(cfs_rq, se, flags);
update_curr(cfs_rq);
/*
- * Otherwise, renormalise after, such that we're placed at the current
- * moment in time, instead of some random moment in the past. Being
- * placed in the past could significantly boost this task to the
- * fairness detriment of existing tasks.
- */
- if (renorm && !curr)
- se->vruntime += cfs_rq->min_vruntime;
-
- /*
* When enqueuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
* - For group_entity, update its runnable_weight to reflect the new
@@ -4855,37 +5045,46 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
se_update_runnable(se);
+ /*
+ * XXX update_load_avg() above will have attached us to the pelt sum;
+ * but update_cfs_group() here will re-adjust the weight and have to
+ * undo/redo all that. Seems wasteful.
+ */
update_cfs_group(se);
+
+ /*
+	 * XXX now that the entity has been re-weighted and its lag adjusted,
+ * we can place the entity.
+ */
+ if (!curr)
+ place_entity(cfs_rq, se, flags);
+
account_entity_enqueue(cfs_rq, se);
- if (flags & ENQUEUE_WAKEUP)
- place_entity(cfs_rq, se, 0);
/* Entity has migrated, no longer consider this task hot */
if (flags & ENQUEUE_MIGRATED)
se->exec_start = 0;
check_schedstat_required();
update_stats_enqueue_fair(cfs_rq, se, flags);
- check_spread(cfs_rq, se);
if (!curr)
__enqueue_entity(cfs_rq, se);
se->on_rq = 1;
if (cfs_rq->nr_running == 1) {
check_enqueue_throttle(cfs_rq);
- if (!throttled_hierarchy(cfs_rq))
+ if (!throttled_hierarchy(cfs_rq)) {
list_add_leaf_cfs_rq(cfs_rq);
- }
-}
-
-static void __clear_buddies_last(struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->last != se)
- break;
+ } else {
+#ifdef CONFIG_CFS_BANDWIDTH
+ struct rq *rq = rq_of(cfs_rq);
- cfs_rq->last = NULL;
+ if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock)
+ cfs_rq->throttled_clock = rq_clock(rq);
+ if (!cfs_rq->throttled_clock_self)
+ cfs_rq->throttled_clock_self = rq_clock(rq);
+#endif
+ }
}
}
@@ -4900,27 +5099,10 @@ static void __clear_buddies_next(struct sched_entity *se)
}
}
-static void __clear_buddies_skip(struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- if (cfs_rq->skip != se)
- break;
-
- cfs_rq->skip = NULL;
- }
-}
-
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (cfs_rq->last == se)
- __clear_buddies_last(se);
-
if (cfs_rq->next == se)
__clear_buddies_next(se);
-
- if (cfs_rq->skip == se)
- __clear_buddies_skip(se);
}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -4954,20 +5136,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
clear_buddies(cfs_rq, se);
+ update_entity_lag(cfs_rq, se);
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);
- /*
- * Normalize after update_curr(); which will also have moved
- * min_vruntime if @se is the one holding it back. But before doing
- * update_min_vruntime() again, which will discount @se's position and
- * can move min_vruntime forward still more.
- */
- if (!(flags & DEQUEUE_SLEEP))
- se->vruntime -= cfs_rq->min_vruntime;
-
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);
@@ -4986,52 +5160,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
update_idle_cfs_rq_clock_pelt(cfs_rq);
}
-/*
- * Preempt the current task with a newly woken task if needed:
- */
-static void
-check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
-{
- unsigned long ideal_runtime, delta_exec;
- struct sched_entity *se;
- s64 delta;
-
- /*
- * When many tasks blow up the sched_period; it is possible that
- * sched_slice() reports unusually large results (when many tasks are
- * very light for example). Therefore impose a maximum.
- */
- ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency);
-
- delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
- if (delta_exec > ideal_runtime) {
- resched_curr(rq_of(cfs_rq));
- /*
- * The current task ran long enough, ensure it doesn't get
- * re-elected due to buddy favours.
- */
- clear_buddies(cfs_rq, curr);
- return;
- }
-
- /*
- * Ensure that a task that missed wakeup preemption by a
- * narrow margin doesn't have to wait for a full slice.
- * This also mitigates buddy induced latencies under load.
- */
- if (delta_exec < sysctl_sched_min_granularity)
- return;
-
- se = __pick_first_entity(cfs_rq);
- delta = curr->vruntime - se->vruntime;
-
- if (delta < 0)
- return;
-
- if (delta > ideal_runtime)
- resched_curr(rq_of(cfs_rq));
-}
-
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -5047,6 +5175,11 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_stats_wait_end_fair(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);
+ /*
+ * HACK, stash a copy of deadline at the point of pick in vlag,
+ * which isn't used until dequeue.
+ */
+ se->vlag = se->deadline;
}
update_stats_curr_start(cfs_rq, se);
@@ -5070,9 +5203,6 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-
/*
* Pick the next process, keeping these things in mind, in this order:
* 1) keep things fair between processes/task groups
@@ -5083,50 +5213,14 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
static struct sched_entity *
pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
- struct sched_entity *left = __pick_first_entity(cfs_rq);
- struct sched_entity *se;
-
/*
- * If curr is set we have to see if its left of the leftmost entity
- * still in the tree, provided there was anything in the tree at all.
+ * Enabling NEXT_BUDDY will affect latency but not fairness.
*/
- if (!left || (curr && entity_before(curr, left)))
- left = curr;
-
- se = left; /* ideally we run the leftmost entity */
-
- /*
- * Avoid running the skip buddy, if running something else can
- * be done without getting too unfair.
- */
- if (cfs_rq->skip && cfs_rq->skip == se) {
- struct sched_entity *second;
-
- if (se == curr) {
- second = __pick_first_entity(cfs_rq);
- } else {
- second = __pick_next_entity(se);
- if (!second || (curr && entity_before(curr, second)))
- second = curr;
- }
-
- if (second && wakeup_preempt_entity(second, left) < 1)
- se = second;
- }
+ if (sched_feat(NEXT_BUDDY) &&
+ cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
+ return cfs_rq->next;
- if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) {
- /*
- * Someone really wants this to run. If it's not unfair, run it.
- */
- se = cfs_rq->next;
- } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) {
- /*
- * Prefer last buddy, try to return the CPU to a preempted task.
- */
- se = cfs_rq->last;
- }
-
- return se;
+ return pick_eevdf(cfs_rq);
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -5143,8 +5237,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
/* throttle cfs_rqs exceeding runtime */
check_cfs_rq_runtime(cfs_rq);
- check_spread(cfs_rq, prev);
-
if (prev->on_rq) {
update_stats_wait_start_fair(cfs_rq, prev);
/* Put 'current' back into the tree. */
@@ -5185,9 +5277,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
return;
#endif
-
- if (cfs_rq->nr_running > 1)
- check_preempt_tick(cfs_rq, curr);
}
@@ -5377,6 +5466,17 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
/* Add cfs_rq with load or one or more already running entities to the list */
if (!cfs_rq_is_decayed(cfs_rq))
list_add_leaf_cfs_rq(cfs_rq);
+
+ if (cfs_rq->throttled_clock_self) {
+ u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self;
+
+ cfs_rq->throttled_clock_self = 0;
+
+ if (SCHED_WARN_ON((s64)delta < 0))
+ delta = 0;
+
+ cfs_rq->throttled_clock_self_time += delta;
+ }
}
return 0;
@@ -5391,6 +5491,10 @@ static int tg_throttle_down(struct task_group *tg, void *data)
if (!cfs_rq->throttle_count) {
cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
list_del_leaf_cfs_rq(cfs_rq);
+
+ SCHED_WARN_ON(cfs_rq->throttled_clock_self);
+ if (cfs_rq->nr_running)
+ cfs_rq->throttled_clock_self = rq_clock(rq);
}
cfs_rq->throttle_count++;
@@ -5480,7 +5584,9 @@ done:
* throttled-list. rq->lock protects completion.
*/
cfs_rq->throttled = 1;
- cfs_rq->throttled_clock = rq_clock(rq);
+ SCHED_WARN_ON(cfs_rq->throttled_clock);
+ if (cfs_rq->nr_running)
+ cfs_rq->throttled_clock = rq_clock(rq);
return true;
}
@@ -5498,7 +5604,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
update_rq_clock(rq);
raw_spin_lock(&cfs_b->lock);
- cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
+ if (cfs_rq->throttled_clock) {
+ cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
+ cfs_rq->throttled_clock = 0;
+ }
list_del_rcu(&cfs_rq->throttled_list);
raw_spin_unlock(&cfs_b->lock);
@@ -6014,13 +6123,14 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
-void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent)
{
raw_spin_lock_init(&cfs_b->lock);
cfs_b->runtime = 0;
cfs_b->quota = RUNTIME_INF;
cfs_b->period = ns_to_ktime(default_cfs_period());
cfs_b->burst = 0;
+ cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
@@ -6157,6 +6267,46 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
rq_clock_stop_loop_update(rq);
}
+bool cfs_task_bw_constrained(struct task_struct *p)
+{
+ struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+ if (!cfs_bandwidth_used())
+ return false;
+
+ if (cfs_rq->runtime_enabled ||
+ tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF)
+ return true;
+
+ return false;
+}
+
+#ifdef CONFIG_NO_HZ_FULL
+/* called from pick_next_task_fair() */
+static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
+{
+ int cpu = cpu_of(rq);
+
+ if (!sched_feat(HZ_BW) || !cfs_bandwidth_used())
+ return;
+
+ if (!tick_nohz_full_cpu(cpu))
+ return;
+
+ if (rq->nr_running != 1)
+ return;
+
+ /*
+ * We know there is only one task runnable and we've just picked it. The
+ * normal enqueue path will have cleared TICK_DEP_BIT_SCHED if we will
+ * be otherwise able to stop the tick. Just need to check if we are using
+	 * otherwise be able to stop the tick. We just need to check if we are using
+ */
+ if (cfs_task_bw_constrained(p))
+ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+#endif
+
#else /* CONFIG_CFS_BANDWIDTH */
static inline bool cfs_bandwidth_used(void)
@@ -6186,9 +6336,8 @@ static inline int throttled_lb_pair(struct task_group *tg,
return 0;
}
-void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
#endif
@@ -6199,9 +6348,18 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static inline void update_runtime_enabled(struct rq *rq) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
-
+#ifdef CONFIG_CGROUP_SCHED
+bool cfs_task_bw_constrained(struct task_struct *p)
+{
+ return false;
+}
+#endif
#endif /* CONFIG_CFS_BANDWIDTH */
+#if !defined(CONFIG_CFS_BANDWIDTH) || !defined(CONFIG_NO_HZ_FULL)
+static inline void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) {}
+#endif
+
/**************************************************
* CFS operations on tasks:
*/
@@ -6210,13 +6368,12 @@ static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
SCHED_WARN_ON(task_rq(p) != rq);
if (rq->cfs.h_nr_running > 1) {
- u64 slice = sched_slice(cfs_rq, se);
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+ u64 slice = se->slice;
s64 delta = slice - ran;
if (delta < 0) {
@@ -6240,8 +6397,7 @@ static void hrtick_update(struct rq *rq)
if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
return;
- if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
- hrtick_start_fair(rq, curr);
+ hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
@@ -6282,17 +6438,6 @@ static int sched_idle_rq(struct rq *rq)
rq->nr_running);
}
-/*
- * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use
- * of idle_nr_running, which does not consider idle descendants of normal
- * entities.
- */
-static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq)
-{
- return cfs_rq->nr_running &&
- cfs_rq->nr_running == cfs_rq->idle_nr_running;
-}
-
#ifdef CONFIG_SMP
static int sched_idle_cpu(int cpu)
{
@@ -7065,7 +7210,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
util_min = uclamp_eff_value(p, UCLAMP_MIN);
util_max = uclamp_eff_value(p, UCLAMP_MAX);
- for_each_cpu_wrap(cpu, cpus, target + 1) {
+ for_each_cpu_wrap(cpu, cpus, target) {
unsigned long cpu_cap = capacity_of(cpu);
if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
@@ -7289,9 +7434,6 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
- if (boost)
- util_est = max(util_est, runnable);
-
/*
* During wake-up @p isn't enqueued yet and doesn't contribute
* to any cpu_rq(cpu)->cfs.avg.util_est.enqueued.
@@ -7741,6 +7883,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
if (wake_flags & WF_TTWU) {
record_wakee(p);
+ if ((wake_flags & WF_CURRENT_CPU) &&
+ cpumask_test_cpu(cpu, p->cpus_ptr))
+ return cpu;
+
if (sched_energy_enabled()) {
new_cpu = find_energy_efficient_cpu(p, prev_cpu);
if (new_cpu >= 0)
@@ -7798,18 +7944,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
struct sched_entity *se = &p->se;
- /*
- * As blocked tasks retain absolute vruntime the migration needs to
- * deal with this by subtracting the old and adding the new
- * min_vruntime -- the latter is done by enqueue_entity() when placing
- * the task on the new runqueue.
- */
- if (READ_ONCE(p->__state) == TASK_WAKING) {
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- se->vruntime -= u64_u32_load(cfs_rq->min_vruntime);
- }
-
if (!task_on_rq_migrating(p)) {
remove_entity_load_avg(se);
@@ -7847,66 +7981,6 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
#endif /* CONFIG_SMP */
-static unsigned long wakeup_gran(struct sched_entity *se)
-{
- unsigned long gran = sysctl_sched_wakeup_granularity;
-
- /*
- * Since its curr running now, convert the gran from real-time
- * to virtual-time in his units.
- *
- * By using 'se' instead of 'curr' we penalize light tasks, so
- * they get preempted easier. That is, if 'se' < 'curr' then
- * the resulting gran will be larger, therefore penalizing the
- * lighter, if otoh 'se' > 'curr' then the resulting gran will
- * be smaller, again penalizing the lighter task.
- *
- * This is especially important for buddies when the leftmost
- * task is higher priority than the buddy.
- */
- return calc_delta_fair(gran, se);
-}
-
-/*
- * Should 'se' preempt 'curr'.
- *
- * |s1
- * |s2
- * |s3
- * g
- * |<--->|c
- *
- * w(c, s1) = -1
- * w(c, s2) = 0
- * w(c, s3) = 1
- *
- */
-static int
-wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
-{
- s64 gran, vdiff = curr->vruntime - se->vruntime;
-
- if (vdiff <= 0)
- return -1;
-
- gran = wakeup_gran(se);
- if (vdiff > gran)
- return 1;
-
- return 0;
-}
-
-static void set_last_buddy(struct sched_entity *se)
-{
- for_each_sched_entity(se) {
- if (SCHED_WARN_ON(!se->on_rq))
- return;
- if (se_is_idle(se))
- return;
- cfs_rq_of(se)->last = se;
- }
-}
-
static void set_next_buddy(struct sched_entity *se)
{
for_each_sched_entity(se) {
@@ -7918,12 +7992,6 @@ static void set_next_buddy(struct sched_entity *se)
}
}
-static void set_skip_buddy(struct sched_entity *se)
-{
- for_each_sched_entity(se)
- cfs_rq_of(se)->skip = se;
-}
-
/*
* Preempt the current task with a newly woken task if needed:
*/
@@ -7932,7 +8000,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
- int scale = cfs_rq->nr_running >= sched_nr_latency;
int next_buddy_marked = 0;
int cse_is_idle, pse_is_idle;
@@ -7948,7 +8015,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
return;
- if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
+ if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK)) {
set_next_buddy(pse);
next_buddy_marked = 1;
}
@@ -7993,35 +8060,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (cse_is_idle != pse_is_idle)
return;
- update_curr(cfs_rq_of(se));
- if (wakeup_preempt_entity(se, pse) == 1) {
- /*
- * Bias pick_next to pick the sched entity that is
- * triggering this preemption.
- */
- if (!next_buddy_marked)
- set_next_buddy(pse);
+ cfs_rq = cfs_rq_of(se);
+ update_curr(cfs_rq);
+
+ /*
+ * XXX pick_eevdf(cfs_rq) != se ?
+ */
+ if (pick_eevdf(cfs_rq) == pse)
goto preempt;
- }
return;
preempt:
resched_curr(rq);
- /*
- * Only set the backward buddy when the current task is still
- * on the rq. This can happen when a wakeup gets interleaved
- * with schedule on the ->pre_schedule() or idle_balance()
- * point, either of which can * drop the rq lock.
- *
- * Also, during early boot the idle thread is in the fair class,
- * for obvious reasons its a bad idea to schedule back to it.
- */
- if (unlikely(!se->on_rq || curr == rq->idle))
- return;
-
- if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
- set_last_buddy(se);
}
#ifdef CONFIG_SMP
@@ -8172,6 +8223,7 @@ done: __maybe_unused;
hrtick_start_fair(rq, p);
update_misfit_status(p, rq);
+ sched_fair_update_stop_tick(rq, p);
return p;
@@ -8222,8 +8274,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
/*
* sched_yield() is very simple
- *
- * The magic of dealing with the ->skip buddy is in pick_next_entity.
*/
static void yield_task_fair(struct rq *rq)
{
@@ -8239,21 +8289,19 @@ static void yield_task_fair(struct rq *rq)
clear_buddies(cfs_rq, se);
- if (curr->policy != SCHED_BATCH) {
- update_rq_clock(rq);
- /*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
- /*
- * Tell update_rq_clock() that we've just updated,
- * so we don't do microscopic update in schedule()
- * and double the fastpath cost.
- */
- rq_clock_skip_update(rq);
- }
+ update_rq_clock(rq);
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+ /*
+ * Tell update_rq_clock() that we've just updated,
+ * so we don't do microscopic update in schedule()
+ * and double the fastpath cost.
+ */
+ rq_clock_skip_update(rq);
- set_skip_buddy(se);
+ se->deadline += calc_delta_fair(se->slice, se);
}
static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
@@ -8416,6 +8464,11 @@ enum group_type {
*/
group_misfit_task,
/*
+	 * Balance SMT group that's fully busy. Can benefit from migrating
+	 * a task from an SMT CPU with a busy sibling to a CPU on an idle core.
+ */
+ group_smt_balance,
+ /*
* SD_ASYM_PACKING only: One local CPU with higher capacity is available,
* and the task should be migrated to it instead of running on the
* current CPU.
@@ -8496,8 +8549,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
* Buddy candidates are cache hot:
*/
if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
- (&p->se == cfs_rq_of(&p->se)->next ||
- &p->se == cfs_rq_of(&p->se)->last))
+ (&p->se == cfs_rq_of(&p->se)->next))
return 1;
if (sysctl_sched_migration_cost == -1)
@@ -9123,6 +9175,7 @@ struct sg_lb_stats {
unsigned int group_weight;
enum group_type group_type;
unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
+	unsigned int group_smt_balance; /* Task on busy SMT should be moved */
unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
@@ -9396,6 +9449,9 @@ group_type group_classify(unsigned int imbalance_pct,
if (sgs->group_asym_packing)
return group_asym_packing;
+ if (sgs->group_smt_balance)
+ return group_smt_balance;
+
if (sgs->group_misfit_task_load)
return group_misfit_task;
@@ -9465,6 +9521,71 @@ sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs
return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
}
+/* One group has more than one SMT CPU while the other group does not */
+static inline bool smt_vs_nonsmt_groups(struct sched_group *sg1,
+ struct sched_group *sg2)
+{
+ if (!sg1 || !sg2)
+ return false;
+
+ return (sg1->flags & SD_SHARE_CPUCAPACITY) !=
+ (sg2->flags & SD_SHARE_CPUCAPACITY);
+}
+
+static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
+ struct sched_group *group)
+{
+ if (env->idle == CPU_NOT_IDLE)
+ return false;
+
+ /*
+ * For SMT source group, it is better to move a task
+ * to a CPU that doesn't have multiple tasks sharing its CPU capacity.
+ * Note that if a group has a single SMT, SD_SHARE_CPUCAPACITY
+ * will not be on.
+ */
+ if (group->flags & SD_SHARE_CPUCAPACITY &&
+ sgs->sum_h_nr_running > 1)
+ return true;
+
+ return false;
+}
+
+static inline long sibling_imbalance(struct lb_env *env,
+ struct sd_lb_stats *sds,
+ struct sg_lb_stats *busiest,
+ struct sg_lb_stats *local)
+{
+ int ncores_busiest, ncores_local;
+ long imbalance;
+
+ if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running)
+ return 0;
+
+ ncores_busiest = sds->busiest->cores;
+ ncores_local = sds->local->cores;
+
+ if (ncores_busiest == ncores_local) {
+ imbalance = busiest->sum_nr_running;
+ lsub_positive(&imbalance, local->sum_nr_running);
+ return imbalance;
+ }
+
+ /* Balance such that nr_running/ncores ratio are same on both groups */
+ imbalance = ncores_local * busiest->sum_nr_running;
+ lsub_positive(&imbalance, ncores_busiest * local->sum_nr_running);
+ /* Normalize imbalance and do rounding on normalization */
+ imbalance = 2 * imbalance + ncores_local + ncores_busiest;
+ imbalance /= ncores_local + ncores_busiest;
+
+ /* Take advantage of resource in an empty sched group */
+ if (imbalance == 0 && local->sum_nr_running == 0 &&
+ busiest->sum_nr_running > 1)
+ imbalance = 2;
+
+ return imbalance;
+}
+
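/*
 * Editor's note (worked example with illustrative numbers, not from this
 * patch): equalizing nr_running/ncores between the two groups means moving
 * k = (ncores_local*nr_busiest - ncores_busiest*nr_local) /
 *     (ncores_local + ncores_busiest) tasks.
 * With ncores_local = 4, ncores_busiest = 8, nr_busiest = 6, nr_local = 1:
 *   raw        = 4*6 - 8*1 = 16
 *   normalized = (2*16 + 4 + 8) / (4 + 8) = 44/12 = 3
 * so sibling_imbalance() reports 3 for this pair, while two groups with the
 * same core count and 5 vs 2 runnable tasks would report 5 - 2 = 3 via the
 * equal-cores shortcut at the top of the function.
 */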
static inline bool
sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
{
@@ -9557,6 +9678,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_asym_packing = 1;
}
+ /* Check for loaded SMT group to be balanced to dst CPU */
+ if (!local_group && smt_balance(env, sgs, group))
+ sgs->group_smt_balance = 1;
+
sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);
/* Computing avg_load makes sense only when group is overloaded */
@@ -9641,6 +9766,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
return false;
break;
+ case group_smt_balance:
case group_fully_busy:
/*
* Select the fully busy group with highest avg_load. In
@@ -9670,6 +9796,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
case group_has_spare:
/*
+ * Do not pick sg with SMT CPUs over sg with pure CPUs,
+	 * as we do not want to pull a task off an SMT core with one task
+ * and make the core idle.
+ */
+ if (smt_vs_nonsmt_groups(sds->busiest, sg)) {
+ if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1)
+ return false;
+ else
+ return true;
+ }
+
+ /*
* Select not overloaded group with lowest number of idle cpus
* and highest number of running tasks. We could also compare
* the spare capacity which is more stable but it can end up
@@ -9865,6 +10003,7 @@ static bool update_pick_idlest(struct sched_group *idlest,
case group_imbalanced:
case group_asym_packing:
+ case group_smt_balance:
/* Those types are not used in the slow wakeup path */
return false;
@@ -9996,6 +10135,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
case group_imbalanced:
case group_asym_packing:
+ case group_smt_balance:
/* Those type are not used in the slow wakeup path */
return NULL;
@@ -10250,6 +10390,13 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
return;
}
+ if (busiest->group_type == group_smt_balance) {
+ /* Reduce number of tasks sharing CPU capacity */
+ env->migration_type = migrate_task;
+ env->imbalance = 1;
+ return;
+ }
+
if (busiest->group_type == group_imbalanced) {
/*
* In the group_imb case we cannot rely on group-wide averages
@@ -10297,14 +10444,12 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
}
if (busiest->group_weight == 1 || sds->prefer_sibling) {
- unsigned int nr_diff = busiest->sum_nr_running;
/*
* When prefer sibling, evenly spread running tasks on
* groups.
*/
env->migration_type = migrate_task;
- lsub_positive(&nr_diff, local->sum_nr_running);
- env->imbalance = nr_diff;
+ env->imbalance = sibling_imbalance(env, sds, busiest, local);
} else {
/*
@@ -10501,20 +10646,27 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* group's child domain.
*/
if (sds.prefer_sibling && local->group_type == group_has_spare &&
- busiest->sum_nr_running > local->sum_nr_running + 1)
+ sibling_imbalance(env, &sds, busiest, local) > 1)
goto force_balance;
if (busiest->group_type != group_overloaded) {
- if (env->idle == CPU_NOT_IDLE)
+ if (env->idle == CPU_NOT_IDLE) {
/*
* If the busiest group is not overloaded (and as a
* result the local one too) but this CPU is already
* busy, let another idle CPU try to pull task.
*/
goto out_balanced;
+ }
+
+ if (busiest->group_type == group_smt_balance &&
+ smt_vs_nonsmt_groups(sds.local, sds.busiest)) {
+			/* Let a non-SMT CPU pull from an SMT CPU sharing with its sibling */
+ goto force_balance;
+ }
if (busiest->group_weight > 1 &&
- local->idle_cpus <= (busiest->idle_cpus + 1))
+ local->idle_cpus <= (busiest->idle_cpus + 1)) {
/*
* If the busiest group is not overloaded
* and there is no imbalance between this and busiest
@@ -10525,12 +10677,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* there is more than 1 CPU per group.
*/
goto out_balanced;
+ }
- if (busiest->sum_h_nr_running == 1)
+ if (busiest->sum_h_nr_running == 1) {
/*
* busiest doesn't have any tasks waiting to run
*/
goto out_balanced;
+ }
}
force_balance:
@@ -10764,7 +10918,7 @@ static int active_load_balance_cpu_stop(void *data);
static int should_we_balance(struct lb_env *env)
{
struct sched_group *sg = env->sd->groups;
- int cpu;
+ int cpu, idle_smt = -1;
/*
* Ensure the balancing environment is consistent; can happen
@@ -10791,10 +10945,24 @@ static int should_we_balance(struct lb_env *env)
if (!idle_cpu(cpu))
continue;
+ /*
+		 * Don't balance to an idle SMT CPU in a busy core right away
+		 * when balancing cores, but remember the first idle SMT CPU
+		 * for later consideration. Find a CPU on an idle core first.
+ */
+ if (!(env->sd->flags & SD_SHARE_CPUCAPACITY) && !is_core_idle(cpu)) {
+ if (idle_smt == -1)
+ idle_smt = cpu;
+ continue;
+ }
+
/* Are we the first idle CPU? */
return cpu == env->dst_cpu;
}
+ if (idle_smt == env->dst_cpu)
+ return true;
+
/* Are we the first CPU of this group ? */
return group_balance_cpu(sg) == env->dst_cpu;
}
@@ -12007,8 +12175,8 @@ static void rq_offline_fair(struct rq *rq)
static inline bool
__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
{
- u64 slice = sched_slice(cfs_rq_of(se), se);
u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+ u64 slice = se->slice;
return (rtime * min_nr_tasks > slice);
}
@@ -12164,8 +12332,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
*/
static void task_fork_fair(struct task_struct *p)
{
- struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se, *curr;
+ struct cfs_rq *cfs_rq;
struct rq *rq = this_rq();
struct rq_flags rf;
@@ -12174,22 +12342,9 @@ static void task_fork_fair(struct task_struct *p)
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
- if (curr) {
+ if (curr)
update_curr(cfs_rq);
- se->vruntime = curr->vruntime;
- }
- place_entity(cfs_rq, se, 1);
-
- if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
- /*
- * Upon rescheduling, sched_class::put_prev_task() will place
- * 'current' within the tree based on its new key value.
- */
- swap(curr->vruntime, se->vruntime);
- resched_curr(rq);
- }
-
- se->vruntime -= cfs_rq->min_vruntime;
+ place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
@@ -12218,34 +12373,6 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
check_preempt_curr(rq, p, 0);
}
-static inline bool vruntime_normalized(struct task_struct *p)
-{
- struct sched_entity *se = &p->se;
-
- /*
- * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
- * the dequeue_entity(.flags=0) will already have normalized the
- * vruntime.
- */
- if (p->on_rq)
- return true;
-
- /*
- * When !on_rq, vruntime of the task has usually NOT been normalized.
- * But there are some cases where it has already been normalized:
- *
- * - A forked child which is waiting for being woken up by
- * wake_up_new_task().
- * - A task which has been woken up by try_to_wake_up() and
- * waiting for actually being woken up by sched_ttwu_pending().
- */
- if (!se->sum_exec_runtime ||
- (READ_ONCE(p->__state) == TASK_WAKING && p->sched_remote_wakeup))
- return true;
-
- return false;
-}
-
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Propagate the changes of the sched_entity across the tg tree to make it
@@ -12316,16 +12443,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
static void detach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
- if (!vruntime_normalized(p)) {
- /*
- * Fix up our vruntime so that the current sleep doesn't
- * cause 'unlimited' sleep bonus.
- */
- place_entity(cfs_rq, se, 0);
- se->vruntime -= cfs_rq->min_vruntime;
- }
detach_entity_cfs_rq(se);
}
@@ -12333,12 +12450,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
static void attach_task_cfs_rq(struct task_struct *p)
{
struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
attach_entity_cfs_rq(se);
-
- if (!vruntime_normalized(p))
- se->vruntime += cfs_rq->min_vruntime;
}
static void switched_from_fair(struct rq *rq, struct task_struct *p)
@@ -12450,7 +12563,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
tg->shares = NICE_0_LOAD;
- init_cfs_bandwidth(tg_cfs_bandwidth(tg));
+ init_cfs_bandwidth(tg_cfs_bandwidth(tg), tg_cfs_bandwidth(parent));
for_each_possible_cpu(i) {
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
@@ -12703,7 +12816,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
* idle runqueue:
*/
if (rq->cfs.load.weight)
- rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+ rr_interval = NS_TO_JIFFIES(se->slice);
return rr_interval;
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ee7f23c76bd3..f770168230ae 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -1,16 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Only give sleepers 50% of their service deficit. This allows
- * them to run sooner, but does not allow tons of sleepers to
- * rip the spread apart.
- */
-SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
/*
- * Place new tasks ahead so that they do not starve already running
- * tasks
+ * Using the avg_vruntime, do the right thing and preserve lag across
+ * sleep+wake cycles. EEVDF placement strategy #1; #2 if disabled.
*/
-SCHED_FEAT(START_DEBIT, true)
+SCHED_FEAT(PLACE_LAG, true)
+SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+SCHED_FEAT(RUN_TO_PARITY, true)
/*
* Prefer to schedule the task we woke last (assuming it failed
@@ -20,13 +16,6 @@ SCHED_FEAT(START_DEBIT, true)
SCHED_FEAT(NEXT_BUDDY, false)
/*
- * Prefer to schedule the task that ran last (when we did
- * wake-preempt) as that likely will touch the same data, increases
- * cache locality.
- */
-SCHED_FEAT(LAST_BUDDY, true)
-
-/*
* Consider buddies to be cache hot, decreases the likeliness of a
* cache buddy being migrated away, increases cache locality.
*/
@@ -99,5 +88,4 @@ SCHED_FEAT(UTIL_EST_FASTUP, true)
SCHED_FEAT(LATENCY_WARN, false)
-SCHED_FEAT(ALT_PERIOD, true)
-SCHED_FEAT(BASE_SLICE, true)
+SCHED_FEAT(HZ_BW, true)
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 9bb3f2b3ccfc..1d0f634725a6 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -140,7 +140,7 @@
static int psi_bug __read_mostly;
DEFINE_STATIC_KEY_FALSE(psi_disabled);
-DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
+static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 00e0e5074115..0597ba0f85ff 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -25,7 +25,7 @@ unsigned int sysctl_sched_rt_period = 1000000;
int sysctl_sched_rt_runtime = 950000;
#ifdef CONFIG_SYSCTL
-static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
+static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
static int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
@@ -3062,6 +3062,9 @@ static int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
sched_rr_timeslice =
sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice);
+
+ if (sysctl_sched_rr_timeslice <= 0)
+ sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
}
mutex_unlock(&mutex);
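/*
 * Editor's note (worked example, assuming RR_TIMESLICE is the usual
 * "100 ms worth of jiffies", i.e. 100 * HZ / 1000): with HZ = 300,
 * RR_TIMESLICE is 30. The old expression (MSEC_PER_SEC / HZ) * RR_TIMESLICE
 * truncates 1000/300 to 3 and yields 90 ms, while the reordered
 * (MSEC_PER_SEC * RR_TIMESLICE) / HZ gives 30000/300 = 100 ms, so the
 * default exposed via the sysctl no longer loses precision on HZ values
 * that do not divide 1000.
 */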
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e93e006a942b..04846272409c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -454,11 +454,12 @@ extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
-extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent);
extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
+extern bool cfs_task_bw_constrained(struct task_struct *p);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
@@ -494,6 +495,7 @@ static inline void set_task_rq_fair(struct sched_entity *se,
#else /* CONFIG_CGROUP_SCHED */
struct cfs_bandwidth { };
+static inline bool cfs_task_bw_constrained(struct task_struct *p) { return false; }
#endif /* CONFIG_CGROUP_SCHED */
@@ -548,6 +550,9 @@ struct cfs_rq {
unsigned int idle_nr_running; /* SCHED_IDLE */
unsigned int idle_h_nr_running; /* SCHED_IDLE */
+ s64 avg_vruntime;
+ u64 avg_load;
+
u64 exec_clock;
u64 min_vruntime;
#ifdef CONFIG_SCHED_CORE
@@ -567,8 +572,6 @@ struct cfs_rq {
*/
struct sched_entity *curr;
struct sched_entity *next;
- struct sched_entity *last;
- struct sched_entity *skip;
#ifdef CONFIG_SCHED_DEBUG
unsigned int nr_spread_over;
@@ -636,6 +639,8 @@ struct cfs_rq {
u64 throttled_clock;
u64 throttled_clock_pelt;
u64 throttled_clock_pelt_time;
+ u64 throttled_clock_self;
+ u64 throttled_clock_self_time;
int throttled;
int throttle_count;
struct list_head throttled_list;
@@ -1245,6 +1250,7 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
bool fi);
+void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
/*
* Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -1700,6 +1706,21 @@ rq_unlock(struct rq *rq, struct rq_flags *rf)
raw_spin_rq_unlock(rq);
}
+DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
+ rq_lock(_T->lock, &_T->rf),
+ rq_unlock(_T->lock, &_T->rf),
+ struct rq_flags rf)
+
+DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
+ rq_lock_irq(_T->lock, &_T->rf),
+ rq_unlock_irq(_T->lock, &_T->rf),
+ struct rq_flags rf)
+
+DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
+ rq_lock_irqsave(_T->lock, &_T->rf),
+ rq_unlock_irqrestore(_T->lock, &_T->rf),
+ struct rq_flags rf)
+
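/*
 * Editor's note (usage sketch, not part of this diff): assuming the guard
 * infrastructure from <linux/cleanup.h>, these classes should allow
 * scope-based locking along the lines of:
 *
 *	void example(struct rq *rq)
 *	{
 *		guard(rq_lock_irqsave)(rq);
 *		... rq stays locked, with IRQs saved, until end of scope ...
 *	}
 *
 * or, for a bounded region, scoped_guard (rq_lock, rq) { ... }. The actual
 * call sites are not part of this hunk.
 */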
static inline struct rq *
this_rq_lock_irq(struct rq_flags *rf)
__acquires(rq->lock)
@@ -1882,6 +1903,7 @@ struct sched_group {
atomic_t ref;
unsigned int group_weight;
+ unsigned int cores;
struct sched_group_capacity *sgc;
int asym_prefer_cpu; /* CPU of highest priority in group */
int flags;
@@ -2131,12 +2153,13 @@ static inline int task_on_rq_migrating(struct task_struct *p)
}
/* Wake flags. The first three directly map to some SD flag value */
-#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
-#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
-#define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */
+#define WF_EXEC 0x02 /* Wakeup after exec; maps to SD_BALANCE_EXEC */
+#define WF_FORK 0x04 /* Wakeup after fork; maps to SD_BALANCE_FORK */
+#define WF_TTWU 0x08 /* Wakeup; maps to SD_BALANCE_WAKE */
-#define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
-#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
+#define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
+#define WF_MIGRATED 0x20 /* Internal use, task got migrated */
+#define WF_CURRENT_CPU 0x40 /* Prefer to move the wakee to the current CPU. */
#ifdef CONFIG_SMP
static_assert(WF_EXEC == SD_BALANCE_EXEC);
@@ -2195,6 +2218,7 @@ extern const u32 sched_prio_to_wmult[40];
#else
#define ENQUEUE_MIGRATED 0x00
#endif
+#define ENQUEUE_INITIAL 0x80
#define RETRY_TASK ((void *)-1UL)
@@ -2398,6 +2422,7 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
#endif
extern void schedule_idle(void);
+asmlinkage void schedule_user(void);
extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
@@ -2499,11 +2524,9 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_base_slice;
+
#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_idle_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
extern int sysctl_resched_latency_warn_ms;
extern int sysctl_resched_latency_warn_once;
@@ -2609,6 +2632,12 @@ static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2)
static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {}
#endif
+#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \
+__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
+static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
+{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \
+ _lock; return _t; }
+
#ifdef CONFIG_SMP
static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
@@ -2738,6 +2767,16 @@ static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
+static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+{
+ raw_spin_unlock(l1);
+ raw_spin_unlock(l2);
+}
+
+DEFINE_LOCK_GUARD_2(double_raw_spinlock, raw_spinlock_t,
+ double_raw_lock(_T->lock, _T->lock2),
+ double_raw_unlock(_T->lock, _T->lock2))
+
/*
* double_rq_unlock - safely unlock two runqueues
*
@@ -2795,6 +2834,10 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
#endif
+DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
+ double_rq_lock(_T->lock, _T->lock2),
+ double_rq_unlock(_T->lock, _T->lock2))
+
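/*
 * Editor's note (usage sketch, not part of this diff): DEFINE_LOCK_GUARD_2
 * extends the single-lock guard classes from <linux/cleanup.h> to a pair of
 * locks, so a caller can presumably write:
 *
 *	guard(double_rq_lock)(rq1, rq2);
 *
 * and have both runqueue locks taken in the proper order and dropped
 * automatically at end of scope, instead of pairing double_rq_lock() and
 * double_rq_unlock() by hand.
 */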
extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
@@ -3229,6 +3272,8 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
extern void swake_up_all_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+extern int try_to_wake_up(struct task_struct *tsk, unsigned int state, int wake_flags);
+
#ifdef CONFIG_PREEMPT_DYNAMIC
extern int preempt_dynamic_mode;
extern int sched_dynamic_mode(const char *str);
@@ -3480,4 +3525,7 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
static inline void init_sched_mm_cid(struct task_struct *t) { }
#endif
+extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
+extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
+
#endif /* _KERNEL_SCHED_SCHED_H */
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 76b9b796e695..72505cd3b60a 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -18,7 +18,7 @@ EXPORT_SYMBOL(__init_swait_queue_head);
* If for some reason it would return 0, that means the previously waiting
* task is already running, so it will observe condition true (or has already).
*/
-void swake_up_locked(struct swait_queue_head *q)
+void swake_up_locked(struct swait_queue_head *q, int wake_flags)
{
struct swait_queue *curr;
@@ -26,7 +26,7 @@ void swake_up_locked(struct swait_queue_head *q)
return;
curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
- wake_up_process(curr->task);
+ try_to_wake_up(curr->task, TASK_NORMAL, wake_flags);
list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(swake_up_locked);
void swake_up_all_locked(struct swait_queue_head *q)
{
while (!list_empty(&q->task_list))
- swake_up_locked(q);
+ swake_up_locked(q, 0);
}
void swake_up_one(struct swait_queue_head *q)
@@ -49,7 +49,7 @@ void swake_up_one(struct swait_queue_head *q)
unsigned long flags;
raw_spin_lock_irqsave(&q->lock, flags);
- swake_up_locked(q);
+ swake_up_locked(q, 0);
raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index d3a3b2646ec4..05a5bc678c08 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -722,8 +722,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
if (parent->parent) {
parent->parent->child = tmp;
- if (tmp->flags & SD_SHARE_CPUCAPACITY)
- parent->parent->groups->flags |= SD_SHARE_CPUCAPACITY;
+ parent->parent->groups->flags = tmp->flags;
}
/*
@@ -1275,14 +1274,24 @@ build_sched_groups(struct sched_domain *sd, int cpu)
static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
{
struct sched_group *sg = sd->groups;
+ struct cpumask *mask = sched_domains_tmpmask2;
WARN_ON(!sg);
do {
- int cpu, max_cpu = -1;
+ int cpu, cores = 0, max_cpu = -1;
sg->group_weight = cpumask_weight(sched_group_span(sg));
+ cpumask_copy(mask, sched_group_span(sg));
+ for_each_cpu(cpu, mask) {
+ cores++;
+#ifdef CONFIG_SCHED_SMT
+ cpumask_andnot(mask, mask, cpu_smt_mask(cpu));
+#endif
+ }
+ sg->cores = cores;
+
if (!(sd->flags & SD_ASYM_PACKING))
goto next;
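The init_sched_groups_capacity() hunk above derives sg->cores by walking the group span and stripping each CPU's SMT siblings as it goes. The same idiom in isolation, as a sketch that assumes the caller supplies a scratch cpumask:

/* Count physical cores covered by @span; @tmp is caller-provided scratch. */
static int sketch_count_cores(const struct cpumask *span, struct cpumask *tmp)
{
	int cpu, cores = 0;

	cpumask_copy(tmp, span);
	for_each_cpu(cpu, tmp) {
		cores++;
#ifdef CONFIG_SCHED_SMT
		/* Remove @cpu's SMT siblings so each core is counted once. */
		cpumask_andnot(tmp, tmp, cpu_smt_mask(cpu));
#endif
	}
	return cores;
}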
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 48c53e4739ea..802d98cf2de3 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -161,6 +161,11 @@ int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
}
EXPORT_SYMBOL(__wake_up);
+void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key)
+{
+ __wake_up_common_lock(wq_head, mode, 1, WF_CURRENT_CPU, key);
+}
+
/*
* Same as __wake_up but called with the spinlock in wait_queue_head_t held.
*/
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d3e584065c7f..255999ba9190 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -110,11 +110,13 @@ struct seccomp_knotif {
* @flags: The flags for the new file descriptor. At the moment, only O_CLOEXEC
* is allowed.
* @ioctl_flags: The flags used for the seccomp_addfd ioctl.
+ * @setfd: whether or not SECCOMP_ADDFD_FLAG_SETFD was set during notify_addfd
* @ret: The return value of the installing process. It is set to the fd num
* upon success (>= 0).
* @completion: Indicates that the installing process has completed fd
* installation, or gone away (either due to successful
* reply, or signal)
+ * @list: list_head for chaining seccomp_kaddfd together.
*
*/
struct seccomp_kaddfd {
@@ -138,14 +140,17 @@ struct seccomp_kaddfd {
* structure is fairly large, we store the notification-specific stuff in a
* separate structure.
*
- * @request: A semaphore that users of this notification can wait on for
- * changes. Actual reads and writes are still controlled with
- * filter->notify_lock.
+ * @requests: A semaphore that users of this notification can wait on for
+ * changes. Actual reads and writes are still controlled with
+ * filter->notify_lock.
+ * @flags: A set of SECCOMP_USER_NOTIF_FD_* flags.
* @next_id: The id of the next request.
* @notifications: A list of struct seccomp_knotif elements.
*/
+
struct notification {
- struct semaphore request;
+ atomic_t requests;
+ u32 flags;
u64 next_id;
struct list_head notifications;
};
@@ -555,6 +560,8 @@ static void __seccomp_filter_release(struct seccomp_filter *orig)
* drop its reference count, and notify
* about unused filters
*
+ * @tsk: task the filter should be released from.
+ *
* This function should only be called when the task is exiting as
* it detaches it from its filter tree. As such, READ_ONCE() and
* barriers are not needed here, as would normally be needed.
@@ -574,6 +581,8 @@ void seccomp_filter_release(struct task_struct *tsk)
/**
* seccomp_sync_threads: sets all threads to use current's filter
*
+ * @flags: SECCOMP_FILTER_FLAG_* flags to set during sync.
+ *
* Expects sighand and cred_guard_mutex locks to be held, and for
* seccomp_can_sync_threads() to have returned success already
* without dropping the locks.
@@ -1116,8 +1125,11 @@ static int seccomp_do_user_notification(int this_syscall,
list_add_tail(&n.list, &match->notif->notifications);
INIT_LIST_HEAD(&n.addfd);
- up(&match->notif->request);
- wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
+ atomic_inc(&match->notif->requests);
+ if (match->notif->flags & SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP)
+ wake_up_poll_on_current_cpu(&match->wqh, EPOLLIN | EPOLLRDNORM);
+ else
+ wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);
/*
* This is where we wait for a reply from userspace.
@@ -1450,6 +1462,37 @@ find_notification(struct seccomp_filter *filter, u64 id)
return NULL;
}
+static int recv_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
+ void *key)
+{
+ /* Avoid a wakeup if event not interesting for us. */
+ if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
+ return 0;
+ return autoremove_wake_function(wait, mode, sync, key);
+}
+
+static int recv_wait_event(struct seccomp_filter *filter)
+{
+ DEFINE_WAIT_FUNC(wait, recv_wake_function);
+ int ret;
+
+ if (atomic_dec_if_positive(&filter->notif->requests) >= 0)
+ return 0;
+
+ for (;;) {
+ ret = prepare_to_wait_event(&filter->wqh, &wait, TASK_INTERRUPTIBLE);
+
+ if (atomic_dec_if_positive(&filter->notif->requests) >= 0)
+ break;
+
+ if (ret)
+ return ret;
+
+ schedule();
+ }
+ finish_wait(&filter->wqh, &wait);
+ return 0;
+}
static long seccomp_notify_recv(struct seccomp_filter *filter,
void __user *buf)
@@ -1467,7 +1510,7 @@ static long seccomp_notify_recv(struct seccomp_filter *filter,
memset(&unotif, 0, sizeof(unotif));
- ret = down_interruptible(&filter->notif->request);
+ ret = recv_wait_event(filter);
if (ret < 0)
return ret;
@@ -1515,7 +1558,8 @@ out:
if (should_sleep_killable(filter, knotif))
complete(&knotif->ready);
knotif->state = SECCOMP_NOTIFY_INIT;
- up(&filter->notif->request);
+ atomic_inc(&filter->notif->requests);
+ wake_up_poll(&filter->wqh, EPOLLIN | EPOLLRDNORM);
}
mutex_unlock(&filter->notify_lock);
}
@@ -1561,7 +1605,10 @@ static long seccomp_notify_send(struct seccomp_filter *filter,
knotif->error = resp.error;
knotif->val = resp.val;
knotif->flags = resp.flags;
- complete(&knotif->ready);
+ if (filter->notif->flags & SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP)
+ complete_on_current_cpu(&knotif->ready);
+ else
+ complete(&knotif->ready);
out:
mutex_unlock(&filter->notify_lock);
return ret;
@@ -1591,6 +1638,22 @@ static long seccomp_notify_id_valid(struct seccomp_filter *filter,
return ret;
}
+static long seccomp_notify_set_flags(struct seccomp_filter *filter,
+ unsigned long flags)
+{
+ long ret;
+
+ if (flags & ~SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&filter->notify_lock);
+ if (ret < 0)
+ return ret;
+ filter->notif->flags = flags;
+ mutex_unlock(&filter->notify_lock);
+ return 0;
+}
+
static long seccomp_notify_addfd(struct seccomp_filter *filter,
struct seccomp_notif_addfd __user *uaddfd,
unsigned int size)
@@ -1720,6 +1783,8 @@ static long seccomp_notify_ioctl(struct file *file, unsigned int cmd,
case SECCOMP_IOCTL_NOTIF_ID_VALID_WRONG_DIR:
case SECCOMP_IOCTL_NOTIF_ID_VALID:
return seccomp_notify_id_valid(filter, buf);
+ case SECCOMP_IOCTL_NOTIF_SET_FLAGS:
+ return seccomp_notify_set_flags(filter, arg);
}
/* Extensible Argument ioctls */
@@ -1777,7 +1842,6 @@ static struct file *init_listener(struct seccomp_filter *filter)
if (!filter->notif)
goto out;
- sema_init(&filter->notif->request, 0);
filter->notif->next_id = get_random_u64();
INIT_LIST_HEAD(&filter->notif->notifications);
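The new SECCOMP_IOCTL_NOTIF_SET_FLAGS ioctl lets a seccomp user-notification supervisor opt into synchronous, same-CPU wakeups between the notifying task and the handler. A hedged user-space sketch; the fallback ioctl and flag definitions below are assumptions for older uapi headers and should match the current <linux/seccomp.h>:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/seccomp.h>

#ifndef SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP
#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP	(1UL << 0)	/* assumed value */
#endif
#ifndef SECCOMP_IOCTL_NOTIF_SET_FLAGS
#define SECCOMP_IOCTL_NOTIF_SET_FLAGS	SECCOMP_IOW(4, __u64)	/* assumed value */
#endif

/* notify_fd is the listener fd obtained via SECCOMP_FILTER_FLAG_NEW_LISTENER. */
static int enable_sync_wakeups(int notify_fd)
{
	if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
		  SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP) < 0) {
		perror("SECCOMP_IOCTL_NOTIF_SET_FLAGS");
		return -1;
	}
	/* Subsequent NOTIF_RECV/SEND round trips prefer same-CPU wakeups. */
	return 0;
}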
diff --git a/kernel/smp.c b/kernel/smp.c
index 385179dae360..8455a53465af 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -46,6 +46,8 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
+static DEFINE_PER_CPU(atomic_t, trigger_backtrace) = ATOMIC_INIT(1);
+
static void __flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
@@ -253,13 +255,15 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
*bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
}
if (cpu >= 0) {
- dump_cpu_task(cpu);
+ if (atomic_cmpxchg_acquire(&per_cpu(trigger_backtrace, cpu), 1, 0))
+ dump_cpu_task(cpu);
if (!cpu_cur_csd) {
pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
arch_send_call_function_single_ipi(cpu);
}
}
- dump_stack();
+ if (firsttime)
+ dump_stack();
*ts1 = ts2;
return false;
@@ -433,9 +437,14 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
struct llist_node *entry, *prev;
struct llist_head *head;
static bool warned;
+ atomic_t *tbt;
lockdep_assert_irqs_disabled();
+ /* Allow waiters to send backtrace NMI from here onwards */
+ tbt = this_cpu_ptr(&trigger_backtrace);
+ atomic_set_release(tbt, 1);
+
head = this_cpu_ptr(&call_single_queue);
entry = llist_del_all(head);
entry = llist_reverse_order(entry);
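The csd-lock change above rate-limits remote backtraces with a per-CPU token: a waiter may consume it once, and the destination CPU re-arms it when it finally flushes its call_single_queue. The same gate pattern in isolation, with illustrative names:

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t diag_token = ATOMIC_INIT(1);

/* Waiter side: emit the expensive diagnostic at most once per re-arm. */
static void sketch_maybe_dump(void)
{
	if (atomic_cmpxchg_acquire(&diag_token, 1, 0))
		pr_alert("sketch: dumping state once\n");
}

/* Target side: re-arm the token after making forward progress. */
static void sketch_rearm(void)
{
	atomic_set_release(&diag_token, 1);
}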
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 807b34ccd797..210cf5f8d92c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -612,7 +612,7 @@ static inline void tick_irq_exit(void)
int cpu = smp_processor_id();
/* Make sure that timer wheel updates are propagated */
- if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
+ if ((sched_core_idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
if (!in_hardirq())
tick_nohz_irq_exit();
}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 88cbc1181b23..c108ed8a9804 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -473,8 +473,8 @@ static void clocksource_watchdog(struct timer_list *unused)
/* Check the deviation from the watchdog clocksource. */
md = cs->uncertainty_margin + watchdog->uncertainty_margin;
if (abs(cs_nsec - wd_nsec) > md) {
- u64 cs_wd_msec;
- u64 wd_msec;
+ s64 cs_wd_msec;
+ s64 wd_msec;
u32 wd_rem;
pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
@@ -483,8 +483,8 @@ static void clocksource_watchdog(struct timer_list *unused)
watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
pr_warn(" '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
cs->name, cs_nsec, csnow, cslast, cs->mask);
- cs_wd_msec = div_u64_rem(cs_nsec - wd_nsec, 1000U * 1000U, &wd_rem);
- wd_msec = div_u64_rem(wd_nsec, 1000U * 1000U, &wd_rem);
+ cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
+ wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
pr_warn(" Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
if (curr_clocksource == cs)
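The clocksource hunk switches to signed math because cs_nsec - wd_nsec can be negative; feeding that difference to div_u64_rem() would wrap to a huge unsigned value before printing. A small sketch of the corrected conversion, using div_s64_rem() from <linux/math64.h>:

#include <linux/math64.h>

/* Convert a possibly negative skew in ns to whole ms, keeping the sign. */
static s64 sketch_skew_ms(s64 cs_nsec, s64 wd_nsec)
{
	s32 rem;

	/* e.g. a skew of -1500000 ns yields -1 ms, not a huge positive value */
	return div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &rem);
}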
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
index 831e8e779ace..ca058c8af6ba 100644
--- a/kernel/time/time_test.c
+++ b/kernel/time/time_test.c
@@ -86,7 +86,7 @@ static void time64_to_tm_test_date_range(struct kunit *test)
}
static struct kunit_case time_test_cases[] = {
- KUNIT_CASE(time64_to_tm_test_date_range),
+ KUNIT_CASE_SLOW(time64_to_tm_test_date_range),
{}
};
diff --git a/kernel/torture.c b/kernel/torture.c
index 1a0519b836ac..b28b05bbef02 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -37,6 +37,7 @@
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
+#include <linux/sched/rt.h>
#include "rcu/rcu.h"
MODULE_LICENSE("GPL");
@@ -54,6 +55,9 @@ module_param(verbose_sleep_frequency, int, 0444);
static int verbose_sleep_duration = 1;
module_param(verbose_sleep_duration, int, 0444);
+static int random_shuffle;
+module_param(random_shuffle, int, 0444);
+
static char *torture_type;
static int verbose;
@@ -88,8 +92,8 @@ int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_s
ktime_t hto = baset_ns;
if (trsp)
- hto += (torture_random(trsp) >> 3) % fuzzt_ns;
- set_current_state(TASK_UNINTERRUPTIBLE);
+ hto += torture_random(trsp) % fuzzt_ns;
+ set_current_state(TASK_IDLE);
return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);
@@ -350,22 +354,22 @@ torture_onoff(void *arg)
if (onoff_holdoff > 0) {
VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
- schedule_timeout_interruptible(onoff_holdoff);
+ torture_hrtimeout_jiffies(onoff_holdoff, &rand);
VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
}
while (!torture_must_stop()) {
if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
- schedule_timeout_interruptible(HZ / 10);
+ torture_hrtimeout_jiffies(HZ / 10, &rand);
continue;
}
- cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
+ cpu = torture_random(&rand) % (maxcpu + 1);
if (!torture_offline(cpu,
&n_offline_attempts, &n_offline_successes,
&sum_offline, &min_offline, &max_offline))
torture_online(cpu,
&n_online_attempts, &n_online_successes,
&sum_online, &min_online, &max_online);
- schedule_timeout_interruptible(onoff_interval);
+ torture_hrtimeout_jiffies(onoff_interval, &rand);
}
stop:
@@ -518,6 +522,7 @@ static void torture_shuffle_task_unregister_all(void)
*/
static void torture_shuffle_tasks(void)
{
+ DEFINE_TORTURE_RANDOM(rand);
struct shuffle_task *stp;
cpumask_setall(shuffle_tmp_mask);
@@ -537,8 +542,10 @@ static void torture_shuffle_tasks(void)
cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
mutex_lock(&shuffle_task_mutex);
- list_for_each_entry(stp, &shuffle_task_list, st_l)
- set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
+ list_for_each_entry(stp, &shuffle_task_list, st_l) {
+ if (!random_shuffle || torture_random(&rand) & 0x1)
+ set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
+ }
mutex_unlock(&shuffle_task_mutex);
cpus_read_unlock();
@@ -550,9 +557,11 @@ static void torture_shuffle_tasks(void)
*/
static int torture_shuffle(void *arg)
{
+ DEFINE_TORTURE_RANDOM(rand);
+
VERBOSE_TOROUT_STRING("torture_shuffle task started");
do {
- schedule_timeout_interruptible(shuffle_interval);
+ torture_hrtimeout_jiffies(shuffle_interval, &rand);
torture_shuffle_tasks();
torture_shutdown_absorb("torture_shuffle");
} while (!torture_must_stop());
@@ -728,12 +737,12 @@ bool stutter_wait(const char *title)
cond_resched_tasks_rcu_qs();
spt = READ_ONCE(stutter_pause_test);
for (; spt; spt = READ_ONCE(stutter_pause_test)) {
- if (!ret) {
+ if (!ret && !rt_task(current)) {
sched_set_normal(current, MAX_NICE);
ret = true;
}
if (spt == 1) {
- schedule_timeout_interruptible(1);
+ torture_hrtimeout_jiffies(1, NULL);
} else if (spt == 2) {
while (READ_ONCE(stutter_pause_test)) {
if (!(i++ & 0xffff))
@@ -741,7 +750,7 @@ bool stutter_wait(const char *title)
cond_resched();
}
} else {
- schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ torture_hrtimeout_jiffies(round_jiffies_relative(HZ), NULL);
}
torture_shutdown_absorb(title);
}
@@ -926,7 +935,7 @@ EXPORT_SYMBOL_GPL(torture_kthread_stopping);
* it starts, you will need to open-code your own.
*/
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
- char *f, struct task_struct **tp)
+ char *f, struct task_struct **tp, void (*cbf)(struct task_struct *tp))
{
int ret = 0;
@@ -938,6 +947,10 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
*tp = NULL;
return ret;
}
+
+ if (cbf)
+ cbf(*tp);
+
wake_up_process(*tp); // Process is sleeping, so ordering provided.
torture_shuffle_task_register(*tp);
return ret;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b8870078ef58..8e64aaad5361 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4213,8 +4213,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
* will point to the same string as current_trace->name.
*/
mutex_lock(&trace_types_lock);
- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
+ if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+ /* Close iter->trace before switching to the new current tracer */
+ if (iter->trace->close)
+ iter->trace->close(iter);
*iter->trace = *tr->current_trace;
+ /* Reopen the new current tracer */
+ if (iter->trace->open)
+ iter->trace->open(iter);
+ }
mutex_unlock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -5277,11 +5284,17 @@ int tracing_set_cpumask(struct trace_array *tr,
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
+#endif
}
if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
+#ifdef CONFIG_TRACER_MAX_TRACE
+ ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
+#endif
}
}
arch_spin_unlock(&tr->max_lock);
@@ -6705,10 +6718,36 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
#endif
+static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
+{
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ if (cpumask_empty(tr->pipe_cpumask)) {
+ cpumask_setall(tr->pipe_cpumask);
+ return 0;
+ }
+ } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
+ cpumask_set_cpu(cpu, tr->pipe_cpumask);
+ return 0;
+ }
+ return -EBUSY;
+}
+
+static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
+{
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ WARN_ON(!cpumask_full(tr->pipe_cpumask));
+ cpumask_clear(tr->pipe_cpumask);
+ } else {
+ WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
+ cpumask_clear_cpu(cpu, tr->pipe_cpumask);
+ }
+}
+
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
+ int cpu;
int ret;
ret = tracing_check_open_get_tr(tr);
@@ -6716,13 +6755,16 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
return ret;
mutex_lock(&trace_types_lock);
+ cpu = tracing_get_cpu(inode);
+ ret = open_pipe_on_cpu(tr, cpu);
+ if (ret)
+ goto fail_pipe_on_cpu;
/* create a buffer to store the information to pass to userspace */
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
ret = -ENOMEM;
- __trace_array_put(tr);
- goto out;
+ goto fail_alloc_iter;
}
trace_seq_init(&iter->seq);
@@ -6745,7 +6787,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter->tr = tr;
iter->array_buffer = &tr->array_buffer;
- iter->cpu_file = tracing_get_cpu(inode);
+ iter->cpu_file = cpu;
mutex_init(&iter->mutex);
filp->private_data = iter;
@@ -6755,12 +6797,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
tr->trace_ref++;
-out:
+
mutex_unlock(&trace_types_lock);
return ret;
fail:
kfree(iter);
+fail_alloc_iter:
+ close_pipe_on_cpu(tr, cpu);
+fail_pipe_on_cpu:
__trace_array_put(tr);
mutex_unlock(&trace_types_lock);
return ret;
@@ -6777,7 +6822,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
-
+ close_pipe_on_cpu(tr, iter->cpu_file);
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
@@ -9441,6 +9486,9 @@ static struct trace_array *trace_array_create(const char *name)
if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
+ if (!alloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
+ goto out_free_tr;
+
tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
@@ -9482,6 +9530,7 @@ static struct trace_array *trace_array_create(const char *name)
out_free_tr:
ftrace_free_ftrace_ops(tr);
free_trace_buffers(tr);
+ free_cpumask_var(tr->pipe_cpumask);
free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
@@ -9584,6 +9633,7 @@ static int __remove_instance(struct trace_array *tr)
}
kfree(tr->topts);
+ free_cpumask_var(tr->pipe_cpumask);
free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
@@ -10381,12 +10431,14 @@ __init static int tracer_alloc_buffers(void)
if (trace_create_savedcmd() < 0)
goto out_free_temp_buffer;
+ if (!alloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
+ goto out_free_savedcmd;
+
/* TODO: make the number of buffers hot pluggable with CPUS */
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
- goto out_free_savedcmd;
+ goto out_free_pipe_cpumask;
}
-
if (global_trace.buffer_disabled)
tracing_off();
@@ -10439,6 +10491,8 @@ __init static int tracer_alloc_buffers(void)
return 0;
+out_free_pipe_cpumask:
+ free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
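With pipe_cpumask in place, each per-CPU trace_pipe (and the global one) can be opened by only one reader at a time; a second open now fails with -EBUSY instead of two readers sharing one iterator. A user-space sketch, assuming tracefs is mounted at /sys/kernel/tracing:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/per_cpu/cpu0/trace_pipe";
	int fd1 = open(path, O_RDONLY);
	int fd2 = open(path, O_RDONLY);	/* expected: -1 with errno == EBUSY */

	if (fd1 >= 0 && fd2 < 0 && errno == EBUSY)
		puts("second reader correctly rejected");
	if (fd1 >= 0)
		close(fd1);
	return 0;
}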
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index e1edc2197fc8..73eaec158473 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -377,6 +377,8 @@ struct trace_array {
struct list_head events;
struct trace_event_file *trace_marker_file;
cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
+ /* one per_cpu trace_pipe can be opened by only one user */
+ cpumask_var_t pipe_cpumask;
int ref;
int trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
@@ -1295,6 +1297,14 @@ static inline void trace_branch_disable(void)
/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);
+union trace_synth_field {
+ u8 as_u8;
+ u16 as_u16;
+ u32 as_u32;
+ u64 as_u64;
+ struct trace_dynamic_info as_dynamic;
+};
+
struct ftrace_event_field {
struct list_head link;
const char *name;
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index dd398afc8e25..9897d0bfcab7 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -127,7 +127,7 @@ static bool synth_event_match(const char *system, const char *event,
struct synth_trace_event {
struct trace_entry ent;
- u64 fields[];
+ union trace_synth_field fields[];
};
static int synth_event_define_fields(struct trace_event_call *call)
@@ -321,19 +321,19 @@ static const char *synth_field_fmt(char *type)
static void print_synth_event_num_val(struct trace_seq *s,
char *print_fmt, char *name,
- int size, u64 val, char *space)
+ int size, union trace_synth_field *val, char *space)
{
switch (size) {
case 1:
- trace_seq_printf(s, print_fmt, name, (u8)val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u8, space);
break;
case 2:
- trace_seq_printf(s, print_fmt, name, (u16)val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u16, space);
break;
case 4:
- trace_seq_printf(s, print_fmt, name, (u32)val, space);
+ trace_seq_printf(s, print_fmt, name, val->as_u32, space);
break;
default:
@@ -350,7 +350,7 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
struct trace_seq *s = &iter->seq;
struct synth_trace_event *entry;
struct synth_event *se;
- unsigned int i, n_u64;
+ unsigned int i, j, n_u64;
char print_fmt[32];
const char *fmt;
@@ -374,43 +374,28 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
/* parameter values */
if (se->fields[i]->is_string) {
if (se->fields[i]->is_dynamic) {
- u32 offset, data_offset;
- char *str_field;
-
- offset = (u32)entry->fields[n_u64];
- data_offset = offset & 0xffff;
-
- str_field = (char *)entry + data_offset;
+ union trace_synth_field *data = &entry->fields[n_u64];
trace_seq_printf(s, print_fmt, se->fields[i]->name,
STR_VAR_LEN_MAX,
- str_field,
+ (char *)entry + data->as_dynamic.offset,
i == se->n_fields - 1 ? "" : " ");
n_u64++;
} else {
trace_seq_printf(s, print_fmt, se->fields[i]->name,
STR_VAR_LEN_MAX,
- (char *)&entry->fields[n_u64],
+ (char *)&entry->fields[n_u64].as_u64,
i == se->n_fields - 1 ? "" : " ");
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
}
} else if (se->fields[i]->is_stack) {
- u32 offset, data_offset, len;
- unsigned long *p, *end;
-
- offset = (u32)entry->fields[n_u64];
- data_offset = offset & 0xffff;
- len = offset >> 16;
-
- p = (void *)entry + data_offset;
- end = (void *)p + len - (sizeof(long) - 1);
+ union trace_synth_field *data = &entry->fields[n_u64];
+ unsigned long *p = (void *)entry + data->as_dynamic.offset;
trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
-
- for (; *p && p < end; p++)
- trace_seq_printf(s, "=> %pS\n", (void *)*p);
+ for (j = 1; j < data->as_dynamic.len / sizeof(long); j++)
+ trace_seq_printf(s, "=> %pS\n", (void *)p[j]);
n_u64++;
-
} else {
struct trace_print_flags __flags[] = {
__def_gfpflag_names, {-1, NULL} };
@@ -419,13 +404,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
print_synth_event_num_val(s, print_fmt,
se->fields[i]->name,
se->fields[i]->size,
- entry->fields[n_u64],
+ &entry->fields[n_u64],
space);
if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
trace_seq_puts(s, " (");
trace_print_flags_seq(s, "|",
- entry->fields[n_u64],
+ entry->fields[n_u64].as_u64,
__flags);
trace_seq_putc(s, ')');
}
@@ -454,21 +439,16 @@ static unsigned int trace_string(struct synth_trace_event *entry,
int ret;
if (is_dynamic) {
- u32 data_offset;
-
- data_offset = struct_size(entry, fields, event->n_u64);
- data_offset += data_size;
-
- len = fetch_store_strlen((unsigned long)str_val);
+ union trace_synth_field *data = &entry->fields[*n_u64];
- data_offset |= len << 16;
- *(u32 *)&entry->fields[*n_u64] = data_offset;
+ data->as_dynamic.offset = struct_size(entry, fields, event->n_u64) + data_size;
+ data->as_dynamic.len = fetch_store_strlen((unsigned long)str_val);
ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
(*n_u64)++;
} else {
- str_field = (char *)&entry->fields[*n_u64];
+ str_field = (char *)&entry->fields[*n_u64].as_u64;
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
if ((unsigned long)str_val < TASK_SIZE)
@@ -492,6 +472,7 @@ static unsigned int trace_stack(struct synth_trace_event *entry,
unsigned int data_size,
unsigned int *n_u64)
{
+ union trace_synth_field *data = &entry->fields[*n_u64];
unsigned int len;
u32 data_offset;
void *data_loc;
@@ -504,10 +485,6 @@ static unsigned int trace_stack(struct synth_trace_event *entry,
break;
}
- /* Include the zero'd element if it fits */
- if (len < HIST_STACKTRACE_DEPTH)
- len++;
-
len *= sizeof(long);
/* Find the dynamic section to copy the stack into. */
@@ -515,8 +492,9 @@ static unsigned int trace_stack(struct synth_trace_event *entry,
memcpy(data_loc, stack, len);
/* Fill in the field that holds the offset/len combo */
- data_offset |= len << 16;
- *(u32 *)&entry->fields[*n_u64] = data_offset;
+
+ data->as_dynamic.offset = data_offset;
+ data->as_dynamic.len = len;
(*n_u64)++;
@@ -550,7 +528,8 @@ static notrace void trace_event_raw_event_synth(void *__data,
str_val = (char *)(long)var_ref_vals[val_idx];
if (event->dynamic_fields[i]->is_stack) {
- len = *((unsigned long *)str_val);
+ /* reserve one extra element for size */
+ len = *((unsigned long *)str_val) + 1;
len *= sizeof(unsigned long);
} else {
len = fetch_store_strlen((unsigned long)str_val);
@@ -592,19 +571,19 @@ static notrace void trace_event_raw_event_synth(void *__data,
switch (field->size) {
case 1:
- *(u8 *)&entry->fields[n_u64] = (u8)val;
+ entry->fields[n_u64].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&entry->fields[n_u64] = (u16)val;
+ entry->fields[n_u64].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&entry->fields[n_u64] = (u32)val;
+ entry->fields[n_u64].as_u32 = (u32)val;
break;
default:
- entry->fields[n_u64] = val;
+ entry->fields[n_u64].as_u64 = val;
break;
}
n_u64++;
@@ -1791,19 +1770,19 @@ int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
switch (field->size) {
case 1:
- *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ state.entry->fields[n_u64].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ state.entry->fields[n_u64].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ state.entry->fields[n_u64].as_u32 = (u32)val;
break;
default:
- state.entry->fields[n_u64] = val;
+ state.entry->fields[n_u64].as_u64 = val;
break;
}
n_u64++;
@@ -1884,19 +1863,19 @@ int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
switch (field->size) {
case 1:
- *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ state.entry->fields[n_u64].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ state.entry->fields[n_u64].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ state.entry->fields[n_u64].as_u32 = (u32)val;
break;
default:
- state.entry->fields[n_u64] = val;
+ state.entry->fields[n_u64].as_u64 = val;
break;
}
n_u64++;
@@ -2031,19 +2010,19 @@ static int __synth_event_add_val(const char *field_name, u64 val,
} else {
switch (field->size) {
case 1:
- *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
+ trace_state->entry->fields[field->offset].as_u8 = (u8)val;
break;
case 2:
- *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
+ trace_state->entry->fields[field->offset].as_u16 = (u16)val;
break;
case 4:
- *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
+ trace_state->entry->fields[field->offset].as_u32 = (u32)val;
break;
default:
- trace_state->entry->fields[field->offset] = val;
+ trace_state->entry->fields[field->offset].as_u64 = val;
break;
}
}
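The synthetic-event conversion above replaces open-coded casts and the hand-packed offset|len<<16 encoding with the trace_synth_field union added to trace.h. A before/after sketch of the accessor style; the helper name is illustrative and relies on the file-local struct synth_trace_event:

/* Illustrative helper: store a 16-bit value and a dynamic-area descriptor. */
static void sketch_store(struct synth_trace_event *entry, unsigned int n_u64,
			 u16 val, u32 data_offset, u32 len)
{
	entry->fields[n_u64].as_u16 = val;		/* was *(u16 *)&fields[n] = val  */
	entry->fields[n_u64 + 1].as_dynamic.offset = data_offset;
	entry->fields[n_u64 + 1].as_dynamic.len = len;	/* was offset | (len << 16)      */
}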
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 590b3d51afae..ba37f768e2f2 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
-
+ else
+ iter->private = NULL;
}
static void irqsoff_trace_close(struct trace_iterator *iter)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 330aee1c1a49..0469a04a355f 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter)
{
if (is_graph(iter->tr))
graph_trace_open(iter);
+ else
+ iter->private = NULL;
}
static void wakeup_trace_close(struct trace_iterator *iter)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d6798513a8c2..a8a1b0ac8b22 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1673,10 +1673,15 @@ menu "Debug kernel data structures"
config DEBUG_LIST
bool "Debug linked list manipulation"
- depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
+ depends on DEBUG_KERNEL
+ select LIST_HARDENED
help
- Enable this to turn on extended checks in the linked-list
- walking routines.
+ Enable this to turn on extended checks in the linked-list walking
+ routines.
+
+ This option trades better quality error reports for performance, and
+ is more suitable for kernel debugging. If you care about performance,
+ you should only enable CONFIG_LIST_HARDENED instead.
If unsure, say N.
@@ -1710,16 +1715,6 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
-config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
- for validity.
-
- If unsure, say N.
-
config DEBUG_MAPLE_TREE
bool "Debug maple trees"
depends on DEBUG_KERNEL
@@ -2701,6 +2696,9 @@ config MEMCPY_SLOW_KUNIT_TEST
and bit ranges. These can be very slow, so they are split out
as a separate config, in case they need to be disabled.
+ Note this config option will be replaced by the use of KUnit test
+ attributes.
+
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -3010,6 +3008,19 @@ config RUST_BUILD_ASSERT_ALLOW
If unsure, say N.
+config RUST_KERNEL_DOCTESTS
+ bool "Doctests for the `kernel` crate" if !KUNIT_ALL_TESTS
+ depends on RUST && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the documentation tests of the `kernel` crate
+ as KUnit tests.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
endmenu # "Rust"
endmenu # Kernel hacking
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index efae7e011956..59e21bfec188 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -13,7 +13,7 @@ menuconfig UBSAN
if UBSAN
config UBSAN_TRAP
- bool "On Sanitizer warnings, abort the running kernel code"
+ bool "Abort on Sanitizer warnings (smaller kernel but less verbose)"
depends on !COMPILE_TEST
help
Building kernels with Sanitizer features enabled tends to grow
@@ -26,6 +26,14 @@ config UBSAN_TRAP
the system. For some system builders this is an acceptable
trade-off.
+ Also note that selecting Y will cause your kernel to Oops
+ with an "illegal instruction" error with no further details
+ when a UBSAN violation occurs. (Except on arm64, which will
+ report which Sanitizer failed.) This may make it hard to
+ determine whether an Oops was caused by UBSAN or to figure
+ out the details of a UBSAN violation. It makes the kernel log
+ output less useful for bug reports.
+
config CC_HAS_UBSAN_BOUNDS_STRICT
def_bool $(cc-option,-fsanitize=bounds-strict)
help
diff --git a/lib/Makefile b/lib/Makefile
index 1ffae65bb7ee..2e08397f6210 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -167,7 +167,7 @@ obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_LIST_HARDENED) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
obj-$(CONFIG_BITREVERSE) += bitrev.o
@@ -259,7 +259,6 @@ obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
-obj-$(CONFIG_MPILIB) += mpi/
obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 0d3a686b5ba2..fb8c0c5c2bd2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -28,36 +28,16 @@ int __weak __clzsi2(int val)
}
EXPORT_SYMBOL(__clzsi2);
-int __weak __clzdi2(long val);
-int __weak __ctzdi2(long val);
-#if BITS_PER_LONG == 32
-
-int __weak __clzdi2(long val)
+int __weak __clzdi2(u64 val);
+int __weak __clzdi2(u64 val)
{
- return 32 - fls((int)val);
+ return 64 - fls64(val);
}
EXPORT_SYMBOL(__clzdi2);
-int __weak __ctzdi2(long val)
+int __weak __ctzdi2(u64 val);
+int __weak __ctzdi2(u64 val)
{
- return __ffs((u32)val);
+ return __ffs64(val);
}
EXPORT_SYMBOL(__ctzdi2);
-
-#elif BITS_PER_LONG == 64
-
-int __weak __clzdi2(long val)
-{
- return 64 - fls64((u64)val);
-}
-EXPORT_SYMBOL(__clzdi2);
-
-int __weak __ctzdi2(long val)
-{
- return __ffs64((u64)val);
-}
-EXPORT_SYMBOL(__ctzdi2);
-
-#else
-#error BITS_PER_LONG not 32 or 64
-#endif
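After this rework, __clzdi2()/__ctzdi2() take a u64 on both 32-bit and 64-bit builds, so the result no longer depends on BITS_PER_LONG. A small sketch of the expected semantics:

#include <linux/bitops.h>
#include <linux/bug.h>

int __clzdi2(u64 val);
int __ctzdi2(u64 val);

static void sketch_clz_ctz(void)
{
	u64 v = 1ULL << 40;

	WARN_ON(__clzdi2(v) != 23);	/* == 64 - fls64(v): leading zero bits  */
	WARN_ON(__ctzdi2(v) != 40);	/* == __ffs64(v):   trailing zero bits  */
}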
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 6ec2d4543d9c..8d1446c2be71 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -53,3 +53,5 @@ libblake2s-y += blake2s-selftest.o
libchacha20poly1305-y += chacha20poly1305-selftest.o
libcurve25519-y += curve25519-selftest.o
endif
+
+obj-$(CONFIG_MPILIB) += mpi/
diff --git a/lib/mpi/Makefile b/lib/crypto/mpi/Makefile
index 6e6ef9a34fe1..6e6ef9a34fe1 100644
--- a/lib/mpi/Makefile
+++ b/lib/crypto/mpi/Makefile
diff --git a/lib/mpi/ec.c b/lib/crypto/mpi/ec.c
index 40f5908e57a4..40f5908e57a4 100644
--- a/lib/mpi/ec.c
+++ b/lib/crypto/mpi/ec.c
diff --git a/lib/mpi/generic_mpih-add1.c b/lib/crypto/mpi/generic_mpih-add1.c
index 299308b5461c..299308b5461c 100644
--- a/lib/mpi/generic_mpih-add1.c
+++ b/lib/crypto/mpi/generic_mpih-add1.c
diff --git a/lib/mpi/generic_mpih-lshift.c b/lib/crypto/mpi/generic_mpih-lshift.c
index 7b21f5938a50..7b21f5938a50 100644
--- a/lib/mpi/generic_mpih-lshift.c
+++ b/lib/crypto/mpi/generic_mpih-lshift.c
diff --git a/lib/mpi/generic_mpih-mul1.c b/lib/crypto/mpi/generic_mpih-mul1.c
index e020e61d47b9..e020e61d47b9 100644
--- a/lib/mpi/generic_mpih-mul1.c
+++ b/lib/crypto/mpi/generic_mpih-mul1.c
diff --git a/lib/mpi/generic_mpih-mul2.c b/lib/crypto/mpi/generic_mpih-mul2.c
index 9484d8528243..9484d8528243 100644
--- a/lib/mpi/generic_mpih-mul2.c
+++ b/lib/crypto/mpi/generic_mpih-mul2.c
diff --git a/lib/mpi/generic_mpih-mul3.c b/lib/crypto/mpi/generic_mpih-mul3.c
index ccdbab4121e0..ccdbab4121e0 100644
--- a/lib/mpi/generic_mpih-mul3.c
+++ b/lib/crypto/mpi/generic_mpih-mul3.c
diff --git a/lib/mpi/generic_mpih-rshift.c b/lib/crypto/mpi/generic_mpih-rshift.c
index e07bc69aa898..e07bc69aa898 100644
--- a/lib/mpi/generic_mpih-rshift.c
+++ b/lib/crypto/mpi/generic_mpih-rshift.c
diff --git a/lib/mpi/generic_mpih-sub1.c b/lib/crypto/mpi/generic_mpih-sub1.c
index eea4382aad5f..eea4382aad5f 100644
--- a/lib/mpi/generic_mpih-sub1.c
+++ b/lib/crypto/mpi/generic_mpih-sub1.c
diff --git a/lib/mpi/longlong.h b/lib/crypto/mpi/longlong.h
index b6fa1d08fb55..b6fa1d08fb55 100644
--- a/lib/mpi/longlong.h
+++ b/lib/crypto/mpi/longlong.h
diff --git a/lib/mpi/mpi-add.c b/lib/crypto/mpi/mpi-add.c
index 9056fc5167fc..9056fc5167fc 100644
--- a/lib/mpi/mpi-add.c
+++ b/lib/crypto/mpi/mpi-add.c
diff --git a/lib/mpi/mpi-bit.c b/lib/crypto/mpi/mpi-bit.c
index 070ba784c9f1..070ba784c9f1 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/crypto/mpi/mpi-bit.c
diff --git a/lib/mpi/mpi-cmp.c b/lib/crypto/mpi/mpi-cmp.c
index c4cfa3ff0581..0835b6213235 100644
--- a/lib/mpi/mpi-cmp.c
+++ b/lib/crypto/mpi/mpi-cmp.c
@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
mpi_limb_t limb = v;
mpi_normalize(u);
- if (!u->nlimbs && !limb)
- return 0;
+ if (u->nlimbs == 0) {
+ if (v == 0)
+ return 0;
+ else
+ return -1;
+ }
if (u->sign)
return -1;
if (u->nlimbs > 1)
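The mpi_cmp_ui() fix handles a normalized zero (nlimbs == 0) explicitly: zero equals 0 and is smaller than any non-zero v, instead of falling through to a limb comparison on an empty limb array. An illustrative check, assuming the usual MPI allocation helpers behave as described:

#include <linux/bug.h>
#include <linux/mpi.h>

static void sketch_cmp_zero(void)
{
	MPI zero = mpi_alloc(0);	/* represents the value zero, nlimbs == 0 */

	if (!zero)
		return;
	WARN_ON(mpi_cmp_ui(zero, 0) != 0);	/* zero == 0 */
	WARN_ON(mpi_cmp_ui(zero, 5) >= 0);	/* zero <  5 */
	mpi_free(zero);
}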
diff --git a/lib/mpi/mpi-div.c b/lib/crypto/mpi/mpi-div.c
index 45beab8b9e9e..45beab8b9e9e 100644
--- a/lib/mpi/mpi-div.c
+++ b/lib/crypto/mpi/mpi-div.c
diff --git a/lib/mpi/mpi-inline.h b/lib/crypto/mpi/mpi-inline.h
index 980b6b940953..980b6b940953 100644
--- a/lib/mpi/mpi-inline.h
+++ b/lib/crypto/mpi/mpi-inline.h
diff --git a/lib/mpi/mpi-internal.h b/lib/crypto/mpi/mpi-internal.h
index 554002182db1..554002182db1 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/crypto/mpi/mpi-internal.h
diff --git a/lib/mpi/mpi-inv.c b/lib/crypto/mpi/mpi-inv.c
index 61e37d18f793..61e37d18f793 100644
--- a/lib/mpi/mpi-inv.c
+++ b/lib/crypto/mpi/mpi-inv.c
diff --git a/lib/mpi/mpi-mod.c b/lib/crypto/mpi/mpi-mod.c
index 54fcc01564d9..54fcc01564d9 100644
--- a/lib/mpi/mpi-mod.c
+++ b/lib/crypto/mpi/mpi-mod.c
diff --git a/lib/mpi/mpi-mul.c b/lib/crypto/mpi/mpi-mul.c
index 7f4eda8560dc..7f4eda8560dc 100644
--- a/lib/mpi/mpi-mul.c
+++ b/lib/crypto/mpi/mpi-mul.c
diff --git a/lib/mpi/mpi-pow.c b/lib/crypto/mpi/mpi-pow.c
index 2fd7a46d55ec..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/crypto/mpi/mpi-pow.c
diff --git a/lib/mpi/mpi-sub-ui.c b/lib/crypto/mpi/mpi-sub-ui.c
index b41b082b5f3e..b41b082b5f3e 100644
--- a/lib/mpi/mpi-sub-ui.c
+++ b/lib/crypto/mpi/mpi-sub-ui.c
diff --git a/lib/mpi/mpicoder.c b/lib/crypto/mpi/mpicoder.c
index 3cb6bd148fa9..3cb6bd148fa9 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/crypto/mpi/mpicoder.c
diff --git a/lib/mpi/mpih-cmp.c b/lib/crypto/mpi/mpih-cmp.c
index f23709114a65..f23709114a65 100644
--- a/lib/mpi/mpih-cmp.c
+++ b/lib/crypto/mpi/mpih-cmp.c
diff --git a/lib/mpi/mpih-div.c b/lib/crypto/mpi/mpih-div.c
index be70ee2e42d3..be70ee2e42d3 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/crypto/mpi/mpih-div.c
diff --git a/lib/mpi/mpih-mul.c b/lib/crypto/mpi/mpih-mul.c
index e5f1c84e3c48..e5f1c84e3c48 100644
--- a/lib/mpi/mpih-mul.c
+++ b/lib/crypto/mpi/mpih-mul.c
diff --git a/lib/mpi/mpiutil.c b/lib/crypto/mpi/mpiutil.c
index aa8c46544af8..aa8c46544af8 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/crypto/mpi/mpiutil.c
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e4dc809d1075..424737045b97 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -566,24 +566,37 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_zero);
-size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
- struct iov_iter *i)
+size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
+ size_t bytes, struct iov_iter *i)
{
- char *kaddr = kmap_atomic(page), *p = kaddr + offset;
- if (!page_copy_sane(page, offset, bytes)) {
- kunmap_atomic(kaddr);
+ size_t n, copied = 0;
+
+ if (!page_copy_sane(page, offset, bytes))
return 0;
- }
- if (WARN_ON_ONCE(!i->data_source)) {
- kunmap_atomic(kaddr);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
- iterate_and_advance(i, bytes, base, len, off,
- copyin(p + off, base, len),
- memcpy_from_iter(i, p + off, base, len)
- )
- kunmap_atomic(kaddr);
- return bytes;
+
+ do {
+ char *p;
+
+ n = bytes - copied;
+ if (PageHighMem(page)) {
+ page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+ n = min_t(size_t, n, PAGE_SIZE - offset);
+ }
+
+ p = kmap_atomic(page) + offset;
+ iterate_and_advance(i, n, base, len, off,
+ copyin(p + off, base, len),
+ memcpy_from_iter(i, p + off, base, len)
+ )
+ kunmap_atomic(p);
+ copied += n;
+ offset += n;
+ } while (PageHighMem(page) && copied != bytes && n > 0);
+
+ return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 626719b95bad..68a6daec0aef 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -4,7 +4,7 @@
menuconfig KUNIT
tristate "KUnit - Enable support for unit tests"
- select GLOB if KUNIT=y
+ select GLOB
help
Enables support for kernel unit tests (KUnit), a lightweight unit
testing and mocking framework for the Linux kernel. These tests are
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index cb417f504996..46f75f23dfe4 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -6,7 +6,8 @@ kunit-objs += test.o \
string-stream.o \
assert.o \
try-catch.o \
- executor.o
+ executor.o \
+ attributes.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
new file mode 100644
index 000000000000..1b512f7e1838
--- /dev/null
+++ b/lib/kunit/attributes.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#include <kunit/test.h>
+#include <kunit/attributes.h>
+
+/* Options for printing attributes:
+ * PRINT_ALWAYS - attribute is printed for every test case and suite if set
+ * PRINT_SUITE - attribute is printed for every suite if set but not for test cases
+ * PRINT_NEVER - attribute is never printed
+ */
+enum print_ops {
+ PRINT_ALWAYS,
+ PRINT_SUITE,
+ PRINT_NEVER,
+};
+
+/**
+ * struct kunit_attr - represents a test attribute and holds flexible
+ * helper functions to interact with attribute.
+ *
+ * @name: name of test attribute, eg. speed
+ * @get_attr: function to return attribute value given a test
+ * @to_string: function to return string representation of given
+ * attribute value
+ * @filter: function to indicate whether a given attribute value passes a
+ * filter
+ * @attr_default: default attribute value used during filtering
+ * @print: value of enum print_ops to indicate when to print attribute
+ */
+struct kunit_attr {
+ const char *name;
+ void *(*get_attr)(void *test_or_suite, bool is_test);
+ const char *(*to_string)(void *attr, bool *to_free);
+ int (*filter)(void *attr, const char *input, int *err);
+ void *attr_default;
+ enum print_ops print;
+};
+
+/* String Lists for enum Attributes */
+
+static const char * const speed_str_list[] = {"unset", "very_slow", "slow", "normal"};
+
+/* To String Methods */
+
+static const char *attr_enum_to_string(void *attr, const char * const str_list[], bool *to_free)
+{
+ long val = (long)attr;
+
+ *to_free = false;
+ if (!val)
+ return NULL;
+ return str_list[val];
+}
+
+static const char *attr_speed_to_string(void *attr, bool *to_free)
+{
+ return attr_enum_to_string(attr, speed_str_list, to_free);
+}
+
+static const char *attr_string_to_string(void *attr, bool *to_free)
+{
+ *to_free = false;
+ return (char *) attr;
+}
+
+/* Filter Methods */
+
+static const char op_list[] = "<>!=";
+
+/*
+ * Returns whether the inputted integer value matches the filter given
+ * by the operation string and inputted integer.
+ */
+static int int_filter(long val, const char *op, int input, int *err)
+{
+ if (!strncmp(op, "<=", 2))
+ return (val <= input);
+ else if (!strncmp(op, ">=", 2))
+ return (val >= input);
+ else if (!strncmp(op, "!=", 2))
+ return (val != input);
+ else if (!strncmp(op, ">", 1))
+ return (val > input);
+ else if (!strncmp(op, "<", 1))
+ return (val < input);
+ else if (!strncmp(op, "=", 1))
+ return (val == input);
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", op);
+ return false;
+}
+
+/*
+ * Returns whether the inputted enum value "attr" matches the filter given
+ * by the input string. Note: the str_list includes the corresponding string
+ * list to the enum values.
+ */
+static int attr_enum_filter(void *attr, const char *input, int *err,
+ const char * const str_list[], int max)
+{
+ int i, j, input_int = -1;
+ long test_val = (long)attr;
+ const char *input_val = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_val = input + i;
+ break;
+ }
+ }
+
+ if (!input_val) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ for (j = 0; j <= max; j++) {
+ if (!strcmp(input_val, str_list[j]))
+ input_int = j;
+ }
+
+ if (input_int < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(test_val, input, input_int, err);
+}
+
+static int attr_speed_filter(void *attr, const char *input, int *err)
+{
+ return attr_enum_filter(attr, input, err, speed_str_list, KUNIT_SPEED_MAX);
+}
+
+/*
+ * Returns whether the inputted string value (attr) matches the filter given
+ * by the input string.
+ */
+static int attr_string_filter(void *attr, const char *input, int *err)
+{
+ char *str = attr;
+
+ if (!strncmp(input, "<", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, ">", 1)) {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ } else if (!strncmp(input, "!=", 2)) {
+ return (strcmp(input + 2, str) != 0);
+ } else if (!strncmp(input, "=", 1)) {
+ return (strcmp(input + 1, str) == 0);
+ }
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter operation: %s\n", input);
+ return false;
+}
+
+
+/* Get Attribute Methods */
+
+static void *attr_speed_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) test->attr.speed);
+ else
+ return ((void *) suite->attr.speed);
+}
+
+static void *attr_module_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ // Suites get their module attribute from their first test_case
+ if (test)
+ return ((void *) test->module_name);
+ else if (kunit_suite_num_test_cases(suite) > 0)
+ return ((void *) suite->test_cases[0].module_name);
+ else
+ return (void *) "";
+}
+
+/* List of all Test Attributes */
+
+static struct kunit_attr kunit_attr_list[] = {
+ {
+ .name = "speed",
+ .get_attr = attr_speed_get,
+ .to_string = attr_speed_to_string,
+ .filter = attr_speed_filter,
+ .attr_default = (void *)KUNIT_SPEED_NORMAL,
+ .print = PRINT_ALWAYS,
+ },
+ {
+ .name = "module",
+ .get_attr = attr_module_get,
+ .to_string = attr_string_to_string,
+ .filter = attr_string_filter,
+ .attr_default = (void *)"",
+ .print = PRINT_SUITE,
+ }
+};
+
+/* Helper Functions to Access Attributes */
+
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter)
+{
+ return filter.attr->name;
+}
+
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level)
+{
+ int i;
+ bool to_free = false;
+ void *attr;
+ const char *attr_name, *attr_str;
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ for (i = 0; i < ARRAY_SIZE(kunit_attr_list); i++) {
+ if (kunit_attr_list[i].print == PRINT_NEVER ||
+ (test && kunit_attr_list[i].print == PRINT_SUITE))
+ continue;
+ attr = kunit_attr_list[i].get_attr(test_or_suite, is_test);
+ if (attr) {
+ attr_name = kunit_attr_list[i].name;
+ attr_str = kunit_attr_list[i].to_string(attr, &to_free);
+ if (test) {
+ kunit_log(KERN_INFO, test, "%*s# %s.%s: %s",
+ KUNIT_INDENT_LEN * test_level, "", test->name,
+ attr_name, attr_str);
+ } else {
+ kunit_log(KERN_INFO, suite, "%*s# %s: %s",
+ KUNIT_INDENT_LEN * test_level, "", attr_name, attr_str);
+ }
+
+ /* Free to_string of attribute if needed */
+ if (to_free)
+ kfree(attr_str);
+ }
+ }
+}
+
+/* Helper Functions to Filter Attributes */
+
+int kunit_get_filter_count(char *input)
+{
+ int i, comma_index = 0, count = 0;
+
+ for (i = 0; input[i]; i++) {
+ if (input[i] == ',') {
+ if ((i - comma_index) > 1)
+ count++;
+ comma_index = i;
+ }
+ }
+ if ((i - comma_index) > 0)
+ count++;
+ return count;
+}
+
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err)
+{
+ struct kunit_attr_filter filter = {};
+ int i, j, comma_index = 0, new_start_index = 0;
+ int op_index = -1, attr_index = -1;
+ char op;
+ char *input = *filters;
+
+ /* Parse input until operation */
+ for (i = 0; input[i]; i++) {
+ if (op_index < 0 && strchr(op_list, input[i])) {
+ op_index = i;
+ } else if (!comma_index && input[i] == ',') {
+ comma_index = i;
+ } else if (comma_index && input[i] != ' ') {
+ new_start_index = i;
+ break;
+ }
+ }
+
+ if (op_index <= 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter operation not found: %s\n", input);
+ return filter;
+ }
+
+ /* Temporarily set operator to \0 character. */
+ op = input[op_index];
+ input[op_index] = '\0';
+
+ /* Find associated kunit_attr object */
+ for (j = 0; j < ARRAY_SIZE(kunit_attr_list); j++) {
+ if (!strcmp(input, kunit_attr_list[j].name)) {
+ attr_index = j;
+ break;
+ }
+ }
+
+ input[op_index] = op;
+
+ if (attr_index < 0) {
+ *err = -EINVAL;
+ pr_err("kunit executor: attribute not found: %s\n", input);
+ } else {
+ filter.attr = &kunit_attr_list[attr_index];
+ }
+
+ if (comma_index > 0) {
+ input[comma_index] = '\0';
+ filter.input = input + op_index;
+ input = input + new_start_index;
+ } else {
+ filter.input = input + op_index;
+ input = NULL;
+ }
+
+ *filters = input;
+
+ return filter;
+}
+
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err)
+{
+ int n = 0;
+ struct kunit_case *filtered, *test_case;
+ struct kunit_suite *copy;
+ void *suite_val, *test_val;
+ bool suite_result, test_result, default_result, result;
+
+ /* Allocate memory for new copy of suite and list of test cases */
+ copy = kmemdup(suite, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return ERR_PTR(-ENOMEM);
+
+ kunit_suite_for_each_test_case(suite, test_case) { n++; }
+
+ filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+ if (!filtered) {
+ kfree(copy);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+
+ /* Save filtering result on default value */
+ default_result = filter.attr->filter(filter.attr->attr_default, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* Save suite attribute value and filtering result on that value */
+ suite_val = filter.attr->get_attr((void *)suite, false);
+ suite_result = filter.attr->filter(suite_val, filter.input, err);
+ if (*err)
+ goto err;
+
+ /* For each test case, save test case if passes filtering. */
+ kunit_suite_for_each_test_case(suite, test_case) {
+ test_val = filter.attr->get_attr((void *) test_case, true);
+ test_result = filter.attr->filter(filter.attr->get_attr(test_case, true),
+ filter.input, err);
+ if (*err)
+ goto err;
+
+ /*
+ * If attribute value of test case is set, filter on that value.
+ * If not, filter on suite value if set. If not, filter on
+ * default value.
+ */
+ result = false;
+ if (test_val) {
+ if (test_result)
+ result = true;
+ } else if (suite_val) {
+ if (suite_result)
+ result = true;
+ } else if (default_result) {
+ result = true;
+ }
+
+ if (result) {
+ filtered[n++] = *test_case;
+ } else if (action && strcmp(action, "skip") == 0) {
+ test_case->status = KUNIT_SKIPPED;
+ filtered[n++] = *test_case;
+ }
+ }
+
+err:
+ if (n == 0 || *err) {
+ kfree(copy);
+ kfree(filtered);
+ return NULL;
+ }
+
+ copy->test_cases = filtered;
+
+ return copy;
+}
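Test attributes become useful together with the new executor module parameters below (filter, filter_action, action=list_attr) and the KUNIT_CASE_SLOW() wrapper already used in the time_test.c hunk. A hedged sketch of marking and filtering a slow test; suite and test names are illustrative:

#include <kunit/test.h>

static void fast_case(struct kunit *test) { KUNIT_EXPECT_EQ(test, 1 + 1, 2); }
static void slow_case(struct kunit *test) { KUNIT_EXPECT_EQ(test, 2 + 2, 4); }

static struct kunit_case sketch_cases[] = {
	KUNIT_CASE(fast_case),
	KUNIT_CASE_SLOW(slow_case),	/* marks the "speed" attribute as slow */
	{}
};

static struct kunit_suite sketch_suite = {
	.name = "attr-sketch",
	.test_cases = sketch_cases,
};
kunit_test_suite(sketch_suite);

/*
 * Boot-time usage (parameter names taken from the executor changes below):
 *   kunit.filter="speed>slow"   run only tests faster than "slow"
 *   kunit.filter_action=skip    report filtered-out tests as skipped
 *   kunit.action=list_attr      list test names together with attributes
 */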
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 74982b83707c..5181aa2e760b 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -2,6 +2,7 @@
#include <linux/reboot.h>
#include <kunit/test.h>
+#include <kunit/attributes.h>
#include <linux/glob.h>
#include <linux/moduleparam.h>
@@ -12,28 +13,59 @@
extern struct kunit_suite * const __kunit_suites_start[];
extern struct kunit_suite * const __kunit_suites_end[];
-#if IS_BUILTIN(CONFIG_KUNIT)
-
-static char *filter_glob_param;
static char *action_param;
-module_param_named(filter_glob, filter_glob_param, charp, 0);
-MODULE_PARM_DESC(filter_glob,
- "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
-module_param_named(action, action_param, charp, 0);
+module_param_named(action, action_param, charp, 0400);
MODULE_PARM_DESC(action,
"Changes KUnit executor behavior, valid values are:\n"
"<none>: run the tests like normal\n"
- "'list' to list test names instead of running them.\n");
+ "'list' to list test names instead of running them.\n"
+ "'list_attr' to list test names and attributes instead of running them.\n");
+
+const char *kunit_action(void)
+{
+ return action_param;
+}
+
+static char *filter_glob_param;
+static char *filter_param;
+static char *filter_action_param;
+
+module_param_named(filter_glob, filter_glob_param, charp, 0400);
+MODULE_PARM_DESC(filter_glob,
+ "Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
+module_param_named(filter, filter_param, charp, 0400);
+MODULE_PARM_DESC(filter,
+ "Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
+module_param_named(filter_action, filter_action_param, charp, 0400);
+MODULE_PARM_DESC(filter_action,
+ "Changes behavior of filtered tests using attributes, valid values are:\n"
+ "<none>: do not run filtered tests as normal\n"
+ "'skip': skip all filtered tests instead so tests will appear in output\n");
+
+const char *kunit_filter_glob(void)
+{
+ return filter_glob_param;
+}
+
+char *kunit_filter(void)
+{
+ return filter_param;
+}
+
+char *kunit_filter_action(void)
+{
+ return filter_action_param;
+}
/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
-struct kunit_test_filter {
+struct kunit_glob_filter {
char *suite_glob;
char *test_glob;
};
/* Split "suite_glob.test_glob" into two. Assumes filter_glob is not empty. */
-static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
+static void kunit_parse_glob_filter(struct kunit_glob_filter *parsed,
const char *filter_glob)
{
const int len = strlen(filter_glob);
@@ -55,7 +87,7 @@ static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
/* Create a copy of suite with only tests that match test_glob. */
static struct kunit_suite *
-kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob)
+kunit_filter_glob_tests(const struct kunit_suite *const suite, const char *test_glob)
{
int n = 0;
struct kunit_case *filtered, *test_case;
@@ -89,16 +121,7 @@ kunit_filter_tests(const struct kunit_suite *const suite, const char *test_glob)
return copy;
}
-static char *kunit_shutdown;
-core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
-
-/* Stores an array of suites, end points one past the end */
-struct suite_set {
- struct kunit_suite * const *start;
- struct kunit_suite * const *end;
-};
-
-static void kunit_free_suite_set(struct suite_set suite_set)
+void kunit_free_suite_set(struct kunit_suite_set suite_set)
{
struct kunit_suite * const *suites;
@@ -107,72 +130,117 @@ static void kunit_free_suite_set(struct suite_set suite_set)
kfree(suite_set.start);
}
-static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
- const char *filter_glob,
- int *err)
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err)
{
- int i;
- struct kunit_suite **copy, *filtered_suite;
- struct suite_set filtered;
- struct kunit_test_filter filter;
+ int i, j, k;
+ int filter_count = 0;
+ struct kunit_suite **copy, **copy_start, *filtered_suite, *new_filtered_suite;
+ struct kunit_suite_set filtered = {NULL, NULL};
+ struct kunit_glob_filter parsed_glob;
+ struct kunit_attr_filter *parsed_filters = NULL;
const size_t max = suite_set->end - suite_set->start;
copy = kmalloc_array(max, sizeof(*filtered.start), GFP_KERNEL);
- filtered.start = copy;
if (!copy) { /* won't be able to run anything, return an empty set */
- filtered.end = copy;
return filtered;
}
+ copy_start = copy;
- kunit_parse_filter_glob(&filter, filter_glob);
-
- for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
- if (!glob_match(filter.suite_glob, suite_set->start[i]->name))
- continue;
+ if (filter_glob)
+ kunit_parse_glob_filter(&parsed_glob, filter_glob);
- filtered_suite = kunit_filter_tests(suite_set->start[i], filter.test_glob);
- if (IS_ERR(filtered_suite)) {
- *err = PTR_ERR(filtered_suite);
+ /* Parse attribute filters */
+ if (filters) {
+ filter_count = kunit_get_filter_count(filters);
+ parsed_filters = kcalloc(filter_count, sizeof(*parsed_filters), GFP_KERNEL);
+ if (!parsed_filters) {
+ kfree(copy);
return filtered;
}
+ for (j = 0; j < filter_count; j++)
+ parsed_filters[j] = kunit_next_attr_filter(&filters, err);
+ if (*err)
+ goto err;
+ }
+
+ for (i = 0; &suite_set->start[i] != suite_set->end; i++) {
+ filtered_suite = suite_set->start[i];
+ if (filter_glob) {
+ if (!glob_match(parsed_glob.suite_glob, filtered_suite->name))
+ continue;
+ filtered_suite = kunit_filter_glob_tests(filtered_suite,
+ parsed_glob.test_glob);
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto err;
+ }
+ }
+ if (filter_count > 0 && parsed_filters != NULL) {
+ for (k = 0; k < filter_count; k++) {
+ new_filtered_suite = kunit_filter_attr_tests(filtered_suite,
+ parsed_filters[k], filter_action, err);
+
+ /* Free previous copy of suite */
+ if (k > 0 || filter_glob) {
+ kfree(filtered_suite->test_cases);
+ kfree(filtered_suite);
+ }
+
+ filtered_suite = new_filtered_suite;
+
+ if (*err)
+ goto err;
+ if (IS_ERR(filtered_suite)) {
+ *err = PTR_ERR(filtered_suite);
+ goto err;
+ }
+ if (!filtered_suite)
+ break;
+ }
+ }
+
if (!filtered_suite)
continue;
*copy++ = filtered_suite;
}
+ filtered.start = copy_start;
filtered.end = copy;
- kfree(filter.suite_glob);
- kfree(filter.test_glob);
- return filtered;
-}
+err:
+ if (*err)
+ kfree(copy);
-static void kunit_handle_shutdown(void)
-{
- if (!kunit_shutdown)
- return;
+ if (filter_glob) {
+ kfree(parsed_glob.suite_glob);
+ kfree(parsed_glob.test_glob);
+ }
- if (!strcmp(kunit_shutdown, "poweroff"))
- kernel_power_off();
- else if (!strcmp(kunit_shutdown, "halt"))
- kernel_halt();
- else if (!strcmp(kunit_shutdown, "reboot"))
- kernel_restart(NULL);
+ if (filter_count)
+ kfree(parsed_filters);
+ return filtered;
}
-static void kunit_exec_run_tests(struct suite_set *suite_set)
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin)
{
size_t num_suites = suite_set->end - suite_set->start;
- pr_info("KTAP version 1\n");
- pr_info("1..%zu\n", num_suites);
+ if (builtin || num_suites) {
+ pr_info("KTAP version 1\n");
+ pr_info("1..%zu\n", num_suites);
+ }
__kunit_test_suites_init(suite_set->start, num_suites);
}
-static void kunit_exec_list_tests(struct suite_set *suite_set)
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
{
struct kunit_suite * const *suites;
struct kunit_case *test_case;
@@ -180,23 +248,54 @@ static void kunit_exec_list_tests(struct suite_set *suite_set)
/* Hack: print a ktap header so kunit.py can find the start of KUnit output. */
pr_info("KTAP version 1\n");
- for (suites = suite_set->start; suites < suite_set->end; suites++)
+ for (suites = suite_set->start; suites < suite_set->end; suites++) {
+ /* Print suite name and suite attributes */
+ pr_info("%s\n", (*suites)->name);
+ if (include_attr)
+ kunit_print_attr((void *)(*suites), false, 0);
+
+ /* Print test case name and attributes in suite */
kunit_suite_for_each_test_case((*suites), test_case) {
pr_info("%s.%s\n", (*suites)->name, test_case->name);
+ if (include_attr)
+ kunit_print_attr((void *)test_case, true, 0);
}
+ }
+}
+
+#if IS_BUILTIN(CONFIG_KUNIT)
+
+static char *kunit_shutdown;
+core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
+
+static void kunit_handle_shutdown(void)
+{
+ if (!kunit_shutdown)
+ return;
+
+ if (!strcmp(kunit_shutdown, "poweroff"))
+ kernel_power_off();
+ else if (!strcmp(kunit_shutdown, "halt"))
+ kernel_halt();
+ else if (!strcmp(kunit_shutdown, "reboot"))
+ kernel_restart(NULL);
+
}
int kunit_run_all_tests(void)
{
- struct suite_set suite_set = {__kunit_suites_start, __kunit_suites_end};
+ struct kunit_suite_set suite_set = {
+ __kunit_suites_start, __kunit_suites_end,
+ };
int err = 0;
if (!kunit_enabled()) {
pr_info("kunit: disabled\n");
goto out;
}
- if (filter_glob_param) {
- suite_set = kunit_filter_suites(&suite_set, filter_glob_param, &err);
+ if (filter_glob_param || filter_param) {
+ suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
+ filter_param, filter_action_param, &err);
if (err) {
pr_err("kunit executor: error filtering suites: %d\n", err);
goto out;
@@ -204,13 +303,15 @@ int kunit_run_all_tests(void)
}
if (!action_param)
- kunit_exec_run_tests(&suite_set);
+ kunit_exec_run_tests(&suite_set, true);
else if (strcmp(action_param, "list") == 0)
- kunit_exec_list_tests(&suite_set);
+ kunit_exec_list_tests(&suite_set, false);
+ else if (strcmp(action_param, "list_attr") == 0)
+ kunit_exec_list_tests(&suite_set, true);
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
- if (filter_glob_param) { /* a copy was made of each suite */
+ if (filter_glob_param || filter_param) { /* a copy was made of each suite */
kunit_free_suite_set(suite_set);
}
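A minimal sketch (not part of the patch) of how a built-in caller uses the extended kunit_filter_suites() signature; the glob, attribute-filter string, and action are exactly the values that arrive via the kunit.filter_glob, kunit.filter and kunit.filter_action parameters added above.

static void example_filter_and_run(void)
{
	struct kunit_suite_set set = {
		__kunit_suites_start, __kunit_suites_end,
	};
	int err = 0;

	/* keep example.* suites; tests failing "speed>slow" are marked skipped */
	set = kunit_filter_suites(&set, "example.*", "speed>slow", "skip", &err);
	if (err) {
		pr_err("kunit executor: error filtering suites: %d\n", err);
		return;
	}
	kunit_exec_run_tests(&set, true);
	kunit_free_suite_set(set);
}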
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index ce6749af374d..4084071d0eb5 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -7,6 +7,7 @@
*/
#include <kunit/test.h>
+#include <kunit/attributes.h>
static void kfree_at_end(struct kunit *test, const void *to_free);
static struct kunit_suite *alloc_fake_suite(struct kunit *test,
@@ -24,15 +25,15 @@ static struct kunit_case dummy_test_cases[] = {
static void parse_filter_test(struct kunit *test)
{
- struct kunit_test_filter filter = {NULL, NULL};
+ struct kunit_glob_filter filter = {NULL, NULL};
- kunit_parse_filter_glob(&filter, "suite");
+ kunit_parse_glob_filter(&filter, "suite");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_FALSE(test, filter.test_glob);
kfree(filter.suite_glob);
kfree(filter.test_glob);
- kunit_parse_filter_glob(&filter, "suite.test");
+ kunit_parse_glob_filter(&filter, "suite.test");
KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
kfree(filter.suite_glob);
@@ -42,15 +43,17 @@ static void parse_filter_test(struct kunit *test)
static void filter_suites_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
- struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
- struct suite_set got;
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2, NULL */
- got = kunit_filter_suites(&suite_set, "suite2", &err);
+ got = kunit_filter_suites(&suite_set, "suite2", NULL, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
@@ -66,15 +69,17 @@ static void filter_suites_test(struct kunit *test)
static void filter_suites_test_glob_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
- struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
- struct suite_set got;
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
/* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */
- got = kunit_filter_suites(&suite_set, "suite2.test2", &err);
+ got = kunit_filter_suites(&suite_set, "suite2.test2", NULL, NULL, &err);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start);
@@ -93,14 +98,16 @@ static void filter_suites_test_glob_test(struct kunit *test)
static void filter_suites_to_empty_test(struct kunit *test)
{
struct kunit_suite *subsuite[3] = {NULL, NULL};
- struct suite_set suite_set = {.start = subsuite, .end = &subsuite[2]};
- struct suite_set got;
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
int err = 0;
subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
- got = kunit_filter_suites(&suite_set, "not_found", &err);
+ got = kunit_filter_suites(&suite_set, "not_found", NULL, NULL, &err);
KUNIT_ASSERT_EQ(test, err, 0);
kfree_at_end(test, got.start); /* just in case */
@@ -108,11 +115,132 @@ static void filter_suites_to_empty_test(struct kunit *test)
"should be empty to indicate no match");
}
+static void parse_filter_attr_test(struct kunit *test)
+{
+ int j, filter_count;
+ struct kunit_attr_filter *parsed_filters;
+ char *filters = "speed>slow, module!=example";
+ int err = 0;
+
+ filter_count = kunit_get_filter_count(filters);
+ KUNIT_EXPECT_EQ(test, filter_count, 2);
+
+ parsed_filters = kunit_kcalloc(test, filter_count, sizeof(*parsed_filters),
+ GFP_KERNEL);
+ for (j = 0; j < filter_count; j++) {
+ parsed_filters[j] = kunit_next_attr_filter(&filters, &err);
+ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
+ }
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[0].input, ">slow");
+
+ KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[1]), "module");
+ KUNIT_EXPECT_STREQ(test, parsed_filters[1].input, "!=example");
+}
+
+static struct kunit_case dummy_attr_test_cases[] = {
+ /* .run_case is not important, just needs to be non-NULL */
+ { .name = "slow", .run_case = dummy_test, .module_name = "dummy",
+ .attr.speed = KUNIT_SPEED_SLOW },
+ { .name = "normal", .run_case = dummy_test, .module_name = "dummy" },
+ {},
+};
+
+static void filter_attr_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "normal_suite", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "slow_suite", dummy_attr_test_cases);
+ subsuite[1]->attr.speed = KUNIT_SPEED_SLOW; // Set suite attribute
+
+ /*
+ * Want: normal_suite(slow, normal), slow_suite(slow, normal),
+ * NULL -> normal_suite(normal), NULL
+ *
+ * The normal test in slow_suite is also filtered out: its own speed
+ * attribute is unset, so filtering falls back to the parent suite's
+ * attribute, which is slow.
+ */
+ got = kunit_filter_suites(&suite_set, NULL, "speed>slow", NULL, &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start);
+
+ /* Validate we just have normal_suite */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->name, "normal_suite");
+ KUNIT_ASSERT_EQ(test, got.end - got.start, 1);
+
+ /* Now validate we just have normal test case */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "normal");
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].name);
+}
+
+static void filter_attr_empty_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[3] = {NULL, NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[2],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite1", dummy_attr_test_cases);
+ subsuite[1] = alloc_fake_suite(test, "suite2", dummy_attr_test_cases);
+
+ got = kunit_filter_suites(&suite_set, NULL, "module!=dummy", NULL, &err);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start); /* just in case */
+
+ KUNIT_EXPECT_PTR_EQ_MSG(test, got.start, got.end,
+ "should be empty to indicate no match");
+}
+
+static void filter_attr_skip_test(struct kunit *test)
+{
+ struct kunit_suite *subsuite[2] = {NULL};
+ struct kunit_suite_set suite_set = {
+ .start = subsuite, .end = &subsuite[1],
+ };
+ struct kunit_suite_set got;
+ int err = 0;
+
+ subsuite[0] = alloc_fake_suite(test, "suite", dummy_attr_test_cases);
+
+ /* Want: suite(slow, normal), NULL -> suite(slow with SKIP, normal), NULL */
+ got = kunit_filter_suites(&suite_set, NULL, "speed>slow", "skip", &err);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start);
+ KUNIT_ASSERT_EQ(test, err, 0);
+ kfree_at_end(test, got.start);
+
+ /* Validate we have both the slow and normal test */
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, got.start[0]->test_cases);
+ KUNIT_ASSERT_EQ(test, kunit_suite_num_test_cases(got.start[0]), 2);
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[0].name, "slow");
+ KUNIT_EXPECT_STREQ(test, got.start[0]->test_cases[1].name, "normal");
+
+ /* Now ensure slow is skipped and normal is not */
+ KUNIT_EXPECT_EQ(test, got.start[0]->test_cases[0].status, KUNIT_SKIPPED);
+ KUNIT_EXPECT_FALSE(test, got.start[0]->test_cases[1].status);
+}
+
static struct kunit_case executor_test_cases[] = {
KUNIT_CASE(parse_filter_test),
KUNIT_CASE(filter_suites_test),
KUNIT_CASE(filter_suites_test_glob_test),
KUNIT_CASE(filter_suites_to_empty_test),
+ KUNIT_CASE(parse_filter_attr_test),
+ KUNIT_CASE(filter_attr_test),
+ KUNIT_CASE(filter_attr_empty_test),
+ KUNIT_CASE(filter_attr_skip_test),
{}
};
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index b69b689ea850..01a769f35e1d 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -221,6 +221,14 @@ static void example_params_test(struct kunit *test)
}
/*
+ * This test should always pass. Can be used to practice filtering attributes.
+ */
+static void example_slow_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+}
+
+/*
* Here we make a list of all the test cases we want to add to the test suite
* below.
*/
@@ -237,6 +245,7 @@ static struct kunit_case example_test_cases[] = {
KUNIT_CASE(example_all_expect_macros_test),
KUNIT_CASE(example_static_stub_test),
KUNIT_CASE_PARAM(example_params_test, example_gen_params),
+ KUNIT_CASE_SLOW(example_slow_test),
{}
};
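For reference, a hedged sketch of how a test author opts into the new attributes: KUNIT_CASE_SLOW() marks a single case, and (as the executor test above exercises) a whole suite can carry .attr.speed so cases without their own speed inherit it. The test functions and suite name below are hypothetical.

static struct kunit_case my_test_cases[] = {
	KUNIT_CASE(my_fast_test),		/* hypothetical test */
	KUNIT_CASE_SLOW(my_exhaustive_test),	/* case speed attribute = slow */
	{}
};

static struct kunit_suite my_test_suite = {
	.name = "my_suite",
	.attr.speed = KUNIT_SPEED_SLOW,	/* inherited by cases with unset speed */
	.test_cases = my_test_cases,
};
kunit_test_suite(my_test_suite);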
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 84e4666555c9..49698a168437 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -9,6 +9,7 @@
#include <kunit/resource.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
+#include <kunit/attributes.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -168,6 +169,13 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite)
}
EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
+/* Currently supported test levels */
+enum {
+ KUNIT_LEVEL_SUITE = 0,
+ KUNIT_LEVEL_CASE,
+ KUNIT_LEVEL_CASE_PARAM,
+};
+
static void kunit_print_suite_start(struct kunit_suite *suite)
{
/*
@@ -181,17 +189,11 @@ static void kunit_print_suite_start(struct kunit_suite *suite)
pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
suite->name);
+ kunit_print_attr((void *)suite, false, KUNIT_LEVEL_CASE);
pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
kunit_suite_num_test_cases(suite));
}
-/* Currently supported test levels */
-enum {
- KUNIT_LEVEL_SUITE = 0,
- KUNIT_LEVEL_CASE,
- KUNIT_LEVEL_CASE_PARAM,
-};
-
static void kunit_print_ok_not_ok(struct kunit *test,
unsigned int test_level,
enum kunit_status status,
@@ -611,18 +613,22 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
struct kunit_result_stats param_stats = { 0 };
- test_case->status = KUNIT_SKIPPED;
kunit_init_test(&test, test_case->name, test_case->log);
-
- if (!test_case->generate_params) {
+ if (test_case->status == KUNIT_SKIPPED) {
+ /* Test marked as skip */
+ test.status = KUNIT_SKIPPED;
+ kunit_update_stats(&param_stats, test.status);
+ } else if (!test_case->generate_params) {
/* Non-parameterised test. */
+ test_case->status = KUNIT_SKIPPED;
kunit_run_case_catch_errors(suite, test_case, &test);
kunit_update_stats(&param_stats, test.status);
} else {
/* Get initial param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
+ test_case->status = KUNIT_SKIPPED;
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"KTAP version 1\n");
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
@@ -651,6 +657,7 @@ int kunit_run_tests(struct kunit_suite *suite)
}
}
+ kunit_print_attr((void *)test_case, true, KUNIT_LEVEL_CASE);
kunit_print_test_stats(&test, param_stats);
@@ -729,12 +736,45 @@ EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
#ifdef CONFIG_MODULES
static void kunit_module_init(struct module *mod)
{
- __kunit_test_suites_init(mod->kunit_suites, mod->num_kunit_suites);
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+ int err = 0;
+
+ suite_set = kunit_filter_suites(&suite_set,
+ kunit_filter_glob() ?: "*.*",
+ kunit_filter(), kunit_filter_action(),
+ &err);
+ if (err)
+ pr_err("kunit module: error filtering suites: %d\n", err);
+
+ mod->kunit_suites = (struct kunit_suite **)suite_set.start;
+ mod->num_kunit_suites = suite_set.end - suite_set.start;
+
+ if (!action)
+ kunit_exec_run_tests(&suite_set, false);
+ else if (!strcmp(action, "list"))
+ kunit_exec_list_tests(&suite_set, false);
+ else if (!strcmp(action, "list_attr"))
+ kunit_exec_list_tests(&suite_set, true);
+ else
+ pr_err("kunit: unknown action '%s'\n", action);
}
static void kunit_module_exit(struct module *mod)
{
- __kunit_test_suites_exit(mod->kunit_suites, mod->num_kunit_suites);
+ struct kunit_suite_set suite_set = {
+ mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
+ };
+ const char *action = kunit_action();
+
+ if (!action)
+ __kunit_test_suites_exit(mod->kunit_suites,
+ mod->num_kunit_suites);
+
+ if (suite_set.start)
+ kunit_free_suite_set(suite_set);
}
static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
diff --git a/lib/list_debug.c b/lib/list_debug.c
index d98d43f80958..db602417febf 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -2,7 +2,8 @@
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
- * This file contains the linked list validation for DEBUG_LIST.
+ * This file contains the linked list validation and error reporting for
+ * LIST_HARDENED and DEBUG_LIST.
*/
#include <linux/export.h>
@@ -17,8 +18,9 @@
* attempt).
*/
-bool __list_add_valid(struct list_head *new, struct list_head *prev,
- struct list_head *next)
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
{
if (CHECK_DATA_CORRUPTION(prev == NULL,
"list_add corruption. prev is NULL.\n") ||
@@ -37,9 +39,10 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
return true;
}
-EXPORT_SYMBOL(__list_add_valid);
+EXPORT_SYMBOL(__list_add_valid_or_report);
-bool __list_del_entry_valid(struct list_head *entry)
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
{
struct list_head *prev, *next;
@@ -65,6 +68,5 @@ bool __list_del_entry_valid(struct list_head *entry)
return false;
return true;
-
}
-EXPORT_SYMBOL(__list_del_entry_valid);
+EXPORT_SYMBOL(__list_del_entry_valid_or_report);
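The rename pairs with inline fast paths in <linux/list.h> that are not part of this hunk; roughly, something like the simplified sketch below, where the cheap sanity checks stay inline and only a corruption-like condition falls through to the reporting slow path.

static __always_inline bool __list_add_valid(struct list_head *new,
					     struct list_head *prev,
					     struct list_head *next)
{
	/* Simplified: the real header also special-cases CONFIG_DEBUG_LIST. */
	if (likely(next->prev == prev && prev->next == next &&
		   new != prev && new != next))
		return true;
	return __list_add_valid_or_report(new, prev, next);
}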
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 8d24279fad05..6f6a5fc85b42 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2506,94 +2506,29 @@ static void fs_reclaim_tests(void)
pr_cont("\n");
}
-#define __guard(cleanup) __maybe_unused __attribute__((__cleanup__(cleanup)))
+/* Defines guard classes to create contexts */
+DEFINE_LOCK_GUARD_0(HARDIRQ, HARDIRQ_ENTER(), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(NOTTHREADED_HARDIRQ,
+ do {
+ local_irq_disable();
+ __irq_enter();
+ WARN_ON(!in_irq());
+ } while(0), HARDIRQ_EXIT())
+DEFINE_LOCK_GUARD_0(SOFTIRQ, SOFTIRQ_ENTER(), SOFTIRQ_EXIT())
+
+/* Define RCU guards, should go away when RCU has its own guard definitions */
+DEFINE_LOCK_GUARD_0(RCU, rcu_read_lock(), rcu_read_unlock())
+DEFINE_LOCK_GUARD_0(RCU_BH, rcu_read_lock_bh(), rcu_read_unlock_bh())
+DEFINE_LOCK_GUARD_0(RCU_SCHED, rcu_read_lock_sched(), rcu_read_unlock_sched())
-static void hardirq_exit(int *_)
-{
- HARDIRQ_EXIT();
-}
-
-#define HARDIRQ_CONTEXT(name, ...) \
- int hardirq_guard_##name __guard(hardirq_exit); \
- HARDIRQ_ENTER();
-
-#define NOTTHREADED_HARDIRQ_CONTEXT(name, ...) \
- int notthreaded_hardirq_guard_##name __guard(hardirq_exit); \
- local_irq_disable(); \
- __irq_enter(); \
- WARN_ON(!in_irq());
-
-static void softirq_exit(int *_)
-{
- SOFTIRQ_EXIT();
-}
-
-#define SOFTIRQ_CONTEXT(name, ...) \
- int softirq_guard_##name __guard(softirq_exit); \
- SOFTIRQ_ENTER();
-
-static void rcu_exit(int *_)
-{
- rcu_read_unlock();
-}
-
-#define RCU_CONTEXT(name, ...) \
- int rcu_guard_##name __guard(rcu_exit); \
- rcu_read_lock();
-
-static void rcu_bh_exit(int *_)
-{
- rcu_read_unlock_bh();
-}
-
-#define RCU_BH_CONTEXT(name, ...) \
- int rcu_bh_guard_##name __guard(rcu_bh_exit); \
- rcu_read_lock_bh();
-
-static void rcu_sched_exit(int *_)
-{
- rcu_read_unlock_sched();
-}
-
-#define RCU_SCHED_CONTEXT(name, ...) \
- int rcu_sched_guard_##name __guard(rcu_sched_exit); \
- rcu_read_lock_sched();
-
-static void raw_spinlock_exit(raw_spinlock_t **lock)
-{
- raw_spin_unlock(*lock);
-}
-
-#define RAW_SPINLOCK_CONTEXT(name, lock) \
- raw_spinlock_t *raw_spinlock_guard_##name __guard(raw_spinlock_exit) = &(lock); \
- raw_spin_lock(&(lock));
-
-static void spinlock_exit(spinlock_t **lock)
-{
- spin_unlock(*lock);
-}
-
-#define SPINLOCK_CONTEXT(name, lock) \
- spinlock_t *spinlock_guard_##name __guard(spinlock_exit) = &(lock); \
- spin_lock(&(lock));
-
-static void mutex_exit(struct mutex **lock)
-{
- mutex_unlock(*lock);
-}
-
-#define MUTEX_CONTEXT(name, lock) \
- struct mutex *mutex_guard_##name __guard(mutex_exit) = &(lock); \
- mutex_lock(&(lock));
#define GENERATE_2_CONTEXT_TESTCASE(outer, outer_lock, inner, inner_lock) \
\
static void __maybe_unused inner##_in_##outer(void) \
{ \
- outer##_CONTEXT(_, outer_lock); \
- { \
- inner##_CONTEXT(_, inner_lock); \
- } \
+ /* Relies on the reversed clean-up ordering: inner first */ \
+ guard(outer)(outer_lock); \
+ guard(inner)(inner_lock); \
}
/*
@@ -2632,21 +2567,21 @@ GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(MUTEX, mutex_A, inner, inner_lock)
+GENERATE_2_CONTEXT_TESTCASE(raw_spinlock, &raw_lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(spinlock, &lock_A, inner, inner_lock) \
+GENERATE_2_CONTEXT_TESTCASE(mutex, &mutex_A, inner, inner_lock)
GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RCU, )
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(RAW_SPINLOCK, raw_lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(SPINLOCK, lock_B)
-GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(raw_spinlock, &raw_lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(spinlock, &lock_B)
+GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(mutex, &mutex_B)
/* the outer context allows all kinds of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(outer) \
dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(MUTEX_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, SUCCESS, LOCKTYPE_MUTEX); \
/*
* the outer context only allows the preemption introduced by spinlock_t (which
@@ -2654,16 +2589,16 @@ GENERATE_2_CONTEXT_TESTCASE_FOR_ALL_OUTER(MUTEX, mutex_B)
*/
#define DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(outer) \
dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
/* the outer doesn't allow any kind of preemption */
#define DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(outer) \
dotest(RCU_in_##outer, SUCCESS, LOCKTYPE_RWLOCK); \
- dotest(RAW_SPINLOCK_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
- dotest(SPINLOCK_in_##outer, FAILURE, LOCKTYPE_SPIN); \
- dotest(MUTEX_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
+ dotest(raw_spinlock_in_##outer, SUCCESS, LOCKTYPE_SPIN); \
+ dotest(spinlock_in_##outer, FAILURE, LOCKTYPE_SPIN); \
+ dotest(mutex_in_##outer, FAILURE, LOCKTYPE_MUTEX); \
static void wait_context_tests(void)
{
@@ -2697,15 +2632,15 @@ static void wait_context_tests(void)
pr_cont("\n");
print_testname("in RAW_SPINLOCK context");
- DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RAW_SPINLOCK);
+ DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(raw_spinlock);
pr_cont("\n");
print_testname("in SPINLOCK context");
- DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(SPINLOCK);
+ DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(spinlock);
pr_cont("\n");
print_testname("in MUTEX context");
- DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(MUTEX);
+ DO_CONTEXT_TESTCASE_OUTER_PREEMPTIBLE(mutex);
pr_cont("\n");
}
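For readers unfamiliar with the guard infrastructure, a hedged sketch of roughly what GENERATE_2_CONTEXT_TESTCASE(RCU, , spinlock, &lock_A) now generates: a guard() enters its context at declaration and leaves it automatically when the scope ends, in reverse declaration order (hence the "inner first" comment above).

static void __maybe_unused spinlock_in_RCU(void)
{
	guard(RCU)();			/* rcu_read_lock()  ... rcu_read_unlock() */
	guard(spinlock)(&lock_A);	/* spin_lock()      ... spin_unlock()     */
	/* body runs under both contexts; cleanup happens inner (spinlock) first */
}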
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 4dd73cf936a6..f723024e1426 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4265,6 +4265,10 @@ static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
* mas_wr_append: Attempt to append
* @wr_mas: the maple write state
*
+ * This is currently unsafe in RCU mode, since the end of the node may be
+ * cached by readers while the node contents are updated, which could result
+ * in inaccurate information.
+ *
* Return: True if appended, false otherwise
*/
static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
@@ -4274,6 +4278,9 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
struct ma_state *mas = wr_mas->mas;
unsigned char node_pivots = mt_pivots[wr_mas->type];
+ if (mt_in_rcu(mas->tree))
+ return false;
+
if (mas->offset != wr_mas->node_end)
return false;
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 887926f04731..440aee705ccc 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -551,10 +551,10 @@ static void strtomem_test(struct kunit *test)
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
- KUNIT_CASE(memcpy_large_test),
- KUNIT_CASE(memmove_test),
- KUNIT_CASE(memmove_large_test),
- KUNIT_CASE(memmove_overlap_test),
+ KUNIT_CASE_SLOW(memcpy_large_test),
+ KUNIT_CASE_SLOW(memmove_test),
+ KUNIT_CASE_SLOW(memmove_large_test),
+ KUNIT_CASE_SLOW(memmove_overlap_test),
KUNIT_CASE(strtomem_test),
{}
};
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1a31065b2036..976b9bd02a1b 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1136,7 +1136,6 @@ static void set_iter_tags(struct radix_tree_iter *iter,
void __rcu **radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter)
{
- slot++;
iter->index = __radix_tree_iter_add(iter, 1);
iter->next_index = iter->index;
iter->tags = 0;
diff --git a/mm/Makefile b/mm/Makefile
index 678530a07326..d4ee20988dd1 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -51,7 +51,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o percpu.o slab_common.o \
- compaction.o show_mem.o\
+ compaction.o show_mem.o shmem_quota.o\
interval_tree.o list_lru.o workingset.o \
debug.o gup.o mmap_lock.o $(mmu-y)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 2fcc9731528a..e0e59d420fca 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -386,6 +386,7 @@ out:
static const struct mm_walk_ops damon_mkold_ops = {
.pmd_entry = damon_mkold_pmd_entry,
.hugetlb_entry = damon_mkold_hugetlb_entry,
+ .walk_lock = PGWALK_RDLOCK,
};
static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
@@ -525,6 +526,7 @@ out:
static const struct mm_walk_ops damon_young_ops = {
.pmd_entry = damon_young_pmd_entry,
.hugetlb_entry = damon_young_hugetlb_entry,
+ .walk_lock = PGWALK_RDLOCK,
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
diff --git a/mm/filemap.c b/mm/filemap.c
index 9e44a49bbd74..baafbf324c9f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1855,30 +1855,15 @@ out:
*
* Looks up the page cache entry at @mapping & @index.
*
- * @fgp_flags can be zero or more of these flags:
- *
- * * %FGP_ACCESSED - The folio will be marked accessed.
- * * %FGP_LOCK - The folio is returned locked.
- * * %FGP_CREAT - If no page is present then a new page is allocated using
- * @gfp and added to the page cache and the VM's LRU list.
- * The page is returned locked and with an increased refcount.
- * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
- * page is already in cache. If the page was allocated, unlock it before
- * returning so the caller can do the same dance.
- * * %FGP_WRITE - The page will be written to by the caller.
- * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
- * * %FGP_NOWAIT - Don't get blocked by page lock.
- * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
- *
* If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
* if the %GFP flags specified for %FGP_CREAT are atomic.
*
- * If there is a page cache page, it is returned with an increased refcount.
+ * If this function returns a folio, it is returned with an increased refcount.
*
* Return: The found folio or an ERR_PTR() otherwise.
*/
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp)
+ fgf_t fgp_flags, gfp_t gfp)
{
struct folio *folio;
@@ -1920,7 +1905,9 @@ repeat:
folio_wait_stable(folio);
no_page:
if (!folio && (fgp_flags & FGP_CREAT)) {
+ unsigned order = FGF_GET_ORDER(fgp_flags);
int err;
+
if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
gfp |= __GFP_WRITE;
if (fgp_flags & FGP_NOFS)
@@ -1929,26 +1916,44 @@ no_page:
gfp &= ~GFP_KERNEL;
gfp |= GFP_NOWAIT | __GFP_NOWARN;
}
-
- folio = filemap_alloc_folio(gfp, 0);
- if (!folio)
- return ERR_PTR(-ENOMEM);
-
if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
fgp_flags |= FGP_LOCK;
- /* Init accessed so avoid atomic mark_page_accessed later */
- if (fgp_flags & FGP_ACCESSED)
- __folio_set_referenced(folio);
+ if (!mapping_large_folio_support(mapping))
+ order = 0;
+ if (order > MAX_PAGECACHE_ORDER)
+ order = MAX_PAGECACHE_ORDER;
+ /* If we're not aligned, allocate a smaller folio */
+ if (index & ((1UL << order) - 1))
+ order = __ffs(index);
- err = filemap_add_folio(mapping, folio, index, gfp);
- if (unlikely(err)) {
+ do {
+ gfp_t alloc_gfp = gfp;
+
+ err = -ENOMEM;
+ if (order == 1)
+ order = 0;
+ if (order > 0)
+ alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
+ folio = filemap_alloc_folio(alloc_gfp, order);
+ if (!folio)
+ continue;
+
+ /* Init accessed so avoid atomic mark_page_accessed later */
+ if (fgp_flags & FGP_ACCESSED)
+ __folio_set_referenced(folio);
+
+ err = filemap_add_folio(mapping, folio, index, gfp);
+ if (!err)
+ break;
folio_put(folio);
folio = NULL;
- if (err == -EEXIST)
- goto repeat;
- }
+ } while (order-- > 0);
+ if (err == -EEXIST)
+ goto repeat;
+ if (err)
+ return ERR_PTR(err);
/*
* filemap_add_folio locks the page, and for mmap
* we expect an unlocked page.
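A hedged sketch of the caller side this enables (not part of this hunk): a filesystem write path can now ask for a higher-order folio and rely on the loop above to fall back to smaller orders. fgf_set_order() is assumed here to be the helper that encodes the requested order into the new fgf_t flags; the function name and write_len parameter are hypothetical.

static int example_grab_write_folio(struct address_space *mapping,
				    pgoff_t index, size_t write_len,
				    struct folio **foliop)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE |
				    fgf_set_order(write_len),	/* assumed helper */
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	*foliop = folio;
	return 0;
}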
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index c6f056c20503..10c3247542cb 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(add_to_page_cache_lru);
noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
- int fgp_flags, gfp_t gfp)
+ fgf_t fgp_flags, gfp_t gfp)
{
struct folio *folio;
diff --git a/mm/gup.c b/mm/gup.c
index 76d222ccc3ff..6e2f9e9d6537 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -597,7 +597,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
pte = ptep_get(ptep);
if (!pte_present(pte))
goto no_page;
- if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
+ if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
goto no_page;
page = vm_normal_page(vma, address, pte);
@@ -714,7 +714,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
- if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
+ if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
return no_page_table(vma, flags);
ptl = pmd_lock(mm, pmd);
@@ -851,6 +851,10 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
return NULL;
+ /*
+ * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
+ * to fail on PROT_NONE-mapped pages.
+ */
page = follow_page_mask(vma, address, foll_flags, &ctx);
if (ctx.pgmap)
put_dev_pagemap(ctx.pgmap);
@@ -2227,6 +2231,13 @@ static bool is_valid_gup_args(struct page **pages, int *locked,
gup_flags |= FOLL_UNLOCKABLE;
}
+ /*
+ * For now, always trigger NUMA hinting faults. Some GUP users like
+ * KVM require the hint to be as the calling context of GUP is
+ * functionally similar to a memory reference from task context.
+ */
+ gup_flags |= FOLL_HONOR_NUMA_FAULT;
+
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
(FOLL_PIN | FOLL_GET)))
@@ -2551,7 +2562,14 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
struct page *page;
struct folio *folio;
- if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
+ /*
+ * Always fall back to ordinary GUP on PROT_NONE-mapped pages:
+ * pte_access_permitted() should reject these pages either way;
+ * otherwise, GUP-fast might succeed in cases where ordinary GUP
+ * would fail due to VMA access permissions.
+ */
+ if (pte_protnone(pte))
goto pte_unmap;
if (!pte_access_permitted(pte, flags & FOLL_WRITE))
@@ -2970,8 +2988,8 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
pmd_devmap(pmd))) {
- if (pmd_protnone(pmd) &&
- !gup_can_follow_protnone(flags))
+ /* See gup_pte_range() */
+ if (pmd_protnone(pmd))
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
@@ -3151,7 +3169,7 @@ static int internal_get_user_pages_fast(unsigned long start,
if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
FOLL_FORCE | FOLL_PIN | FOLL_GET |
FOLL_FAST_ONLY | FOLL_NOFAULT |
- FOLL_PCI_P2PDMA)))
+ FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
return -EINVAL;
if (gup_flags & FOLL_PIN)
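The header side of gup_can_follow_protnone() is not in this hunk; conceptually it now behaves roughly like the sketch below, only honouring NUMA hinting faults when the new FOLL_HONOR_NUMA_FAULT flag is set. This is a sketch of the idea, not the exact upstream helper.

static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
					   unsigned int flags)
{
	if (!(flags & FOLL_HONOR_NUMA_FAULT))
		return true;
	/*
	 * pte_protnone() in an accessible VMA is a NUMA hint, so the caller
	 * must take the fault; in a genuinely inaccessible (PROT_NONE) VMA
	 * there is no NUMA hinting to honor.
	 */
	return !vma_is_accessible(vma);
}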
diff --git a/mm/hmm.c b/mm/hmm.c
index 855e25e59d8f..277ddcab4947 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -562,6 +562,7 @@ static const struct mm_walk_ops hmm_walk_ops = {
.pte_hole = hmm_vma_walk_hole,
.hugetlb_entry = hmm_vma_walk_hugetlb_entry,
.test_walk = hmm_vma_walk_test,
+ .walk_lock = PGWALK_RDLOCK,
};
/**
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eb3678360b97..e4f0266a22d4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1467,8 +1467,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
return ERR_PTR(-EFAULT);
- /* Full NUMA hinting faults to serialise migration in fault paths */
- if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
+ if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
return NULL;
if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
@@ -1613,7 +1612,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
* If other processes are mapping this folio, we couldn't discard
* the folio unless they all do MADV_FREE so let's skip the folio.
*/
- if (folio_mapcount(folio) != 1)
+ if (folio_estimated_sharers(folio) != 1)
goto out;
if (!folio_trylock(folio))
@@ -2521,7 +2520,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
struct address_space *swap_cache = NULL;
unsigned long offset = 0;
unsigned int nr = thp_nr_pages(head);
- int i;
+ int i, nr_dropped = 0;
/* complete memcg works before add pages to LRU */
split_page_memcg(head, nr);
@@ -2546,7 +2545,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
struct folio *tail = page_folio(head + i);
if (shmem_mapping(head->mapping))
- shmem_uncharge(head->mapping->host, 1);
+ nr_dropped++;
else if (folio_test_clear_dirty(tail))
folio_account_cleaned(tail,
inode_to_wb(folio->mapping->host));
@@ -2583,6 +2582,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
}
local_irq_enable();
+ if (nr_dropped)
+ shmem_uncharge(head->mapping->host, nr_dropped);
remap_page(folio, nr);
if (PageSwapCache(head)) {
diff --git a/mm/internal.h b/mm/internal.h
index a7d9e980429a..8ed127c1c808 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -924,6 +924,13 @@ int migrate_device_coherent_page(struct page *page);
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
int __must_check try_grab_page(struct page *page, unsigned int flags);
+/*
+ * mm/huge_memory.c
+ */
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd,
+ unsigned int flags);
+
enum {
/* mark page accessed */
FOLL_TOUCH = 1 << 16,
@@ -998,6 +1005,16 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
smp_rmb();
/*
+ * During GUP-fast we might not get called on the head page for a
+ * hugetlb page that is mapped using cont-PTE, because GUP-fast does
+ * not work with the abstracted hugetlb PTEs that always point at the
+ * head page. For hugetlb, PageAnonExclusive only applies on the head
+ * page (as it cannot be partially COW-shared), so lookup the head page.
+ */
+ if (unlikely(!PageHead(page) && PageHuge(page)))
+ page = compound_head(page);
+
+ /*
* Note that PageKsm() pages cannot be exclusive, and consequently,
* cannot get pinned.
*/
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 78c8d5d8b628..47d1d32c734f 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1955,10 +1955,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
goto xa_locked;
}
}
- if (!shmem_charge(mapping->host, 1)) {
- result = SCAN_FAIL;
- goto xa_locked;
- }
nr_none++;
continue;
}
@@ -2145,8 +2141,13 @@ xa_unlocked:
*/
try_to_unmap_flush();
- if (result != SCAN_SUCCEED)
+ if (result == SCAN_SUCCEED && nr_none &&
+ !shmem_charge(mapping->host, nr_none))
+ result = SCAN_FAIL;
+ if (result != SCAN_SUCCEED) {
+ nr_none = 0;
goto rollback;
+ }
/*
* The old pages are locked, so they won't change anymore.
@@ -2283,8 +2284,8 @@ rollback:
if (nr_none) {
xas_lock_irq(&xas);
mapping->nrpages -= nr_none;
- shmem_uncharge(mapping->host, nr_none);
xas_unlock_irq(&xas);
+ shmem_uncharge(mapping->host, nr_none);
}
list_for_each_entry_safe(page, tmp, &pagelist, lru) {
diff --git a/mm/ksm.c b/mm/ksm.c
index d20d7662419b..d7b5b95e936e 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -455,6 +455,12 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex
static const struct mm_walk_ops break_ksm_ops = {
.pmd_entry = break_ksm_pmd_entry,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static const struct mm_walk_ops break_ksm_lock_vma_ops = {
+ .pmd_entry = break_ksm_pmd_entry,
+ .walk_lock = PGWALK_WRLOCK,
};
/*
@@ -470,16 +476,17 @@ static const struct mm_walk_ops break_ksm_ops = {
* of the process that owns 'vma'. We also do not want to enforce
* protection keys here anyway.
*/
-static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
+static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
{
vm_fault_t ret = 0;
+ const struct mm_walk_ops *ops = lock_vma ?
+ &break_ksm_lock_vma_ops : &break_ksm_ops;
do {
int ksm_page;
cond_resched();
- ksm_page = walk_page_range_vma(vma, addr, addr + 1,
- &break_ksm_ops, NULL);
+ ksm_page = walk_page_range_vma(vma, addr, addr + 1, ops, NULL);
if (WARN_ON_ONCE(ksm_page < 0))
return ksm_page;
if (!ksm_page)
@@ -565,7 +572,7 @@ static void break_cow(struct ksm_rmap_item *rmap_item)
mmap_read_lock(mm);
vma = find_mergeable_vma(mm, addr);
if (vma)
- break_ksm(vma, addr);
+ break_ksm(vma, addr, false);
mmap_read_unlock(mm);
}
@@ -871,7 +878,7 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
* in cmp_and_merge_page on one of the rmap_items we would be removing.
*/
static int unmerge_ksm_pages(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end, bool lock_vma)
{
unsigned long addr;
int err = 0;
@@ -882,7 +889,7 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
if (signal_pending(current))
err = -ERESTARTSYS;
else
- err = break_ksm(vma, addr);
+ err = break_ksm(vma, addr, lock_vma);
}
return err;
}
@@ -1029,7 +1036,7 @@ static int unmerge_and_remove_all_rmap_items(void)
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
continue;
err = unmerge_ksm_pages(vma,
- vma->vm_start, vma->vm_end);
+ vma->vm_start, vma->vm_end, false);
if (err)
goto error;
}
@@ -2530,7 +2537,7 @@ static int __ksm_del_vma(struct vm_area_struct *vma)
return 0;
if (vma->anon_vma) {
- err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
+ err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
if (err)
return err;
}
@@ -2668,7 +2675,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
return 0; /* just ignore the advice */
if (vma->anon_vma) {
- err = unmerge_ksm_pages(vma, start, end);
+ err = unmerge_ksm_pages(vma, start, end, true);
if (err)
return err;
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 886f06066622..ec30f48f8f2e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -233,6 +233,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
static const struct mm_walk_ops swapin_walk_ops = {
.pmd_entry = swapin_walk_pmd_entry,
+ .walk_lock = PGWALK_RDLOCK,
};
static void shmem_swapin_range(struct vm_area_struct *vma,
@@ -383,7 +384,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
folio = pfn_folio(pmd_pfn(orig_pmd));
/* Do not interfere with other mappings of this folio */
- if (folio_mapcount(folio) != 1)
+ if (folio_estimated_sharers(folio) != 1)
goto huge_unlock;
if (pageout_anon_only_filter && !folio_test_anon(folio))
@@ -457,7 +458,7 @@ regular_folio:
if (folio_test_large(folio)) {
int err;
- if (folio_mapcount(folio) != 1)
+ if (folio_estimated_sharers(folio) != 1)
break;
if (pageout_anon_only_filter && !folio_test_anon(folio))
break;
@@ -534,6 +535,7 @@ regular_folio:
static const struct mm_walk_ops cold_walk_ops = {
.pmd_entry = madvise_cold_or_pageout_pte_range,
+ .walk_lock = PGWALK_RDLOCK,
};
static void madvise_cold_page_range(struct mmu_gather *tlb,
@@ -678,7 +680,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
if (folio_test_large(folio)) {
int err;
- if (folio_mapcount(folio) != 1)
+ if (folio_estimated_sharers(folio) != 1)
break;
if (!folio_trylock(folio))
break;
@@ -757,6 +759,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
static const struct mm_walk_ops madvise_free_walk_ops = {
.pmd_entry = madvise_free_pte_range,
+ .walk_lock = PGWALK_RDLOCK,
};
static int madvise_free_single_vma(struct vm_area_struct *vma,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e8ca4bdcb03c..315fd5f45e3c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6024,6 +6024,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
static const struct mm_walk_ops precharge_walk_ops = {
.pmd_entry = mem_cgroup_count_precharge_pte_range,
+ .walk_lock = PGWALK_RDLOCK,
};
static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
@@ -6303,6 +6304,7 @@ put: /* get_mctgt_type() gets & locks the page */
static const struct mm_walk_ops charge_walk_ops = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
+ .walk_lock = PGWALK_RDLOCK,
};
static void mem_cgroup_move_charge(void)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9a285038d765..fe121fdb05f7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -831,6 +831,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
static const struct mm_walk_ops hwp_walk_ops = {
.pmd_entry = hwpoison_pte_range,
.hugetlb_entry = hwpoison_hugetlb_range,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
@@ -2740,10 +2741,13 @@ retry:
if (ret > 0) {
ret = soft_offline_in_use_page(page);
} else if (ret == 0) {
- if (!page_handle_poison(page, true, false) && try_again) {
- try_again = false;
- flags &= ~MF_COUNT_INCREASED;
- goto retry;
+ if (!page_handle_poison(page, true, false)) {
+ if (try_again) {
+ try_again = false;
+ flags &= ~MF_COUNT_INCREASED;
+ goto retry;
+ }
+ ret = -EBUSY;
}
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c53f8beeb507..ec2eaceffd74 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -718,6 +718,14 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
.hugetlb_entry = queue_folios_hugetlb,
.pmd_entry = queue_folios_pte_range,
.test_walk = queue_pages_test_walk,
+ .walk_lock = PGWALK_RDLOCK,
+};
+
+static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
+ .hugetlb_entry = queue_folios_hugetlb,
+ .pmd_entry = queue_folios_pte_range,
+ .test_walk = queue_pages_test_walk,
+ .walk_lock = PGWALK_WRLOCK,
};
/*
@@ -738,7 +746,7 @@ static const struct mm_walk_ops queue_pages_walk_ops = {
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
nodemask_t *nodes, unsigned long flags,
- struct list_head *pagelist)
+ struct list_head *pagelist, bool lock_vma)
{
int err;
struct queue_pages qp = {
@@ -749,8 +757,10 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
.end = end,
.first = NULL,
};
+ const struct mm_walk_ops *ops = lock_vma ?
+ &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
- err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
+ err = walk_page_range(mm, start, end, ops, &qp);
if (!qp.first)
/* whole range in hole */
@@ -1078,7 +1088,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
vma = find_vma(mm, 0);
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
- flags | MPOL_MF_DISCONTIG_OK, &pagelist);
+ flags | MPOL_MF_DISCONTIG_OK, &pagelist, false);
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, alloc_migration_target, NULL,
@@ -1321,12 +1331,8 @@ static long do_mbind(unsigned long start, unsigned long len,
* Lock the VMAs before scanning for pages to migrate, to ensure we don't
* miss a concurrently inserted page.
*/
- vma_iter_init(&vmi, mm, start);
- for_each_vma_range(vmi, vma, end)
- vma_start_write(vma);
-
ret = queue_pages_range(mm, start, end, nmask,
- flags | MPOL_MF_INVERT, &pagelist);
+ flags | MPOL_MF_INVERT, &pagelist, true);
if (ret < 0) {
err = ret;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 8365158460ed..d5f492356e3e 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -279,6 +279,7 @@ next:
static const struct mm_walk_ops migrate_vma_walk_ops = {
.pmd_entry = migrate_vma_collect_pmd,
.pte_hole = migrate_vma_collect_hole,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
diff --git a/mm/mincore.c b/mm/mincore.c
index b7f7a516b26c..dad3622cc963 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -176,6 +176,7 @@ static const struct mm_walk_ops mincore_walk_ops = {
.pmd_entry = mincore_pte_range,
.pte_hole = mincore_unmapped_range,
.hugetlb_entry = mincore_hugetlb,
+ .walk_lock = PGWALK_RDLOCK,
};
/*
diff --git a/mm/mlock.c b/mm/mlock.c
index 0a0c996c5c21..479e09d0994c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -371,6 +371,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
{
static const struct mm_walk_ops mlock_walk_ops = {
.pmd_entry = mlock_pte_range,
+ .walk_lock = PGWALK_WRLOCK_VERIFY,
};
/*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 6f658d483704..3aef1340533a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -568,6 +568,7 @@ static const struct mm_walk_ops prot_none_walk_ops = {
.pte_entry = prot_none_pte_entry,
.hugetlb_entry = prot_none_hugetlb_entry,
.test_walk = prot_none_test,
+ .walk_lock = PGWALK_WRLOCK,
};
int
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d3f42009bb70..b8d3d7040a50 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1193,7 +1193,7 @@ static void wb_update_write_bandwidth(struct bdi_writeback *wb,
* write_bandwidth = ---------------------------------------------------
* period
*
- * @written may have decreased due to folio_account_redirty().
+ * @written may have decreased due to folio_redirty_for_writepage().
* Avoid underflowing @bw calculation.
*/
bw = written - min(written, wb->written_stamp);
@@ -2712,37 +2712,6 @@ bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
EXPORT_SYMBOL(filemap_dirty_folio);
/**
- * folio_account_redirty - Manually account for redirtying a page.
- * @folio: The folio which is being redirtied.
- *
- * Most filesystems should call folio_redirty_for_writepage() instead
- * of this fuction. If your filesystem is doing writeback outside the
- * context of a writeback_control(), it can call this when redirtying
- * a folio, to de-account the dirty counters (NR_DIRTIED, WB_DIRTIED,
- * tsk->nr_dirtied), so that they match the written counters (NR_WRITTEN,
- * WB_WRITTEN) in long term. The mismatches will lead to systematic errors
- * in balanced_dirty_ratelimit and the dirty pages position control.
- */
-void folio_account_redirty(struct folio *folio)
-{
- struct address_space *mapping = folio->mapping;
-
- if (mapping && mapping_can_writeback(mapping)) {
- struct inode *inode = mapping->host;
- struct bdi_writeback *wb;
- struct wb_lock_cookie cookie = {};
- long nr = folio_nr_pages(folio);
-
- wb = unlocked_inode_to_wb_begin(inode, &cookie);
- current->nr_dirtied -= nr;
- node_stat_mod_folio(folio, NR_DIRTIED, -nr);
- wb_stat_mod(wb, WB_DIRTIED, -nr);
- unlocked_inode_to_wb_end(inode, &cookie);
- }
-}
-EXPORT_SYMBOL(folio_account_redirty);
-
-/**
* folio_redirty_for_writepage - Decline to write a dirty folio.
* @wbc: The writeback control.
* @folio: The folio.
@@ -2757,13 +2726,23 @@ EXPORT_SYMBOL(folio_account_redirty);
bool folio_redirty_for_writepage(struct writeback_control *wbc,
struct folio *folio)
{
- bool ret;
+ struct address_space *mapping = folio->mapping;
long nr = folio_nr_pages(folio);
+ bool ret;
wbc->pages_skipped += nr;
- ret = filemap_dirty_folio(folio->mapping, folio);
- folio_account_redirty(folio);
+ ret = filemap_dirty_folio(mapping, folio);
+ if (mapping && mapping_can_writeback(mapping)) {
+ struct inode *inode = mapping->host;
+ struct bdi_writeback *wb;
+ struct wb_lock_cookie cookie = {};
+ wb = unlocked_inode_to_wb_begin(inode, &cookie);
+ current->nr_dirtied -= nr;
+ node_stat_mod_folio(folio, NR_DIRTIED, -nr);
+ wb_stat_mod(wb, WB_DIRTIED, -nr);
+ unlocked_inode_to_wb_end(inode, &cookie);
+ }
return ret;
}
EXPORT_SYMBOL(folio_redirty_for_writepage);
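For context, a hedged sketch of the typical caller that motivated folding the accounting in: a writeback path that has to bail out simply redirties the folio, and the NR_DIRTIED/WB_DIRTIED de-accounting now happens inside that one call. The function and condition below are hypothetical.

static int example_write_one_folio(struct folio *folio,
				   struct writeback_control *wbc)
{
	if (!example_can_write_now(folio)) {	/* hypothetical condition */
		/* declining to write: redirty and de-account in one call */
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		return 0;
	}
	/* ... submit the folio for writeback ... */
	return 0;
}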
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 2022333805d3..9b2d23fbf4d3 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -400,6 +400,33 @@ static int __walk_page_range(unsigned long start, unsigned long end,
return err;
}
+static inline void process_mm_walk_lock(struct mm_struct *mm,
+ enum page_walk_lock walk_lock)
+{
+ if (walk_lock == PGWALK_RDLOCK)
+ mmap_assert_locked(mm);
+ else
+ mmap_assert_write_locked(mm);
+}
+
+static inline void process_vma_walk_lock(struct vm_area_struct *vma,
+ enum page_walk_lock walk_lock)
+{
+#ifdef CONFIG_PER_VMA_LOCK
+ switch (walk_lock) {
+ case PGWALK_WRLOCK:
+ vma_start_write(vma);
+ break;
+ case PGWALK_WRLOCK_VERIFY:
+ vma_assert_write_locked(vma);
+ break;
+ case PGWALK_RDLOCK:
+ /* PGWALK_RDLOCK is handled by process_mm_walk_lock */
+ break;
+ }
+#endif
+}
+
/**
* walk_page_range - walk page table with caller specific callbacks
* @mm: mm_struct representing the target process of page table walk
@@ -459,7 +486,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (!walk.mm)
return -EINVAL;
- mmap_assert_locked(walk.mm);
+ process_mm_walk_lock(walk.mm, ops->walk_lock);
vma = find_vma(walk.mm, start);
do {
@@ -474,6 +501,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (ops->pte_hole)
err = ops->pte_hole(start, next, -1, &walk);
} else { /* inside vma */
+ process_vma_walk_lock(vma, ops->walk_lock);
walk.vma = vma;
next = min(end, vma->vm_end);
vma = find_vma(mm, vma->vm_end);
@@ -549,7 +577,8 @@ int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
if (start < vma->vm_start || end > vma->vm_end)
return -EINVAL;
- mmap_assert_locked(walk.mm);
+ process_mm_walk_lock(walk.mm, ops->walk_lock);
+ process_vma_walk_lock(vma, ops->walk_lock);
return __walk_page_range(start, end, &walk);
}
@@ -566,7 +595,8 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
if (!walk.mm)
return -EINVAL;
- mmap_assert_locked(walk.mm);
+ process_mm_walk_lock(walk.mm, ops->walk_lock);
+ process_vma_walk_lock(vma, ops->walk_lock);
return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
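A hedged sketch of the caller contract this introduces (the callback and ops below are hypothetical): a walker declares the strongest lock it needs via .walk_lock, walk_page_range() asserts mmap_lock accordingly, and for PGWALK_WRLOCK each visited VMA is additionally write-locked before the callbacks run.

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	/* inspect page-table entries here */
	return 0;
}

static const struct mm_walk_ops example_walk_ops = {
	.pmd_entry = example_pmd_entry,
	.walk_lock = PGWALK_RDLOCK,	/* read-only walk: mmap read lock suffices */
};

static void example_walk(struct mm_struct *mm,
			 unsigned long start, unsigned long end)
{
	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &example_walk_ops, NULL);
	mmap_read_unlock(mm);
}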
diff --git a/mm/readahead.c b/mm/readahead.c
index a9c999aa19af..e815c114de21 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -461,19 +461,6 @@ static int try_context_readahead(struct address_space *mapping,
return 1;
}
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER 8
-#endif
-
static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
pgoff_t mark, unsigned int order, gfp_t gfp)
{
diff --git a/mm/shmem.c b/mm/shmem.c
index f5af4b943e42..479d1ce65868 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -78,6 +78,7 @@ static struct vfsmount *shm_mnt;
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
+#include <linux/quotaops.h>
#include <linux/uaccess.h>
@@ -89,6 +90,9 @@ static struct vfsmount *shm_mnt;
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
+/* Pretend that one inode + its dentry occupy this much memory */
+#define BOGO_INODE_SIZE 1024
+
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
@@ -116,11 +120,14 @@ struct shmem_options {
int huge;
int seen;
bool noswap;
+ unsigned short quota_types;
+ struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
+#define SHMEM_SEEN_QUOTA 32
};
#ifdef CONFIG_TMPFS
@@ -133,7 +140,8 @@ static unsigned long shmem_default_max_inodes(void)
{
unsigned long nr_pages = totalram_pages();
- return min(nr_pages - totalhigh_pages(), nr_pages / 2);
+ return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
+ ULONG_MAX / BOGO_INODE_SIZE);
}
#endif
@@ -199,33 +207,47 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
-static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
+static int shmem_inode_acct_block(struct inode *inode, long pages)
{
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ int err = -ENOSPC;
if (shmem_acct_block(info->flags, pages))
- return false;
+ return err;
+ might_sleep(); /* when quotas */
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
sbinfo->max_blocks - pages) > 0)
goto unacct;
+
+ err = dquot_alloc_block_nodirty(inode, pages);
+ if (err)
+ goto unacct;
+
percpu_counter_add(&sbinfo->used_blocks, pages);
+ } else {
+ err = dquot_alloc_block_nodirty(inode, pages);
+ if (err)
+ goto unacct;
}
- return true;
+ return 0;
unacct:
shmem_unacct_blocks(info->flags, pages);
- return false;
+ return err;
}
-static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
+static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ might_sleep(); /* when quotas */
+ dquot_free_block_nodirty(inode, pages);
+
if (sbinfo->max_blocks)
percpu_counter_sub(&sbinfo->used_blocks, pages);
shmem_unacct_blocks(info->flags, pages);
@@ -254,6 +276,47 @@ bool vma_is_shmem(struct vm_area_struct *vma)
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
+#ifdef CONFIG_TMPFS_QUOTA
+
+static int shmem_enable_quotas(struct super_block *sb,
+ unsigned short quota_types)
+{
+ int type, err = 0;
+
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
+ for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
+ if (!(quota_types & (1 << type)))
+ continue;
+ err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
+ DQUOT_USAGE_ENABLED |
+ DQUOT_LIMITS_ENABLED);
+ if (err)
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
+ type, err);
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+ return err;
+}
+
+static void shmem_disable_quotas(struct super_block *sb)
+{
+ int type;
+
+ for (type = 0; type < SHMEM_MAXQUOTAS; type++)
+ dquot_quota_off(sb, type);
+}
+
+static struct dquot **shmem_get_dquots(struct inode *inode)
+{
+ return SHMEM_I(inode)->i_dquot;
+}
+#endif /* CONFIG_TMPFS_QUOTA */
+
/*
* shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
* produces a novel ino for the newly allocated inode.
@@ -272,11 +335,11 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
if (!(sb->s_flags & SB_KERNMOUNT)) {
raw_spin_lock(&sbinfo->stat_lock);
if (sbinfo->max_inodes) {
- if (!sbinfo->free_inodes) {
+ if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
raw_spin_unlock(&sbinfo->stat_lock);
return -ENOSPC;
}
- sbinfo->free_inodes--;
+ sbinfo->free_ispace -= BOGO_INODE_SIZE;
}
if (inop) {
ino = sbinfo->next_ino++;
@@ -330,12 +393,12 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
return 0;
}
-static void shmem_free_inode(struct super_block *sb)
+static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (sbinfo->max_inodes) {
raw_spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
+ sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
raw_spin_unlock(&sbinfo->stat_lock);
}
}
@@ -343,62 +406,65 @@ static void shmem_free_inode(struct super_block *sb)
/**
* shmem_recalc_inode - recalculate the block usage of an inode
* @inode: inode to recalc
+ * @alloced: the change in number of pages allocated to inode
+ * @swapped: the change in number of pages swapped from inode
*
* We have to calculate the free blocks since the mm can drop
* undirtied hole pages behind our back.
*
* But normally info->alloced == inode->i_mapping->nrpages + info->swapped
* So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
- *
- * It has to be called with the spinlock held.
*/
-static void shmem_recalc_inode(struct inode *inode)
+static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
struct shmem_inode_info *info = SHMEM_I(inode);
long freed;
- freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
- if (freed > 0) {
+ spin_lock(&info->lock);
+ info->alloced += alloced;
+ info->swapped += swapped;
+ freed = info->alloced - info->swapped -
+ READ_ONCE(inode->i_mapping->nrpages);
+ /*
+ * Special case: whereas normally shmem_recalc_inode() is called
+ * after i_mapping->nrpages has already been adjusted (up or down),
+ * shmem_writepage() has to raise swapped before nrpages is lowered -
+ * to stop a racing shmem_recalc_inode() from thinking that a page has
+ * been freed. Compensate here, to avoid the need for a followup call.
+ */
+ if (swapped > 0)
+ freed += swapped;
+ if (freed > 0)
info->alloced -= freed;
- inode->i_blocks -= freed * BLOCKS_PER_PAGE;
+ spin_unlock(&info->lock);
+
+ /* The quota case may block */
+ if (freed > 0)
shmem_inode_unacct_blocks(inode, freed);
- }
}
bool shmem_charge(struct inode *inode, long pages)
{
- struct shmem_inode_info *info = SHMEM_I(inode);
- unsigned long flags;
+ struct address_space *mapping = inode->i_mapping;
- if (!shmem_inode_acct_block(inode, pages))
+ if (shmem_inode_acct_block(inode, pages))
return false;
/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
- inode->i_mapping->nrpages += pages;
-
- spin_lock_irqsave(&info->lock, flags);
- info->alloced += pages;
- inode->i_blocks += pages * BLOCKS_PER_PAGE;
- shmem_recalc_inode(inode);
- spin_unlock_irqrestore(&info->lock, flags);
+ xa_lock_irq(&mapping->i_pages);
+ mapping->nrpages += pages;
+ xa_unlock_irq(&mapping->i_pages);
+ shmem_recalc_inode(inode, pages, 0);
return true;
}
void shmem_uncharge(struct inode *inode, long pages)
{
- struct shmem_inode_info *info = SHMEM_I(inode);
- unsigned long flags;
-
+ /* pages argument is currently unused: keep it to help debugging */
/* nrpages adjustment done by __filemap_remove_folio() or caller */
- spin_lock_irqsave(&info->lock, flags);
- info->alloced -= pages;
- inode->i_blocks -= pages * BLOCKS_PER_PAGE;
- shmem_recalc_inode(inode);
- spin_unlock_irqrestore(&info->lock, flags);
-
- shmem_inode_unacct_blocks(inode, pages);
+ shmem_recalc_inode(inode, 0, 0);
}
/*
@@ -806,14 +872,16 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
XA_STATE(xas, &mapping->i_pages, start);
struct page *page;
unsigned long swapped = 0;
+ unsigned long max = end - 1;
rcu_read_lock();
- xas_for_each(&xas, page, end - 1) {
+ xas_for_each(&xas, page, max) {
if (xas_retry(&xas, page))
continue;
if (xa_is_value(page))
swapped++;
-
+ if (xas.xa_index == max)
+ break;
if (need_resched()) {
xas_pause(&xas);
cond_resched_rcu();
@@ -1038,16 +1106,13 @@ whole_folios:
folio_batch_release(&fbatch);
}
- spin_lock_irq(&info->lock);
- info->swapped -= nr_swaps_freed;
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
+ shmem_recalc_inode(inode, 0, -nr_swaps_freed);
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode->i_mtime = inode_set_ctime_current(inode);
inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -1059,11 +1124,9 @@ static int shmem_getattr(struct mnt_idmap *idmap,
struct inode *inode = path->dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
- if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
- spin_lock_irq(&info->lock);
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
- }
+ if (info->alloced - info->swapped != inode->i_mapping->nrpages)
+ shmem_recalc_inode(inode, 0, 0);
+
if (info->fsflags & FS_APPEND_FL)
stat->attributes |= STATX_ATTR_APPEND;
if (info->fsflags & FS_IMMUTABLE_FL)
@@ -1073,7 +1136,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- generic_fillattr(idmap, inode, stat);
+ generic_fillattr(idmap, request_mask, inode, stat);
if (shmem_is_huge(inode, 0, false, NULL, 0))
stat->blksize = HPAGE_PMD_SIZE;
@@ -1140,13 +1203,28 @@ static int shmem_setattr(struct mnt_idmap *idmap,
}
}
+ if (is_quota_modification(idmap, inode, attr)) {
+ error = dquot_initialize(inode);
+ if (error)
+ return error;
+ }
+
+ /* Transfer quota accounting */
+ if (i_uid_needs_update(idmap, attr, inode) ||
+ i_gid_needs_update(idmap, attr, inode)) {
+ error = dquot_transfer(idmap, inode, attr);
+
+ if (error)
+ return error;
+ }
+
setattr_copy(idmap, inode, attr);
if (attr->ia_valid & ATTR_MODE)
error = posix_acl_chmod(idmap, dentry, inode->i_mode);
if (!error && update_ctime) {
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
if (update_mtime)
- inode->i_mtime = inode->i_ctime;
+ inode->i_mtime = inode_get_ctime(inode);
inode_inc_iversion(inode);
}
return error;
@@ -1156,6 +1234,7 @@ static void shmem_evict_inode(struct inode *inode)
{
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ size_t freed = 0;
if (shmem_mapping(inode->i_mapping)) {
shmem_unacct_size(info->flags, inode->i_size);
@@ -1182,10 +1261,14 @@ static void shmem_evict_inode(struct inode *inode)
}
}
- simple_xattrs_free(&info->xattrs);
+ simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
+ shmem_free_inode(inode->i_sb, freed);
WARN_ON(inode->i_blocks);
- shmem_free_inode(inode->i_sb);
clear_inode(inode);
+#ifdef CONFIG_TMPFS_QUOTA
+ dquot_free_inode(inode);
+ dquot_drop(inode);
+#endif
}
static int shmem_find_swap_entries(struct address_space *mapping,
@@ -1429,11 +1512,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (add_to_swap_cache(folio, swap,
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
NULL) == 0) {
- spin_lock_irq(&info->lock);
- shmem_recalc_inode(inode);
- info->swapped++;
- spin_unlock_irq(&info->lock);
-
+ shmem_recalc_inode(inode, 0, 1);
swap_shmem_alloc(swap);
shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
@@ -1588,13 +1667,14 @@ static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
struct shmem_inode_info *info = SHMEM_I(inode);
struct folio *folio;
int nr;
- int err = -ENOSPC;
+ int err;
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
huge = false;
nr = huge ? HPAGE_PMD_NR : 1;
- if (!shmem_inode_acct_block(inode, nr))
+ err = shmem_inode_acct_block(inode, nr);
+ if (err)
goto failed;
if (huge)
@@ -1703,7 +1783,6 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
struct folio *folio, swp_entry_t swap)
{
struct address_space *mapping = inode->i_mapping;
- struct shmem_inode_info *info = SHMEM_I(inode);
swp_entry_t swapin_error;
void *old;
@@ -1716,16 +1795,12 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
folio_wait_writeback(folio);
delete_from_swap_cache(folio);
- spin_lock_irq(&info->lock);
/*
- * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
- * be 0 when inode is released and thus trigger WARN_ON(inode->i_blocks) in
- * shmem_evict_inode.
+ * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks
+ * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks)
+ * in shmem_evict_inode().
*/
- info->alloced--;
- info->swapped--;
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
+ shmem_recalc_inode(inode, -1, -1);
swap_free(swap);
}
@@ -1812,10 +1887,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
if (error)
goto failed;
- spin_lock_irq(&info->lock);
- info->swapped--;
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
+ shmem_recalc_inode(inode, 0, -1);
if (sgp == SGP_WRITE)
folio_mark_accessed(folio);
@@ -1980,13 +2052,9 @@ alloc_nohuge:
charge_mm);
if (error)
goto unacct;
- folio_add_lru(folio);
- spin_lock_irq(&info->lock);
- info->alloced += folio_nr_pages(folio);
- inode->i_blocks += (blkcnt_t)BLOCKS_PER_PAGE << folio_order(folio);
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
+ folio_add_lru(folio);
+ shmem_recalc_inode(inode, folio_nr_pages(folio), 0);
alloced = true;
if (folio_test_pmd_mappable(folio) &&
@@ -2035,9 +2103,7 @@ clear:
if (alloced) {
folio_clear_dirty(folio);
filemap_remove_folio(folio);
- spin_lock_irq(&info->lock);
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
+ shmem_recalc_inode(inode, 0, 0);
}
error = -EINVAL;
goto unlock;
@@ -2063,9 +2129,7 @@ unlock:
folio_put(folio);
}
if (error == -ENOSPC && !once++) {
- spin_lock_irq(&info->lock);
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
+ shmem_recalc_inode(inode, 0, 0);
goto repeat;
}
if (error == -EEXIST)
@@ -2326,6 +2390,12 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+static int shmem_file_open(struct inode *inode, struct file *file)
+{
+ file->f_mode |= FMODE_CAN_ODIRECT;
+ return generic_file_open(inode, file);
+}
+
#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
@@ -2355,77 +2425,127 @@ static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags)
#define shmem_initxattrs NULL
#endif
-static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb,
- struct inode *dir, umode_t mode, dev_t dev,
- unsigned long flags)
+static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode)
+{
+ return &SHMEM_I(inode)->dir_offsets;
+}
+
+static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
+ struct super_block *sb,
+ struct inode *dir, umode_t mode,
+ dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
ino_t ino;
+ int err;
+
+ err = shmem_reserve_inode(sb, &ino);
+ if (err)
+ return ERR_PTR(err);
- if (shmem_reserve_inode(sb, &ino))
- return NULL;
inode = new_inode(sb);
- if (inode) {
- inode->i_ino = ino;
- inode_init_owner(idmap, inode, dir, mode);
- inode->i_blocks = 0;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
- inode->i_generation = get_random_u32();
- info = SHMEM_I(inode);
- memset(info, 0, (char *)inode - (char *)info);
- spin_lock_init(&info->lock);
- atomic_set(&info->stop_eviction, 0);
- info->seals = F_SEAL_SEAL;
- info->flags = flags & VM_NORESERVE;
- info->i_crtime = inode->i_mtime;
- info->fsflags = (dir == NULL) ? 0 :
- SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
- if (info->fsflags)
- shmem_set_inode_flags(inode, info->fsflags);
- INIT_LIST_HEAD(&info->shrinklist);
- INIT_LIST_HEAD(&info->swaplist);
- if (sbinfo->noswap)
- mapping_set_unevictable(inode->i_mapping);
- simple_xattrs_init(&info->xattrs);
- cache_no_acl(inode);
- mapping_set_large_folios(inode->i_mapping);
-
- switch (mode & S_IFMT) {
- default:
- inode->i_op = &shmem_special_inode_operations;
- init_special_inode(inode, mode, dev);
- break;
- case S_IFREG:
- inode->i_mapping->a_ops = &shmem_aops;
- inode->i_op = &shmem_inode_operations;
- inode->i_fop = &shmem_file_operations;
- mpol_shared_policy_init(&info->policy,
- shmem_get_sbmpol(sbinfo));
- break;
- case S_IFDIR:
- inc_nlink(inode);
- /* Some things misbehave if size == 0 on a directory */
- inode->i_size = 2 * BOGO_DIRENT_SIZE;
- inode->i_op = &shmem_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- break;
- case S_IFLNK:
- /*
- * Must not load anything in the rbtree,
- * mpol_free_shared_policy will not be called.
- */
- mpol_shared_policy_init(&info->policy, NULL);
- break;
- }
+ if (!inode) {
+ shmem_free_inode(sb, 0);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ inode->i_ino = ino;
+ inode_init_owner(idmap, inode, dir, mode);
+ inode->i_blocks = 0;
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ inode->i_generation = get_random_u32();
+ info = SHMEM_I(inode);
+ memset(info, 0, (char *)inode - (char *)info);
+ spin_lock_init(&info->lock);
+ atomic_set(&info->stop_eviction, 0);
+ info->seals = F_SEAL_SEAL;
+ info->flags = flags & VM_NORESERVE;
+ info->i_crtime = inode->i_mtime;
+ info->fsflags = (dir == NULL) ? 0 :
+ SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
+ if (info->fsflags)
+ shmem_set_inode_flags(inode, info->fsflags);
+ INIT_LIST_HEAD(&info->shrinklist);
+ INIT_LIST_HEAD(&info->swaplist);
+ if (sbinfo->noswap)
+ mapping_set_unevictable(inode->i_mapping);
+ simple_xattrs_init(&info->xattrs);
+ cache_no_acl(inode);
+ mapping_set_large_folios(inode->i_mapping);
+
+ switch (mode & S_IFMT) {
+ default:
+ inode->i_op = &shmem_special_inode_operations;
+ init_special_inode(inode, mode, dev);
+ break;
+ case S_IFREG:
+ inode->i_mapping->a_ops = &shmem_aops;
+ inode->i_op = &shmem_inode_operations;
+ inode->i_fop = &shmem_file_operations;
+ mpol_shared_policy_init(&info->policy,
+ shmem_get_sbmpol(sbinfo));
+ break;
+ case S_IFDIR:
+ inc_nlink(inode);
+ /* Some things misbehave if size == 0 on a directory */
+ inode->i_size = 2 * BOGO_DIRENT_SIZE;
+ inode->i_op = &shmem_dir_inode_operations;
+ inode->i_fop = &simple_offset_dir_operations;
+ simple_offset_init(shmem_get_offset_ctx(inode));
+ break;
+ case S_IFLNK:
+ /*
+ * Must not load anything in the rbtree,
+ * mpol_free_shared_policy will not be called.
+ */
+ mpol_shared_policy_init(&info->policy, NULL);
+ break;
+ }
+
+ lockdep_annotate_inode_mutex_key(inode);
+ return inode;
+}
- lockdep_annotate_inode_mutex_key(inode);
- } else
- shmem_free_inode(sb);
+#ifdef CONFIG_TMPFS_QUOTA
+static struct inode *shmem_get_inode(struct mnt_idmap *idmap,
+ struct super_block *sb, struct inode *dir,
+ umode_t mode, dev_t dev, unsigned long flags)
+{
+ int err;
+ struct inode *inode;
+
+ inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
+ if (IS_ERR(inode))
+ return inode;
+
+ err = dquot_initialize(inode);
+ if (err)
+ goto errout;
+
+ err = dquot_alloc_inode(inode);
+ if (err) {
+ dquot_drop(inode);
+ goto errout;
+ }
return inode;
+
+errout:
+ inode->i_flags |= S_NOQUOTA;
+ iput(inode);
+ return ERR_PTR(err);
+}
+#else
+static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
+ struct super_block *sb, struct inode *dir,
+ umode_t mode, dev_t dev, unsigned long flags)
+{
+ return __shmem_get_inode(idmap, sb, dir, mode, dev, flags);
}
+#endif /* CONFIG_TMPFS_QUOTA */
#ifdef CONFIG_USERFAULTFD
int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
@@ -2445,7 +2565,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
int ret;
pgoff_t max_off;
- if (!shmem_inode_acct_block(inode, 1)) {
+ if (shmem_inode_acct_block(inode, 1)) {
/*
* We may have got a page, returned -ENOENT triggering a retry,
* and now we find ourselves with -ENOMEM. Release the page, to
@@ -2527,12 +2647,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
if (ret)
goto out_delete_from_cache;
- spin_lock_irq(&info->lock);
- info->alloced++;
- inode->i_blocks += BLOCKS_PER_PAGE;
- shmem_recalc_inode(inode);
- spin_unlock_irq(&info->lock);
-
+ shmem_recalc_inode(inode, 1, 0);
folio_unlock(folio);
return 0;
out_delete_from_cache:
@@ -2731,6 +2846,28 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
return retval ? retval : error;
}
+static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto unlock;
+ ret = file_remove_privs(file);
+ if (ret)
+ goto unlock;
+ ret = file_update_time(file);
+ if (ret)
+ goto unlock;
+ ret = generic_perform_write(iocb, from);
+unlock:
+ inode_unlock(inode);
+ return ret;
+}
+
static bool zero_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
@@ -3055,7 +3192,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
}
if (sbinfo->max_inodes) {
buf->f_files = sbinfo->max_inodes;
- buf->f_ffree = sbinfo->free_inodes;
+ buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE;
}
/* else leave those fields 0 like simple_statfs */
@@ -3072,27 +3209,32 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
- int error = -ENOSPC;
+ int error;
inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
- if (inode) {
- error = simple_acl_create(dir, inode);
- if (error)
- goto out_iput;
- error = security_inode_init_security(inode, dir,
- &dentry->d_name,
- shmem_initxattrs, NULL);
- if (error && error != -EOPNOTSUPP)
- goto out_iput;
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
- error = 0;
- dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_ctime = dir->i_mtime = current_time(dir);
- inode_inc_iversion(dir);
- d_instantiate(dentry, inode);
- dget(dentry); /* Extra count - pin the dentry in core */
- }
+ error = simple_acl_create(dir, inode);
+ if (error)
+ goto out_iput;
+ error = security_inode_init_security(inode, dir,
+ &dentry->d_name,
+ shmem_initxattrs, NULL);
+ if (error && error != -EOPNOTSUPP)
+ goto out_iput;
+
+ error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
+ if (error)
+ goto out_iput;
+
+ dir->i_size += BOGO_DIRENT_SIZE;
+ dir->i_mtime = inode_set_ctime_current(dir);
+ inode_inc_iversion(dir);
+ d_instantiate(dentry, inode);
+ dget(dentry); /* Extra count - pin the dentry in core */
return error;
+
out_iput:
iput(inode);
return error;
@@ -3103,20 +3245,26 @@ shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
struct file *file, umode_t mode)
{
struct inode *inode;
- int error = -ENOSPC;
+ int error;
inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
- if (inode) {
- error = security_inode_init_security(inode, dir,
- NULL,
- shmem_initxattrs, NULL);
- if (error && error != -EOPNOTSUPP)
- goto out_iput;
- error = simple_acl_create(dir, inode);
- if (error)
- goto out_iput;
- d_tmpfile(file, inode);
+
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
+ goto err_out;
}
+
+ error = security_inode_init_security(inode, dir,
+ NULL,
+ shmem_initxattrs, NULL);
+ if (error && error != -EOPNOTSUPP)
+ goto out_iput;
+ error = simple_acl_create(dir, inode);
+ if (error)
+ goto out_iput;
+ d_tmpfile(file, inode);
+
+err_out:
return finish_open_simple(file, error);
out_iput:
iput(inode);
@@ -3162,8 +3310,16 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
goto out;
}
+ ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
+ if (ret) {
+ if (inode->i_nlink)
+ shmem_free_inode(inode->i_sb, 0);
+ goto out;
+ }
+
dir->i_size += BOGO_DIRENT_SIZE;
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ dir->i_mtime = inode_set_ctime_to_ts(dir,
+ inode_set_ctime_current(inode));
inode_inc_iversion(dir);
inc_nlink(inode);
ihold(inode); /* New dentry reference */
@@ -3178,10 +3334,13 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = d_inode(dentry);
if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
- shmem_free_inode(inode->i_sb);
+ shmem_free_inode(inode->i_sb, 0);
+
+ simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
dir->i_size -= BOGO_DIRENT_SIZE;
- inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ dir->i_mtime = inode_set_ctime_to_ts(dir,
+ inode_set_ctime_current(inode));
inode_inc_iversion(dir);
drop_nlink(inode);
dput(dentry); /* Undo the count from "create" - this does all the work */
@@ -3238,24 +3397,29 @@ static int shmem_rename2(struct mnt_idmap *idmap,
{
struct inode *inode = d_inode(old_dentry);
int they_are_dirs = S_ISDIR(inode->i_mode);
+ int error;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
if (flags & RENAME_EXCHANGE)
- return simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
+ return simple_offset_rename_exchange(old_dir, old_dentry,
+ new_dir, new_dentry);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
if (flags & RENAME_WHITEOUT) {
- int error;
-
error = shmem_whiteout(idmap, old_dir, old_dentry);
if (error)
return error;
}
+ simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
+ error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
+ if (error)
+ return error;
+
if (d_really_is_positive(new_dentry)) {
(void) shmem_unlink(new_dir, new_dentry);
if (they_are_dirs) {
@@ -3269,9 +3433,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
old_dir->i_size -= BOGO_DIRENT_SIZE;
new_dir->i_size += BOGO_DIRENT_SIZE;
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- inode->i_ctime = current_time(old_dir);
+ simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
return 0;
@@ -3291,31 +3453,32 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
VM_NORESERVE);
- if (!inode)
- return -ENOSPC;
+
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
error = security_inode_init_security(inode, dir, &dentry->d_name,
shmem_initxattrs, NULL);
- if (error && error != -EOPNOTSUPP) {
- iput(inode);
- return error;
- }
+ if (error && error != -EOPNOTSUPP)
+ goto out_iput;
+
+ error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
+ if (error)
+ goto out_iput;
inode->i_size = len-1;
if (len <= SHORT_SYMLINK_LEN) {
inode->i_link = kmemdup(symname, len, GFP_KERNEL);
if (!inode->i_link) {
- iput(inode);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto out_remove_offset;
}
inode->i_op = &shmem_short_symlink_operations;
} else {
inode_nohighmem(inode);
error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
- if (error) {
- iput(inode);
- return error;
- }
+ if (error)
+ goto out_remove_offset;
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
memcpy(folio_address(folio), symname, len);
@@ -3325,11 +3488,17 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
folio_put(folio);
}
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_ctime = dir->i_mtime = current_time(dir);
+ dir->i_mtime = inode_set_ctime_current(dir);
inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry);
return 0;
+
+out_remove_offset:
+ simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
+out_iput:
+ iput(inode);
+ return error;
}
static void shmem_put_link(void *arg)
@@ -3397,7 +3566,7 @@ static int shmem_fileattr_set(struct mnt_idmap *idmap,
(fa->flags & SHMEM_FL_USER_MODIFIABLE);
shmem_set_inode_flags(inode, info->fsflags);
- inode->i_ctime = current_time(inode);
+ inode_set_ctime_current(inode);
inode_inc_iversion(inode);
return 0;
}
@@ -3417,21 +3586,40 @@ static int shmem_initxattrs(struct inode *inode,
void *fs_info)
{
struct shmem_inode_info *info = SHMEM_I(inode);
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
const struct xattr *xattr;
struct simple_xattr *new_xattr;
+ size_t ispace = 0;
size_t len;
+ if (sbinfo->max_inodes) {
+ for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+ ispace += simple_xattr_space(xattr->name,
+ xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
+ }
+ if (ispace) {
+ raw_spin_lock(&sbinfo->stat_lock);
+ if (sbinfo->free_ispace < ispace)
+ ispace = 0;
+ else
+ sbinfo->free_ispace -= ispace;
+ raw_spin_unlock(&sbinfo->stat_lock);
+ if (!ispace)
+ return -ENOSPC;
+ }
+ }
+
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
if (!new_xattr)
- return -ENOMEM;
+ break;
len = strlen(xattr->name) + 1;
new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!new_xattr->name) {
kvfree(new_xattr);
- return -ENOMEM;
+ break;
}
memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
@@ -3442,6 +3630,16 @@ static int shmem_initxattrs(struct inode *inode,
simple_xattr_add(&info->xattrs, new_xattr);
}
+ if (xattr->name != NULL) {
+ if (ispace) {
+ raw_spin_lock(&sbinfo->stat_lock);
+ sbinfo->free_ispace += ispace;
+ raw_spin_unlock(&sbinfo->stat_lock);
+ }
+ simple_xattrs_free(&info->xattrs, NULL);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -3462,15 +3660,40 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
size_t size, int flags)
{
struct shmem_inode_info *info = SHMEM_I(inode);
- int err;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ struct simple_xattr *old_xattr;
+ size_t ispace = 0;
name = xattr_full_name(handler, name);
- err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
- if (!err) {
- inode->i_ctime = current_time(inode);
+ if (value && sbinfo->max_inodes) {
+ ispace = simple_xattr_space(name, size);
+ raw_spin_lock(&sbinfo->stat_lock);
+ if (sbinfo->free_ispace < ispace)
+ ispace = 0;
+ else
+ sbinfo->free_ispace -= ispace;
+ raw_spin_unlock(&sbinfo->stat_lock);
+ if (!ispace)
+ return -ENOSPC;
+ }
+
+ old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
+ if (!IS_ERR(old_xattr)) {
+ ispace = 0;
+ if (old_xattr && sbinfo->max_inodes)
+ ispace = simple_xattr_space(old_xattr->name,
+ old_xattr->size);
+ simple_xattr_free(old_xattr);
+ old_xattr = NULL;
+ inode_set_ctime_current(inode);
inode_inc_iversion(inode);
}
- return err;
+ if (ispace) {
+ raw_spin_lock(&sbinfo->stat_lock);
+ sbinfo->free_ispace += ispace;
+ raw_spin_unlock(&sbinfo->stat_lock);
+ }
+ return PTR_ERR(old_xattr);
}
static const struct xattr_handler shmem_security_xattr_handler = {
@@ -3485,9 +3708,16 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
.set = shmem_xattr_handler_set,
};
+static const struct xattr_handler shmem_user_xattr_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .get = shmem_xattr_handler_get,
+ .set = shmem_xattr_handler_set,
+};
+
static const struct xattr_handler *shmem_xattr_handlers[] = {
&shmem_security_xattr_handler,
&shmem_trusted_xattr_handler,
+ &shmem_user_xattr_handler,
NULL
};
@@ -3500,6 +3730,7 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
static const struct inode_operations shmem_short_symlink_operations = {
.getattr = shmem_getattr,
+ .setattr = shmem_setattr,
.get_link = simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
.listxattr = shmem_listxattr,
@@ -3508,6 +3739,7 @@ static const struct inode_operations shmem_short_symlink_operations = {
static const struct inode_operations shmem_symlink_inode_operations = {
.getattr = shmem_getattr,
+ .setattr = shmem_setattr,
.get_link = shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
.listxattr = shmem_listxattr,
@@ -3607,6 +3839,13 @@ enum shmem_param {
Opt_inode32,
Opt_inode64,
Opt_noswap,
+ Opt_quota,
+ Opt_usrquota,
+ Opt_grpquota,
+ Opt_usrquota_block_hardlimit,
+ Opt_usrquota_inode_hardlimit,
+ Opt_grpquota_block_hardlimit,
+ Opt_grpquota_inode_hardlimit,
};
static const struct constant_table shmem_param_enums_huge[] = {
@@ -3629,6 +3868,15 @@ const struct fs_parameter_spec shmem_fs_parameters[] = {
fsparam_flag ("inode32", Opt_inode32),
fsparam_flag ("inode64", Opt_inode64),
fsparam_flag ("noswap", Opt_noswap),
+#ifdef CONFIG_TMPFS_QUOTA
+ fsparam_flag ("quota", Opt_quota),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
+ fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
+ fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
+ fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
+#endif
{}
};
@@ -3639,6 +3887,8 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
unsigned long long size;
char *rest;
int opt;
+ kuid_t kuid;
+ kgid_t kgid;
opt = fs_parse(fc, shmem_fs_parameters, param, &result);
if (opt < 0)
@@ -3660,13 +3910,13 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_nr_blocks:
ctx->blocks = memparse(param->string, &rest);
- if (*rest || ctx->blocks > S64_MAX)
+ if (*rest || ctx->blocks > LONG_MAX)
goto bad_value;
ctx->seen |= SHMEM_SEEN_BLOCKS;
break;
case Opt_nr_inodes:
ctx->inodes = memparse(param->string, &rest);
- if (*rest)
+ if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
goto bad_value;
ctx->seen |= SHMEM_SEEN_INODES;
break;
@@ -3674,14 +3924,32 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
ctx->mode = result.uint_32 & 07777;
break;
case Opt_uid:
- ctx->uid = make_kuid(current_user_ns(), result.uint_32);
- if (!uid_valid(ctx->uid))
+ kuid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(kuid))
+ goto bad_value;
+
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fc->user_ns, kuid))
goto bad_value;
+
+ ctx->uid = kuid;
break;
case Opt_gid:
- ctx->gid = make_kgid(current_user_ns(), result.uint_32);
- if (!gid_valid(ctx->gid))
+ kgid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(kgid))
+ goto bad_value;
+
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fc->user_ns, kgid))
goto bad_value;
+
+ ctx->gid = kgid;
break;
case Opt_huge:
ctx->huge = result.uint_32;
@@ -3720,6 +3988,60 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
ctx->noswap = true;
ctx->seen |= SHMEM_SEEN_NOSWAP;
break;
+ case Opt_quota:
+ if (fc->user_ns != &init_user_ns)
+ return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
+ ctx->seen |= SHMEM_SEEN_QUOTA;
+ ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
+ break;
+ case Opt_usrquota:
+ if (fc->user_ns != &init_user_ns)
+ return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
+ ctx->seen |= SHMEM_SEEN_QUOTA;
+ ctx->quota_types |= QTYPE_MASK_USR;
+ break;
+ case Opt_grpquota:
+ if (fc->user_ns != &init_user_ns)
+ return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
+ ctx->seen |= SHMEM_SEEN_QUOTA;
+ ctx->quota_types |= QTYPE_MASK_GRP;
+ break;
+ case Opt_usrquota_block_hardlimit:
+ size = memparse(param->string, &rest);
+ if (*rest || !size)
+ goto bad_value;
+ if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
+ return invalfc(fc,
+ "User quota block hardlimit too large.");
+ ctx->qlimits.usrquota_bhardlimit = size;
+ break;
+ case Opt_grpquota_block_hardlimit:
+ size = memparse(param->string, &rest);
+ if (*rest || !size)
+ goto bad_value;
+ if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
+ return invalfc(fc,
+ "Group quota block hardlimit too large.");
+ ctx->qlimits.grpquota_bhardlimit = size;
+ break;
+ case Opt_usrquota_inode_hardlimit:
+ size = memparse(param->string, &rest);
+ if (*rest || !size)
+ goto bad_value;
+ if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
+ return invalfc(fc,
+ "User quota inode hardlimit too large.");
+ ctx->qlimits.usrquota_ihardlimit = size;
+ break;
+ case Opt_grpquota_inode_hardlimit:
+ size = memparse(param->string, &rest);
+ if (*rest || !size)
+ goto bad_value;
+ if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
+ return invalfc(fc,
+ "Group quota inode hardlimit too large.");
+ ctx->qlimits.grpquota_ihardlimit = size;
+ break;
}
return 0;
@@ -3775,21 +4097,17 @@ static int shmem_parse_options(struct fs_context *fc, void *data)
/*
* Reconfigure a shmem filesystem.
- *
- * Note that we disallow change from limited->unlimited blocks/inodes while any
- * are in use; but we must separately disallow unlimited->limited, because in
- * that case we have no record of how much is already in use.
*/
static int shmem_reconfigure(struct fs_context *fc)
{
struct shmem_options *ctx = fc->fs_private;
struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
- unsigned long inodes;
+ unsigned long used_isp;
struct mempolicy *mpol = NULL;
const char *err;
raw_spin_lock(&sbinfo->stat_lock);
- inodes = sbinfo->max_inodes - sbinfo->free_inodes;
+ used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace;
if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
if (!sbinfo->max_blocks) {
@@ -3807,7 +4125,7 @@ static int shmem_reconfigure(struct fs_context *fc)
err = "Cannot retroactively limit inodes";
goto out;
}
- if (ctx->inodes < inodes) {
+ if (ctx->inodes * BOGO_INODE_SIZE < used_isp) {
err = "Too few inodes for current use";
goto out;
}
@@ -3827,6 +4145,24 @@ static int shmem_reconfigure(struct fs_context *fc)
goto out;
}
+ if (ctx->seen & SHMEM_SEEN_QUOTA &&
+ !sb_any_quota_loaded(fc->root->d_sb)) {
+ err = "Cannot enable quota on remount";
+ goto out;
+ }
+
+#ifdef CONFIG_TMPFS_QUOTA
+#define CHANGED_LIMIT(name) \
+ (ctx->qlimits.name## hardlimit && \
+ (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit))
+
+ if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) ||
+ CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) {
+ err = "Cannot change global quota limit on remount";
+ goto out;
+ }
+#endif /* CONFIG_TMPFS_QUOTA */
+
if (ctx->seen & SHMEM_SEEN_HUGE)
sbinfo->huge = ctx->huge;
if (ctx->seen & SHMEM_SEEN_INUMS)
@@ -3835,7 +4171,7 @@ static int shmem_reconfigure(struct fs_context *fc)
sbinfo->max_blocks = ctx->blocks;
if (ctx->seen & SHMEM_SEEN_INODES) {
sbinfo->max_inodes = ctx->inodes;
- sbinfo->free_inodes = ctx->inodes - inodes;
+ sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp;
}
/*
@@ -3918,6 +4254,9 @@ static void shmem_put_super(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+#ifdef CONFIG_TMPFS_QUOTA
+ shmem_disable_quotas(sb);
+#endif
free_percpu(sbinfo->ino_batch);
percpu_counter_destroy(&sbinfo->used_blocks);
mpol_put(sbinfo->mpol);
@@ -3930,12 +4269,13 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
struct shmem_options *ctx = fc->fs_private;
struct inode *inode;
struct shmem_sb_info *sbinfo;
+ int error = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
L1_CACHE_BYTES), GFP_KERNEL);
if (!sbinfo)
- return -ENOMEM;
+ return error;
sb->s_fs_info = sbinfo;
@@ -3962,7 +4302,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_NOUSER;
#endif
sbinfo->max_blocks = ctx->blocks;
- sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
+ sbinfo->max_inodes = ctx->inodes;
+ sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
if (sb->s_flags & SB_KERNMOUNT) {
sbinfo->ino_batch = alloc_percpu(ino_t);
if (!sbinfo->ino_batch)
@@ -3996,10 +4337,27 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
#endif
uuid_gen(&sb->s_uuid);
+#ifdef CONFIG_TMPFS_QUOTA
+ if (ctx->seen & SHMEM_SEEN_QUOTA) {
+ sb->dq_op = &shmem_quota_operations;
+ sb->s_qcop = &dquot_quotactl_sysfile_ops;
+ sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
+
+ /* Copy the default limits from ctx into sbinfo */
+ memcpy(&sbinfo->qlimits, &ctx->qlimits,
+ sizeof(struct shmem_quota_limits));
+
+ if (shmem_enable_quotas(sb, ctx->quota_types))
+ goto failed;
+ }
+#endif /* CONFIG_TMPFS_QUOTA */
+
inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
VM_NORESERVE);
- if (!inode)
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
goto failed;
+ }
inode->i_uid = sbinfo->uid;
inode->i_gid = sbinfo->gid;
sb->s_root = d_make_root(inode);
@@ -4009,7 +4367,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
failed:
shmem_put_super(sb);
- return -ENOMEM;
+ return error;
}
static int shmem_get_tree(struct fs_context *fc)
@@ -4059,6 +4417,8 @@ static void shmem_destroy_inode(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
+ if (S_ISDIR(inode->i_mode))
+ simple_offset_destroy(shmem_get_offset_ctx(inode));
}
static void shmem_init_inode(void *foo)
@@ -4102,12 +4462,12 @@ EXPORT_SYMBOL(shmem_aops);
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
- .open = generic_file_open,
+ .open = shmem_file_open,
.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
.llseek = shmem_file_llseek,
.read_iter = shmem_file_read_iter,
- .write_iter = generic_file_write_iter,
+ .write_iter = shmem_file_write_iter,
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = iter_file_splice_write,
@@ -4139,6 +4499,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
.mknod = shmem_mknod,
.rename = shmem_rename2,
.tmpfile = shmem_tmpfile,
+ .get_offset_ctx = shmem_get_offset_ctx,
#endif
#ifdef CONFIG_TMPFS_XATTR
.listxattr = shmem_listxattr,
@@ -4170,6 +4531,9 @@ static const struct super_operations shmem_ops = {
.statfs = shmem_statfs,
.show_options = shmem_show_options,
#endif
+#ifdef CONFIG_TMPFS_QUOTA
+ .get_dquots = shmem_get_dquots,
+#endif
.evict_inode = shmem_evict_inode,
.drop_inode = generic_delete_inode,
.put_super = shmem_put_super,
@@ -4223,7 +4587,7 @@ static struct file_system_type shmem_fs_type = {
#endif
.kill_sb = kill_litter_super,
#ifdef CONFIG_SHMEM
- .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
+ .fs_flags = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
#else
.fs_flags = FS_USERNS_MOUNT,
#endif
@@ -4235,6 +4599,14 @@ void __init shmem_init(void)
shmem_init_inodecache();
+#ifdef CONFIG_TMPFS_QUOTA
+ error = register_quota_format(&shmem_quota_format);
+ if (error < 0) {
+ pr_err("Could not register quota format\n");
+ goto out3;
+ }
+#endif
+
error = register_filesystem(&shmem_fs_type);
if (error) {
pr_err("Could not register tmpfs\n");
@@ -4259,6 +4631,10 @@ void __init shmem_init(void)
out1:
unregister_filesystem(&shmem_fs_type);
out2:
+#ifdef CONFIG_TMPFS_QUOTA
+ unregister_quota_format(&shmem_quota_format);
+out3:
+#endif
shmem_destroy_inodecache();
shm_mnt = ERR_PTR(error);
}
@@ -4378,10 +4754,16 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
#define shmem_vm_ops generic_file_vm_ops
#define shmem_anon_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
-#define shmem_get_inode(idmap, sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
+static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir,
+ umode_t mode, dev_t dev, unsigned long flags)
+{
+ struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
+ return inode ? inode : ERR_PTR(-ENOSPC);
+}
+
#endif /* CONFIG_SHMEM */
/* common code */
@@ -4406,9 +4788,10 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
S_IFREG | S_IRWXUGO, 0, flags);
- if (unlikely(!inode)) {
+
+ if (IS_ERR(inode)) {
shmem_unacct_size(flags, size);
- return ERR_PTR(-ENOSPC);
+ return ERR_CAST(inode);
}
inode->i_flags |= i_flags;
inode->i_size = size;
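
Since shmem_get_inode() now returns ERR_PTR(-errno) rather than NULL (so inode-reservation and quota failures are propagated instead of being collapsed to -ENOSPC), any new call site inside mm/shmem.c would follow the same pattern as the converted callers above. A hypothetical minimal caller, for illustration only:

static int example_shmem_create(struct mnt_idmap *idmap, struct inode *dir,
				umode_t mode)
{
	struct inode *inode;

	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* was: if (!inode) return -ENOSPC; */

	/* a real caller would instantiate a dentry here */
	iput(inode);
	return 0;
}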
diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
new file mode 100644
index 000000000000..062d1c1097ae
--- /dev/null
+++ b/mm/shmem_quota.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The in-memory quota format relies on the quota infrastructure to store
+ * dquot information for us. While conventional quota formats for file
+ * systems with persistent storage can load quota information into dquot
+ * from storage on demand (and hence the dquot shrinker can free any dquot
+ * that is not currently being used), that must be avoided here. Otherwise
+ * we can lose valuable information, the user-provided limits, because
+ * there is no persistent storage to load the information from afterwards.
+ *
+ * One piece of information the in-memory quota format needs to keep track
+ * of is a sorted list of ids for each quota type. This is done by using
+ * an rb tree whose root is stored in mem_dqinfo->dqi_priv for each quota
+ * type.
+ *
+ * This format can be used to support quota on filesystems without
+ * persistent storage, such as tmpfs.
+ *
+ * Author: Lukas Czerner <lczerner@redhat.com>
+ * Carlos Maiolino <cmaiolino@redhat.com>
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ */
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/shmem_fs.h>
+
+#include <linux/quotaops.h>
+#include <linux/quota.h>
+
+#ifdef CONFIG_TMPFS_QUOTA
+
+/*
+ * The following constants define the amount of time given a user
+ * before the soft limits are treated as hard limits (usually resulting
+ * in an allocation failure). The timer is started when the user crosses
+ * their soft limit and is reset when they drop back below it.
+ */
+#define SHMEM_MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#define SHMEM_MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+
+struct quota_id {
+ struct rb_node node;
+ qid_t id;
+ qsize_t bhardlimit;
+ qsize_t bsoftlimit;
+ qsize_t ihardlimit;
+ qsize_t isoftlimit;
+};
+
+static int shmem_check_quota_file(struct super_block *sb, int type)
+{
+ /* There is no real quota file, nothing to do */
+ return 1;
+}
+
+/*
+ * There is no real quota file. Just allocate rb_root for quota ids and
+ * set limits
+ */
+static int shmem_read_file_info(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct mem_dqinfo *info = &dqopt->info[type];
+
+ info->dqi_priv = kzalloc(sizeof(struct rb_root), GFP_NOFS);
+ if (!info->dqi_priv)
+ return -ENOMEM;
+
+ info->dqi_max_spc_limit = SHMEM_QUOTA_MAX_SPC_LIMIT;
+ info->dqi_max_ino_limit = SHMEM_QUOTA_MAX_INO_LIMIT;
+
+ info->dqi_bgrace = SHMEM_MAX_DQ_TIME;
+ info->dqi_igrace = SHMEM_MAX_IQ_TIME;
+ info->dqi_flags = 0;
+
+ return 0;
+}
+
+static int shmem_write_file_info(struct super_block *sb, int type)
+{
+ /* There is no real quota file, nothing to do */
+ return 0;
+}
+
+/*
+ * Free all the quota_id entries in the rb tree and rb_root.
+ */
+static int shmem_free_file_info(struct super_block *sb, int type)
+{
+ struct mem_dqinfo *info = &sb_dqopt(sb)->info[type];
+ struct rb_root *root = info->dqi_priv;
+ struct quota_id *entry;
+ struct rb_node *node;
+
+ info->dqi_priv = NULL;
+ node = rb_first(root);
+ while (node) {
+ entry = rb_entry(node, struct quota_id, node);
+ node = rb_next(&entry->node);
+
+ rb_erase(&entry->node, root);
+ kfree(entry);
+ }
+
+ kfree(root);
+ return 0;
+}
+
+static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+ struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
+ struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ qid_t id = from_kqid(&init_user_ns, *qid);
+ struct quota_info *dqopt = sb_dqopt(sb);
+ struct quota_id *entry = NULL;
+ int ret = 0;
+
+ if (!sb_has_quota_active(sb, qid->type))
+ return -ESRCH;
+
+ down_read(&dqopt->dqio_sem);
+ while (node) {
+ entry = rb_entry(node, struct quota_id, node);
+
+ if (id < entry->id)
+ node = node->rb_left;
+ else if (id > entry->id)
+ node = node->rb_right;
+ else
+ goto got_next_id;
+ }
+
+ if (!entry) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (id > entry->id) {
+ node = rb_next(&entry->node);
+ if (!node) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ entry = rb_entry(node, struct quota_id, node);
+ }
+
+got_next_id:
+ *qid = make_kqid(&init_user_ns, qid->type, entry->id);
+out_unlock:
+ up_read(&dqopt->dqio_sem);
+ return ret;
+}
+
+/*
+ * Load dquot with limits from existing entry, or create the new entry if
+ * it does not exist.
+ */
+static int shmem_acquire_dquot(struct dquot *dquot)
+{
+ struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
+ struct rb_node **n = &((struct rb_root *)info->dqi_priv)->rb_node;
+ struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
+ struct rb_node *parent = NULL, *new_node = NULL;
+ struct quota_id *new_entry, *entry;
+ qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ int ret = 0;
+
+ mutex_lock(&dquot->dq_lock);
+
+ down_write(&dqopt->dqio_sem);
+ while (*n) {
+ parent = *n;
+ entry = rb_entry(parent, struct quota_id, node);
+
+ if (id < entry->id)
+ n = &(*n)->rb_left;
+ else if (id > entry->id)
+ n = &(*n)->rb_right;
+ else
+ goto found;
+ }
+
+ /* We don't have entry for this id yet, create it */
+ new_entry = kzalloc(sizeof(struct quota_id), GFP_NOFS);
+ if (!new_entry) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ new_entry->id = id;
+ if (dquot->dq_id.type == USRQUOTA) {
+ new_entry->bhardlimit = sbinfo->qlimits.usrquota_bhardlimit;
+ new_entry->ihardlimit = sbinfo->qlimits.usrquota_ihardlimit;
+ } else if (dquot->dq_id.type == GRPQUOTA) {
+ new_entry->bhardlimit = sbinfo->qlimits.grpquota_bhardlimit;
+ new_entry->ihardlimit = sbinfo->qlimits.grpquota_ihardlimit;
+ }
+
+ new_node = &new_entry->node;
+ rb_link_node(new_node, parent, n);
+ rb_insert_color(new_node, (struct rb_root *)info->dqi_priv);
+ entry = new_entry;
+
+found:
+ /* Load the stored limits from the tree */
+ spin_lock(&dquot->dq_dqb_lock);
+ dquot->dq_dqb.dqb_bhardlimit = entry->bhardlimit;
+ dquot->dq_dqb.dqb_bsoftlimit = entry->bsoftlimit;
+ dquot->dq_dqb.dqb_ihardlimit = entry->ihardlimit;
+ dquot->dq_dqb.dqb_isoftlimit = entry->isoftlimit;
+
+ if (!dquot->dq_dqb.dqb_bhardlimit &&
+ !dquot->dq_dqb.dqb_bsoftlimit &&
+ !dquot->dq_dqb.dqb_ihardlimit &&
+ !dquot->dq_dqb.dqb_isoftlimit)
+ set_bit(DQ_FAKE_B, &dquot->dq_flags);
+ spin_unlock(&dquot->dq_dqb_lock);
+
+ /* Make sure flags update is visible after dquot has been filled */
+ smp_mb__before_atomic();
+ set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+out_unlock:
+ up_write(&dqopt->dqio_sem);
+ mutex_unlock(&dquot->dq_lock);
+ return ret;
+}
+
+static bool shmem_is_empty_dquot(struct dquot *dquot)
+{
+ struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
+ qsize_t bhardlimit;
+ qsize_t ihardlimit;
+
+ if (dquot->dq_id.type == USRQUOTA) {
+ bhardlimit = sbinfo->qlimits.usrquota_bhardlimit;
+ ihardlimit = sbinfo->qlimits.usrquota_ihardlimit;
+ } else if (dquot->dq_id.type == GRPQUOTA) {
+ bhardlimit = sbinfo->qlimits.grpquota_bhardlimit;
+ ihardlimit = sbinfo->qlimits.grpquota_ihardlimit;
+ }
+
+ if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
+ (dquot->dq_dqb.dqb_curspace == 0 &&
+ dquot->dq_dqb.dqb_curinodes == 0 &&
+ dquot->dq_dqb.dqb_bhardlimit == bhardlimit &&
+ dquot->dq_dqb.dqb_ihardlimit == ihardlimit))
+ return true;
+
+ return false;
+}
+/*
+ * Store limits from dquot in the tree unless it's fake. If it is fake
+ * remove the id from the tree since there is no useful information in
+ * there.
+ */
+static int shmem_release_dquot(struct dquot *dquot)
+{
+ struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
+ struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
+ struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
+ struct quota_id *entry = NULL;
+
+ mutex_lock(&dquot->dq_lock);
+ /* Check whether we are not racing with some other dqget() */
+ if (dquot_is_busy(dquot))
+ goto out_dqlock;
+
+ down_write(&dqopt->dqio_sem);
+ while (node) {
+ entry = rb_entry(node, struct quota_id, node);
+
+ if (id < entry->id)
+ node = node->rb_left;
+ else if (id > entry->id)
+ node = node->rb_right;
+ else
+ goto found;
+ }
+
+ /* We should always find the entry in the rb tree */
+ WARN_ONCE(1, "quota id %u from dquot %p, not in rb tree!\n", id, dquot);
+ up_write(&dqopt->dqio_sem);
+ mutex_unlock(&dquot->dq_lock);
+ return -ENOENT;
+
+found:
+ if (shmem_is_empty_dquot(dquot)) {
+ /* Remove entry from the tree */
+ rb_erase(&entry->node, info->dqi_priv);
+ kfree(entry);
+ } else {
+ /* Store the limits in the tree */
+ spin_lock(&dquot->dq_dqb_lock);
+ entry->bhardlimit = dquot->dq_dqb.dqb_bhardlimit;
+ entry->bsoftlimit = dquot->dq_dqb.dqb_bsoftlimit;
+ entry->ihardlimit = dquot->dq_dqb.dqb_ihardlimit;
+ entry->isoftlimit = dquot->dq_dqb.dqb_isoftlimit;
+ spin_unlock(&dquot->dq_dqb_lock);
+ }
+
+ clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
+ up_write(&dqopt->dqio_sem);
+
+out_dqlock:
+ mutex_unlock(&dquot->dq_lock);
+ return 0;
+}
+
+static int shmem_mark_dquot_dirty(struct dquot *dquot)
+{
+ return 0;
+}
+
+static int shmem_dquot_write_info(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static const struct quota_format_ops shmem_format_ops = {
+ .check_quota_file = shmem_check_quota_file,
+ .read_file_info = shmem_read_file_info,
+ .write_file_info = shmem_write_file_info,
+ .free_file_info = shmem_free_file_info,
+};
+
+struct quota_format_type shmem_quota_format = {
+ .qf_fmt_id = QFMT_SHMEM,
+ .qf_ops = &shmem_format_ops,
+ .qf_owner = THIS_MODULE
+};
+
+const struct dquot_operations shmem_quota_operations = {
+ .acquire_dquot = shmem_acquire_dquot,
+ .release_dquot = shmem_release_dquot,
+ .alloc_dquot = dquot_alloc,
+ .destroy_dquot = dquot_destroy,
+ .write_info = shmem_dquot_write_info,
+ .mark_dirty = shmem_mark_dquot_dirty,
+ .get_next_id = shmem_get_next_id,
+};
+#endif /* CONFIG_TMPFS_QUOTA */
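
Both shmem_acquire_dquot() and shmem_release_dquot() above open-code the same rb-tree search keyed by the qid. As an illustration only (this helper does not exist in the patch), the lookup they perform is equivalent to:

static struct quota_id *example_find_quota_id(struct rb_root *root, qid_t id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct quota_id *entry = rb_entry(node, struct quota_id, node);

		if (id < entry->id)
			node = node->rb_left;
		else if (id > entry->id)
			node = node->rb_right;
		else
			return entry;	/* limits already stored for this id */
	}
	return NULL;			/* caller may insert a new quota_id */
}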
diff --git a/mm/truncate.c b/mm/truncate.c
index 95d1291d269b..c3320e66d6ea 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -657,11 +657,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
}
folio_lock(folio);
- VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
- if (folio->mapping != mapping) {
+ if (unlikely(folio->mapping != mapping)) {
folio_unlock(folio);
continue;
}
+ VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
folio_wait_writeback(folio);
if (folio_mapped(folio))
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 93cf99aba335..228a4a5312f2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2979,6 +2979,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
free_vm_area(area);
return NULL;
}
+
+ flush_cache_vmap((unsigned long)area->addr,
+ (unsigned long)area->addr + count * PAGE_SIZE);
+
return area->addr;
}
EXPORT_SYMBOL_GPL(vmap_pfn);
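
The flush_cache_vmap() added above means vmap_pfn() callers on architectures that need it (e.g. virtually indexed caches) see coherent data through the new mapping without flushing it themselves. A hypothetical caller, for illustration only:

static void *example_map_pfns(unsigned long *pfns, unsigned int count)
{
	/* map 'count' PFNs into a contiguous kernel virtual range */
	void *vaddr = vmap_pfn(pfns, count, PAGE_KERNEL);

	if (!vaddr)
		return NULL;

	/* ... use the mapping at vaddr ..., later release it with vunmap(vaddr) */
	return vaddr;
}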
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1080209a568b..2fe4a11d63f4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4284,6 +4284,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_
static const struct mm_walk_ops mm_walk_ops = {
.test_walk = should_skip_vma,
.p4d_entry = walk_pud_range,
+ .walk_lock = PGWALK_RDLOCK,
};
int err;
@@ -4853,16 +4854,17 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
spin_lock_irq(&pgdat->memcg_lru.lock);
- VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
+ if (hlist_nulls_unhashed(&lruvec->lrugen.list))
+ goto unlock;
gen = lruvec->lrugen.gen;
- hlist_nulls_del_rcu(&lruvec->lrugen.list);
+ hlist_nulls_del_init_rcu(&lruvec->lrugen.list);
pgdat->memcg_lru.nr_memcgs[gen]--;
if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
-
+unlock:
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}
@@ -5434,8 +5436,10 @@ restart:
rcu_read_lock();
hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
- if (op)
+ if (op) {
lru_gen_rotate_memcg(lruvec, op);
+ op = 0;
+ }
mem_cgroup_put(memcg);
@@ -5443,7 +5447,7 @@ restart:
memcg = lruvec_memcg(lruvec);
if (!mem_cgroup_tryget(memcg)) {
- op = 0;
+ lru_gen_release_memcg(memcg);
memcg = NULL;
continue;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 17e6281e408c..ccff2b6ef958 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4061,6 +4061,9 @@ egress_verdict:
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
+ consume_skb(skb);
+ fallthrough;
+ case TC_ACT_CONSUMED:
*ret = NET_XMIT_SUCCESS;
return NULL;
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 1591b061105a..8f56e8723c73 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -254,12 +254,17 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
int err;
struct net *net = dev_net(skb->dev);
- /* Only need dccph_dport & dccph_sport which are the first
- * 4 bytes in dccp header.
+ /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+ * which is in byte 7 of the dccp header.
* Our caller (icmp_socket_deliver()) already pulled 8 bytes for us.
+ *
+ * Later on, we want to access the sequence number fields, which are
+ * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
*/
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+ dh = (struct dccp_hdr *)(skb->data + offset);
+ if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ return -EINVAL;
+ iph = (struct iphdr *)skb->data;
dh = (struct dccp_hdr *)(skb->data + offset);
sk = __inet_lookup_established(net, &dccp_hashinfo,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 686090bc5945..33f6ccf6ba77 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -74,7 +74,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
- const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+ const struct ipv6hdr *hdr;
const struct dccp_hdr *dh;
struct dccp_sock *dp;
struct ipv6_pinfo *np;
@@ -83,12 +83,17 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
__u64 seq;
struct net *net = dev_net(skb->dev);
- /* Only need dccph_dport & dccph_sport which are the first
- * 4 bytes in dccp header.
+ /* For the first __dccp_basic_hdr_len() check, we only need dh->dccph_x,
+ * which is in byte 7 of the dccp header.
* Our caller (icmpv6_notify()) already pulled 8 bytes for us.
+ *
+ * Later on, we want to access the sequence number fields, which are
+ * beyond 8 bytes, so we have to pskb_may_pull() ourselves.
*/
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8);
- BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8);
+ dh = (struct dccp_hdr *)(skb->data + offset);
+ if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
+ return -EINVAL;
+ hdr = (const struct ipv6hdr *)skb->data;
dh = (struct dccp_hdr *)(skb->data + offset);
sk = __inet6_lookup_established(net, &dccp_hashinfo,
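
Both dccp error-handler hunks above replace compile-time layout assertions with a runtime check: dccph_x sits inside the 8 bytes the ICMP layer already pulled, and anything beyond that must be made linear explicitly before it is dereferenced. A minimal sketch of the pattern (function name illustrative):

#include <linux/dccp.h>
#include <linux/skbuff.h>

static int demo_parse_dccp(struct sk_buff *skb, int offset)
{
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);

	/* dh->dccph_x lies within the 8 bytes the caller guaranteed, so it
	 * may be read here to compute the full basic header length.
	 */
	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
		return -EINVAL;

	/* pskb_may_pull() may reallocate the linear area, so re-derive any
	 * pointers into skb->data before using them.
	 */
	dh = (struct dccp_hdr *)(skb->data + offset);
	return dh->dccph_x;	/* 1: 48-bit sequence numbers present */
}
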
diff --git a/net/devlink/Makefile b/net/devlink/Makefile
index a087af581847..000da622116a 100644
--- a/net/devlink/Makefile
+++ b/net/devlink/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y := leftover.o core.o netlink.o netlink_gen.o dev.o health.o
+obj-y := core.o netlink.o netlink_gen.o dev.o port.o sb.o dpipe.o \
+ resource.o param.o region.o health.o trap.o rate.o linecard.o
diff --git a/net/devlink/core.c b/net/devlink/core.c
index c23ebabadc52..6cec4afb01fb 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -5,9 +5,15 @@
*/
#include <net/genetlink.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/devlink.h>
#include "devl_internal.h"
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
+
DEFINE_XARRAY_FLAGS(devlinks, XA_FLAGS_ALLOC);
void *devlink_priv(struct devlink *devlink)
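
The core.c hunk above moves the tracepoint instantiation out of leftover.c: CREATE_TRACE_POINTS must be defined in exactly one translation unit before the trace header is included, and EXPORT_TRACEPOINT_SYMBOL_GPL() then makes the resulting tracepoints usable from modules. A generic sketch of that pattern with hypothetical file and event names (it also assumes the header is reachable under include/trace/events/ or via TRACE_INCLUDE_PATH):

/* include/trace/events/demo.h (hypothetical) */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_DEMO_H */
#include <trace/define_trace.h>

/* demo_core.c (hypothetical): emit the tracepoint objects exactly once */
#define CREATE_TRACE_POINTS
#include <trace/events/demo.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(demo_event);
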
diff --git a/net/devlink/dev.c b/net/devlink/dev.c
index abf3393a7a17..bba4ace7d22b 100644
--- a/net/devlink/dev.c
+++ b/net/devlink/dev.c
@@ -174,7 +174,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
+static void devlink_notify(struct devlink *devlink, enum devlink_command cmd)
{
struct sk_buff *msg;
int err;
@@ -230,6 +230,32 @@ int devlink_nl_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
return devlink_nl_dumpit(msg, cb, devlink_nl_get_dump_one);
}
+void devlink_notify_register(struct devlink *devlink)
+{
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+ devlink_linecards_notify_register(devlink);
+ devlink_ports_notify_register(devlink);
+ devlink_trap_policers_notify_register(devlink);
+ devlink_trap_groups_notify_register(devlink);
+ devlink_traps_notify_register(devlink);
+ devlink_rates_notify_register(devlink);
+ devlink_regions_notify_register(devlink);
+ devlink_params_notify_register(devlink);
+}
+
+void devlink_notify_unregister(struct devlink *devlink)
+{
+ devlink_params_notify_unregister(devlink);
+ devlink_regions_notify_unregister(devlink);
+ devlink_rates_notify_unregister(devlink);
+ devlink_traps_notify_unregister(devlink);
+ devlink_trap_groups_notify_unregister(devlink);
+ devlink_trap_policers_notify_unregister(devlink);
+ devlink_ports_notify_unregister(devlink);
+ devlink_linecards_notify_unregister(devlink);
+ devlink_notify(devlink, DEVLINK_CMD_DEL);
+}
+
static void devlink_reload_failed_set(struct devlink *devlink,
bool reload_failed)
{
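
devlink_notify_register()/devlink_notify_unregister() above bracket the per-subsystem notifications with DEVLINK_CMD_NEW and DEVLINK_CMD_DEL and tear everything down in exact reverse order of registration. The underlying notify helpers all share the multicast shape that devlink_port_notify() shows further down in this diff; a condensed sketch with hypothetical names:

#include <net/genetlink.h>

extern struct genl_family demo_nl_family;			/* hypothetical */
int demo_nl_fill(struct sk_buff *msg, void *obj, u8 cmd);	/* hypothetical */

static void demo_notify(struct net *net, void *obj, u8 cmd)
{
	struct sk_buff *msg;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return;
	if (demo_nl_fill(msg, obj, cmd)) {
		nlmsg_free(msg);
		return;
	}
	/* group 0 stands in for the family's "config" multicast group */
	genlmsg_multicast_netns(&demo_nl_family, net, msg, 0, 0, GFP_KERNEL);
}
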
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
index eb1d5066c73f..f6b5fea2e13c 100644
--- a/net/devlink/devl_internal.h
+++ b/net/devlink/devl_internal.h
@@ -3,6 +3,7 @@
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/
+#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
@@ -11,6 +12,8 @@
#include <linux/xarray.h>
#include <net/devlink.h>
#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <rdma/ib_verbs.h>
#include "netlink_gen.h"
@@ -118,14 +121,9 @@ typedef int devlink_nl_dump_one_func_t(struct sk_buff *msg,
struct netlink_callback *cb,
int flags);
-extern const struct genl_small_ops devlink_nl_small_ops[40];
-
struct devlink *
devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs);
-void devlink_notify_unregister(struct devlink *devlink);
-void devlink_notify_register(struct devlink *devlink);
-
int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
devlink_nl_dump_one_func_t *dump_one);
@@ -147,13 +145,36 @@ devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
return 0;
}
+int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info);
+
/* Notify */
-void devlink_notify(struct devlink *devlink, enum devlink_command cmd);
+void devlink_notify_register(struct devlink *devlink);
+void devlink_notify_unregister(struct devlink *devlink);
+void devlink_ports_notify_register(struct devlink *devlink);
+void devlink_ports_notify_unregister(struct devlink *devlink);
+void devlink_params_notify_register(struct devlink *devlink);
+void devlink_params_notify_unregister(struct devlink *devlink);
+void devlink_regions_notify_register(struct devlink *devlink);
+void devlink_regions_notify_unregister(struct devlink *devlink);
+void devlink_trap_policers_notify_register(struct devlink *devlink);
+void devlink_trap_policers_notify_unregister(struct devlink *devlink);
+void devlink_trap_groups_notify_register(struct devlink *devlink);
+void devlink_trap_groups_notify_unregister(struct devlink *devlink);
+void devlink_traps_notify_register(struct devlink *devlink);
+void devlink_traps_notify_unregister(struct devlink *devlink);
+void devlink_rates_notify_register(struct devlink *devlink);
+void devlink_rates_notify_unregister(struct devlink *devlink);
+void devlink_linecards_notify_register(struct devlink *devlink);
+void devlink_linecards_notify_unregister(struct devlink *devlink);
/* Ports */
+#define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port) \
+ WARN_ON_ONCE(!(devlink_port)->initialized)
+
+struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
+ unsigned int port_index);
int devlink_port_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr);
-
struct devlink_port *
devlink_port_get_from_info(struct devlink *devlink, struct genl_info *info);
struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
@@ -184,12 +205,62 @@ int devlink_resources_validate(struct devlink *devlink,
int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
+/* Linecards */
+struct devlink_linecard {
+ struct list_head list;
+ struct devlink *devlink;
+ unsigned int index;
+ const struct devlink_linecard_ops *ops;
+ void *priv;
+ enum devlink_linecard_state state;
+ struct mutex state_lock; /* Protects state */
+ const char *type;
+ struct devlink_linecard_type *types;
+ unsigned int types_count;
+ struct devlink *nested_devlink;
+};
+
/* Devlink nl cmds */
int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_flash_update(struct sk_buff *skb, struct genl_info *info);
int devlink_nl_cmd_selftests_run(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_resource_set(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_resource_dump(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb);
+int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_region_del(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
int devlink_nl_cmd_health_reporter_set_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_cmd_health_reporter_recover_doit(struct sk_buff *skb,
@@ -202,3 +273,13 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
struct genl_info *info);
int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
struct genl_info *info);
+int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
+int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, struct genl_info *info);
+int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
+ struct genl_info *info);
diff --git a/net/devlink/dpipe.c b/net/devlink/dpipe.c
new file mode 100644
index 000000000000..431227c412e5
--- /dev/null
+++ b/net/devlink/dpipe.c
@@ -0,0 +1,917 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
+ {
+ .name = "destination mac",
+ .id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
+ .bitwidth = 48,
+ },
+};
+
+struct devlink_dpipe_header devlink_dpipe_header_ethernet = {
+ .name = "ethernet",
+ .id = DEVLINK_DPIPE_HEADER_ETHERNET,
+ .fields = devlink_dpipe_fields_ethernet,
+ .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ethernet),
+ .global = true,
+};
+EXPORT_SYMBOL_GPL(devlink_dpipe_header_ethernet);
+
+static struct devlink_dpipe_field devlink_dpipe_fields_ipv4[] = {
+ {
+ .name = "destination ip",
+ .id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
+ .bitwidth = 32,
+ },
+};
+
+struct devlink_dpipe_header devlink_dpipe_header_ipv4 = {
+ .name = "ipv4",
+ .id = DEVLINK_DPIPE_HEADER_IPV4,
+ .fields = devlink_dpipe_fields_ipv4,
+ .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv4),
+ .global = true,
+};
+EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv4);
+
+static struct devlink_dpipe_field devlink_dpipe_fields_ipv6[] = {
+ {
+ .name = "destination ip",
+ .id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
+ .bitwidth = 128,
+ },
+};
+
+struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
+ .name = "ipv6",
+ .id = DEVLINK_DPIPE_HEADER_IPV6,
+ .fields = devlink_dpipe_fields_ipv6,
+ .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv6),
+ .global = true,
+};
+EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv6);
+
+int devlink_dpipe_match_put(struct sk_buff *skb,
+ struct devlink_dpipe_match *match)
+{
+ struct devlink_dpipe_header *header = match->header;
+ struct devlink_dpipe_field *field = &header->fields[match->field_id];
+ struct nlattr *match_attr;
+
+ match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
+ if (!match_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+ nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, match_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, match_attr);
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_match_put);
+
+static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
+ struct sk_buff *skb)
+{
+ struct nlattr *matches_attr;
+
+ matches_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
+ if (!matches_attr)
+ return -EMSGSIZE;
+
+ if (table->table_ops->matches_dump(table->priv, skb))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, matches_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, matches_attr);
+ return -EMSGSIZE;
+}
+
+int devlink_dpipe_action_put(struct sk_buff *skb,
+ struct devlink_dpipe_action *action)
+{
+ struct devlink_dpipe_header *header = action->header;
+ struct devlink_dpipe_field *field = &header->fields[action->field_id];
+ struct nlattr *action_attr;
+
+ action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
+ if (!action_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+ nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, action_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, action_attr);
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_action_put);
+
+static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
+ struct sk_buff *skb)
+{
+ struct nlattr *actions_attr;
+
+ actions_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
+ if (!actions_attr)
+ return -EMSGSIZE;
+
+ if (table->table_ops->actions_dump(table->priv, skb))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, actions_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, actions_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_dpipe_table_put(struct sk_buff *skb,
+ struct devlink_dpipe_table *table)
+{
+ struct nlattr *table_attr;
+ u64 table_size;
+
+ table_size = table->table_ops->size_get(table->priv);
+ table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
+ table->counters_enabled))
+ goto nla_put_failure;
+
+ if (table->resource_valid) {
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+ table->resource_id, DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+ table->resource_units, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ }
+ if (devlink_dpipe_matches_put(table, skb))
+ goto nla_put_failure;
+
+ if (devlink_dpipe_actions_put(table, skb))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, table_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, table_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
+ struct genl_info *info)
+{
+ int err;
+
+ if (*pskb) {
+ err = genlmsg_reply(*pskb, info);
+ if (err)
+ return err;
+ }
+ *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!*pskb)
+ return -ENOMEM;
+ return 0;
+}
+
+static int devlink_dpipe_tables_fill(struct genl_info *info,
+ enum devlink_command cmd, int flags,
+ struct list_head *dpipe_tables,
+ const char *table_name)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_dpipe_table *table;
+ struct nlattr *tables_attr;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ bool incomplete;
+ void *hdr;
+ int i;
+ int err;
+
+ table = list_first_entry(dpipe_tables,
+ struct devlink_dpipe_table, list);
+start_again:
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, NLM_F_MULTI, cmd);
+ if (!hdr) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
+
+ if (devlink_nl_put_handle(skb, devlink))
+ goto nla_put_failure;
+ tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
+ if (!tables_attr)
+ goto nla_put_failure;
+
+ i = 0;
+ incomplete = false;
+ list_for_each_entry_from(table, dpipe_tables, list) {
+ if (!table_name) {
+ err = devlink_dpipe_table_put(skb, table);
+ if (err) {
+ if (!i)
+ goto err_table_put;
+ incomplete = true;
+ break;
+ }
+ } else {
+ if (!strcmp(table->name, table_name)) {
+ err = devlink_dpipe_table_put(skb, table);
+ if (err)
+ break;
+ }
+ }
+ i++;
+ }
+
+ nla_nest_end(skb, tables_attr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+ return err;
+ goto send_done;
+ }
+
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+err_table_put:
+ nlmsg_free(skb);
+ return err;
+}
+
+int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const char *table_name = NULL;
+
+ if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
+ table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+
+ return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0,
+ &devlink->dpipe_table_list,
+ table_name);
+}
+
+static int devlink_dpipe_value_put(struct sk_buff *skb,
+ struct devlink_dpipe_value *value)
+{
+ if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
+ value->value_size, value->value))
+ return -EMSGSIZE;
+ if (value->mask)
+ if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
+ value->value_size, value->mask))
+ return -EMSGSIZE;
+ if (value->mapping_valid)
+ if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
+ value->mapping_value))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int devlink_dpipe_action_value_put(struct sk_buff *skb,
+ struct devlink_dpipe_value *value)
+{
+ if (!value->action)
+ return -EINVAL;
+ if (devlink_dpipe_action_put(skb, value->action))
+ return -EMSGSIZE;
+ if (devlink_dpipe_value_put(skb, value))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int devlink_dpipe_action_values_put(struct sk_buff *skb,
+ struct devlink_dpipe_value *values,
+ unsigned int values_count)
+{
+ struct nlattr *action_attr;
+ int i;
+ int err;
+
+ for (i = 0; i < values_count; i++) {
+ action_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_ACTION_VALUE);
+ if (!action_attr)
+ return -EMSGSIZE;
+ err = devlink_dpipe_action_value_put(skb, &values[i]);
+ if (err)
+ goto err_action_value_put;
+ nla_nest_end(skb, action_attr);
+ }
+ return 0;
+
+err_action_value_put:
+ nla_nest_cancel(skb, action_attr);
+ return err;
+}
+
+static int devlink_dpipe_match_value_put(struct sk_buff *skb,
+ struct devlink_dpipe_value *value)
+{
+ if (!value->match)
+ return -EINVAL;
+ if (devlink_dpipe_match_put(skb, value->match))
+ return -EMSGSIZE;
+ if (devlink_dpipe_value_put(skb, value))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int devlink_dpipe_match_values_put(struct sk_buff *skb,
+ struct devlink_dpipe_value *values,
+ unsigned int values_count)
+{
+ struct nlattr *match_attr;
+ int i;
+ int err;
+
+ for (i = 0; i < values_count; i++) {
+ match_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_MATCH_VALUE);
+ if (!match_attr)
+ return -EMSGSIZE;
+ err = devlink_dpipe_match_value_put(skb, &values[i]);
+ if (err)
+ goto err_match_value_put;
+ nla_nest_end(skb, match_attr);
+ }
+ return 0;
+
+err_match_value_put:
+ nla_nest_cancel(skb, match_attr);
+ return err;
+}
+
+static int devlink_dpipe_entry_put(struct sk_buff *skb,
+ struct devlink_dpipe_entry *entry)
+{
+ struct nlattr *entry_attr, *matches_attr, *actions_attr;
+ int err;
+
+ entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (entry->counter_valid)
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
+ entry->counter, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ matches_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
+ if (!matches_attr)
+ goto nla_put_failure;
+
+ err = devlink_dpipe_match_values_put(skb, entry->match_values,
+ entry->match_values_count);
+ if (err) {
+ nla_nest_cancel(skb, matches_attr);
+ goto err_match_values_put;
+ }
+ nla_nest_end(skb, matches_attr);
+
+ actions_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
+ if (!actions_attr)
+ goto nla_put_failure;
+
+ err = devlink_dpipe_action_values_put(skb, entry->action_values,
+ entry->action_values_count);
+ if (err) {
+ nla_nest_cancel(skb, actions_attr);
+ goto err_action_values_put;
+ }
+ nla_nest_end(skb, actions_attr);
+
+ nla_nest_end(skb, entry_attr);
+ return 0;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+err_match_values_put:
+err_action_values_put:
+ nla_nest_cancel(skb, entry_attr);
+ return err;
+}
+
+static struct devlink_dpipe_table *
+devlink_dpipe_table_find(struct list_head *dpipe_tables,
+ const char *table_name, struct devlink *devlink)
+{
+ struct devlink_dpipe_table *table;
+
+ list_for_each_entry_rcu(table, dpipe_tables, list,
+ lockdep_is_held(&devlink->lock)) {
+ if (!strcmp(table->name, table_name))
+ return table;
+ }
+ return NULL;
+}
+
+int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ struct devlink *devlink;
+ int err;
+
+ err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
+ dump_ctx->info);
+ if (err)
+ return err;
+
+ dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
+ dump_ctx->info->snd_portid,
+ dump_ctx->info->snd_seq,
+ &devlink_nl_family, NLM_F_MULTI,
+ dump_ctx->cmd);
+ if (!dump_ctx->hdr)
+ goto nla_put_failure;
+
+ devlink = dump_ctx->info->user_ptr[0];
+ if (devlink_nl_put_handle(dump_ctx->skb, devlink))
+ goto nla_put_failure;
+ dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
+ DEVLINK_ATTR_DPIPE_ENTRIES);
+ if (!dump_ctx->nest)
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ nlmsg_free(dump_ctx->skb);
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare);
+
+int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+ struct devlink_dpipe_entry *entry)
+{
+ return devlink_dpipe_entry_put(dump_ctx->skb, entry);
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append);
+
+int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ nla_nest_end(dump_ctx->skb, dump_ctx->nest);
+ genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close);
+
+void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
+
+{
+ unsigned int value_count, value_index;
+ struct devlink_dpipe_value *value;
+
+ value = entry->action_values;
+ value_count = entry->action_values_count;
+ for (value_index = 0; value_index < value_count; value_index++) {
+ kfree(value[value_index].value);
+ kfree(value[value_index].mask);
+ }
+
+ value = entry->match_values;
+ value_count = entry->match_values_count;
+ for (value_index = 0; value_index < value_count; value_index++) {
+ kfree(value[value_index].value);
+ kfree(value[value_index].mask);
+ }
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_entry_clear);
+
+static int devlink_dpipe_entries_fill(struct genl_info *info,
+ enum devlink_command cmd, int flags,
+ struct devlink_dpipe_table *table)
+{
+ struct devlink_dpipe_dump_ctx dump_ctx;
+ struct nlmsghdr *nlh;
+ int err;
+
+ dump_ctx.skb = NULL;
+ dump_ctx.cmd = cmd;
+ dump_ctx.info = info;
+
+ err = table->table_ops->entries_dump(table->priv,
+ table->counters_enabled,
+ &dump_ctx);
+ if (err)
+ return err;
+
+send_done:
+ nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
+ if (err)
+ return err;
+ goto send_done;
+ }
+ return genlmsg_reply(dump_ctx.skb, info);
+}
+
+int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_dpipe_table *table;
+ const char *table_name;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME))
+ return -EINVAL;
+
+ table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+ table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+ table_name, devlink);
+ if (!table)
+ return -EINVAL;
+
+ if (!table->table_ops->entries_dump)
+ return -EINVAL;
+
+ return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET,
+ 0, table);
+}
+
+static int devlink_dpipe_fields_put(struct sk_buff *skb,
+ const struct devlink_dpipe_header *header)
+{
+ struct devlink_dpipe_field *field;
+ struct nlattr *field_attr;
+ int i;
+
+ for (i = 0; i < header->fields_count; i++) {
+ field = &header->fields[i];
+ field_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_FIELD);
+ if (!field_attr)
+ return -EMSGSIZE;
+ if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
+ goto nla_put_failure;
+ nla_nest_end(skb, field_attr);
+ }
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, field_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_dpipe_header_put(struct sk_buff *skb,
+ struct devlink_dpipe_header *header)
+{
+ struct nlattr *fields_attr, *header_attr;
+ int err;
+
+ header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
+ if (!header_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
+ nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
+ nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
+ goto nla_put_failure;
+
+ fields_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
+ if (!fields_attr)
+ goto nla_put_failure;
+
+ err = devlink_dpipe_fields_put(skb, header);
+ if (err) {
+ nla_nest_cancel(skb, fields_attr);
+ goto nla_put_failure;
+ }
+ nla_nest_end(skb, fields_attr);
+ nla_nest_end(skb, header_attr);
+ return 0;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+ nla_nest_cancel(skb, header_attr);
+ return err;
+}
+
+static int devlink_dpipe_headers_fill(struct genl_info *info,
+ enum devlink_command cmd, int flags,
+ struct devlink_dpipe_headers *
+ dpipe_headers)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct nlattr *headers_attr;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ void *hdr;
+ int i, j;
+ int err;
+
+ i = 0;
+start_again:
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, NLM_F_MULTI, cmd);
+ if (!hdr) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
+
+ if (devlink_nl_put_handle(skb, devlink))
+ goto nla_put_failure;
+ headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
+ if (!headers_attr)
+ goto nla_put_failure;
+
+ j = 0;
+ for (; i < dpipe_headers->headers_count; i++) {
+ err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
+ if (err) {
+ if (!j)
+ goto err_table_put;
+ break;
+ }
+ j++;
+ }
+ nla_nest_end(skb, headers_attr);
+ genlmsg_end(skb, hdr);
+ if (i != dpipe_headers->headers_count)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = devlink_dpipe_send_and_alloc_skb(&skb, info);
+ if (err)
+ return err;
+ goto send_done;
+ }
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+err_table_put:
+ nlmsg_free(skb);
+ return err;
+}
+
+int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (!devlink->dpipe_headers)
+ return -EOPNOTSUPP;
+ return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET,
+ 0, devlink->dpipe_headers);
+}
+
+static int devlink_dpipe_table_counters_set(struct devlink *devlink,
+ const char *table_name,
+ bool enable)
+{
+ struct devlink_dpipe_table *table;
+
+ table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+ table_name, devlink);
+ if (!table)
+ return -EINVAL;
+
+ if (table->counter_control_extern)
+ return -EOPNOTSUPP;
+
+ if (!(table->counters_enabled ^ enable))
+ return 0;
+
+ table->counters_enabled = enable;
+ if (table->table_ops->counters_set_update)
+ table->table_ops->counters_set_update(table->priv, enable);
+ return 0;
+}
+
+int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const char *table_name;
+ bool counters_enable;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME) ||
+ GENL_REQ_ATTR_CHECK(info,
+ DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED))
+ return -EINVAL;
+
+ table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
+ counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]);
+
+ return devlink_dpipe_table_counters_set(devlink, table_name,
+ counters_enable);
+}
+
+/**
+ * devl_dpipe_headers_register - register dpipe headers
+ *
+ * @devlink: devlink
+ * @dpipe_headers: dpipe header array
+ *
+ * Register the headers supported by hardware.
+ */
+void devl_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *dpipe_headers)
+{
+ lockdep_assert_held(&devlink->lock);
+
+ devlink->dpipe_headers = dpipe_headers;
+}
+EXPORT_SYMBOL_GPL(devl_dpipe_headers_register);
+
+/**
+ * devl_dpipe_headers_unregister - unregister dpipe headers
+ *
+ * @devlink: devlink
+ *
+ * Unregister the headers supported by hardware.
+ */
+void devl_dpipe_headers_unregister(struct devlink *devlink)
+{
+ lockdep_assert_held(&devlink->lock);
+
+ devlink->dpipe_headers = NULL;
+}
+EXPORT_SYMBOL_GPL(devl_dpipe_headers_unregister);
+
+/**
+ * devlink_dpipe_table_counter_enabled - check if counter allocation
+ * required
+ * @devlink: devlink
+ * @table_name: tables name
+ *
+ * Used by driver to check if counter allocation is required.
+ * After counter allocation is turned on the table entries
+ * are updated to include counter statistics.
+ *
+ * After that point on the driver must respect the counter
+ * state so that each entry added to the table is added
+ * with a counter.
+ */
+bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+ const char *table_name)
+{
+ struct devlink_dpipe_table *table;
+ bool enabled;
+
+ rcu_read_lock();
+ table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+ table_name, devlink);
+ enabled = false;
+ if (table)
+ enabled = table->counters_enabled;
+ rcu_read_unlock();
+ return enabled;
+}
+EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
+
+/**
+ * devl_dpipe_table_register - register dpipe table
+ *
+ * @devlink: devlink
+ * @table_name: table name
+ * @table_ops: table ops
+ * @priv: priv
+ * @counter_control_extern: external control for counters
+ */
+int devl_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, bool counter_control_extern)
+{
+ struct devlink_dpipe_table *table;
+
+ lockdep_assert_held(&devlink->lock);
+
+ if (WARN_ON(!table_ops->size_get))
+ return -EINVAL;
+
+ if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
+ devlink))
+ return -EEXIST;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->name = table_name;
+ table->table_ops = table_ops;
+ table->priv = priv;
+ table->counter_control_extern = counter_control_extern;
+
+ list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_dpipe_table_register);
+
+/**
+ * devl_dpipe_table_unregister - unregister dpipe table
+ *
+ * @devlink: devlink
+ * @table_name: table name
+ */
+void devl_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name)
+{
+ struct devlink_dpipe_table *table;
+
+ lockdep_assert_held(&devlink->lock);
+
+ table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+ table_name, devlink);
+ if (!table)
+ return;
+ list_del_rcu(&table->list);
+ kfree_rcu(table, rcu);
+}
+EXPORT_SYMBOL_GPL(devl_dpipe_table_unregister);
+
+/**
+ * devl_dpipe_table_resource_set - set the resource id
+ *
+ * @devlink: devlink
+ * @table_name: table name
+ * @resource_id: resource id
+ * @resource_units: number of resource's units consumed per table's entry
+ */
+int devl_dpipe_table_resource_set(struct devlink *devlink,
+ const char *table_name, u64 resource_id,
+ u64 resource_units)
+{
+ struct devlink_dpipe_table *table;
+
+ table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
+ table_name, devlink);
+ if (!table)
+ return -EINVAL;
+
+ table->resource_id = resource_id;
+ table->resource_units = resource_units;
+ table->resource_valid = true;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_dpipe_table_resource_set);
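
The new dpipe.c keeps the multi-part reply scheme visible in devlink_dpipe_tables_fill() and devlink_dpipe_headers_fill() above: objects are appended until the message runs out of room, the partial message is sent as an NLM_F_MULTI reply, a fresh skb is allocated to continue, and NLMSG_DONE closes the sequence. A condensed sketch of that loop, with hypothetical helper names and the attribute nesting omitted:

#include <net/genetlink.h>

extern struct genl_family demo_nl_family;		/* hypothetical */
int demo_put_object(struct sk_buff *skb, int idx);	/* returns -EMSGSIZE when full */

static int demo_flush_and_realloc(struct sk_buff **pskb, struct genl_info *info)
{
	int err;

	if (*pskb) {
		err = genlmsg_reply(*pskb, info);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	return *pskb ? 0 : -ENOMEM;
}

static int demo_fill(struct genl_info *info, u8 cmd, int count)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *hdr;
	int i = 0, j, err;

start_again:
	err = demo_flush_and_realloc(&skb, info);
	if (err)
		return err;
	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
			  &demo_nl_family, NLM_F_MULTI, cmd);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	for (j = 0; i < count; i++, j++) {
		if (demo_put_object(skb, i)) {
			if (!j) {	/* nothing fits even in a fresh skb */
				nlmsg_free(skb);
				return -EMSGSIZE;
			}
			break;		/* message full: flush and continue */
		}
	}
	genlmsg_end(skb, hdr);
	if (i < count)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
			NLMSG_DONE, 0, NLM_F_MULTI);
	if (!nlh) {
		err = demo_flush_and_realloc(&skb, info);
		if (err)
			return err;
		goto send_done;
	}
	return genlmsg_reply(skb, info);
}
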
diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c
deleted file mode 100644
index e2cd13958cc2..000000000000
--- a/net/devlink/leftover.c
+++ /dev/null
@@ -1,9427 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * net/core/devlink.c - Network physical/parent device Netlink interface
- *
- * Heavily inspired by net/wireless/
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
- */
-
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/gfp.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/refcount.h>
-#include <linux/workqueue.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/timekeeping.h>
-#include <rdma/ib_verbs.h>
-#include <net/netlink.h>
-#include <net/genetlink.h>
-#include <net/rtnetlink.h>
-#include <net/net_namespace.h>
-#include <net/sock.h>
-#include <net/devlink.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/devlink.h>
-
-#include "devl_internal.h"
-
-struct devlink_linecard {
- struct list_head list;
- struct devlink *devlink;
- unsigned int index;
- const struct devlink_linecard_ops *ops;
- void *priv;
- enum devlink_linecard_state state;
- struct mutex state_lock; /* Protects state */
- const char *type;
- struct devlink_linecard_type *types;
- unsigned int types_count;
- struct devlink *nested_devlink;
-};
-
-/**
- * struct devlink_resource - devlink resource
- * @name: name of the resource
- * @id: id, per devlink instance
- * @size: size of the resource
- * @size_new: updated size of the resource, reload is needed
- * @size_valid: valid in case the total size of the resource is valid
- * including its children
- * @parent: parent resource
- * @size_params: size parameters
- * @list: parent list
- * @resource_list: list of child resources
- * @occ_get: occupancy getter callback
- * @occ_get_priv: occupancy getter callback priv
- */
-struct devlink_resource {
- const char *name;
- u64 id;
- u64 size;
- u64 size_new;
- bool size_valid;
- struct devlink_resource *parent;
- struct devlink_resource_size_params size_params;
- struct list_head list;
- struct list_head resource_list;
- devlink_resource_occ_get_t *occ_get;
- void *occ_get_priv;
-};
-
-static struct devlink_dpipe_field devlink_dpipe_fields_ethernet[] = {
- {
- .name = "destination mac",
- .id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC,
- .bitwidth = 48,
- },
-};
-
-struct devlink_dpipe_header devlink_dpipe_header_ethernet = {
- .name = "ethernet",
- .id = DEVLINK_DPIPE_HEADER_ETHERNET,
- .fields = devlink_dpipe_fields_ethernet,
- .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ethernet),
- .global = true,
-};
-EXPORT_SYMBOL_GPL(devlink_dpipe_header_ethernet);
-
-static struct devlink_dpipe_field devlink_dpipe_fields_ipv4[] = {
- {
- .name = "destination ip",
- .id = DEVLINK_DPIPE_FIELD_IPV4_DST_IP,
- .bitwidth = 32,
- },
-};
-
-struct devlink_dpipe_header devlink_dpipe_header_ipv4 = {
- .name = "ipv4",
- .id = DEVLINK_DPIPE_HEADER_IPV4,
- .fields = devlink_dpipe_fields_ipv4,
- .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv4),
- .global = true,
-};
-EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv4);
-
-static struct devlink_dpipe_field devlink_dpipe_fields_ipv6[] = {
- {
- .name = "destination ip",
- .id = DEVLINK_DPIPE_FIELD_IPV6_DST_IP,
- .bitwidth = 128,
- },
-};
-
-struct devlink_dpipe_header devlink_dpipe_header_ipv6 = {
- .name = "ipv6",
- .id = DEVLINK_DPIPE_HEADER_IPV6,
- .fields = devlink_dpipe_fields_ipv6,
- .fields_count = ARRAY_SIZE(devlink_dpipe_fields_ipv6),
- .global = true,
-};
-EXPORT_SYMBOL_GPL(devlink_dpipe_header_ipv6);
-
-EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
-EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
-EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
-
-#define DEVLINK_PORT_FN_CAPS_VALID_MASK \
- (_BITUL(__DEVLINK_PORT_FN_ATTR_CAPS_MAX) - 1)
-
-static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
- [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
- [DEVLINK_PORT_FN_ATTR_STATE] =
- NLA_POLICY_RANGE(NLA_U8, DEVLINK_PORT_FN_STATE_INACTIVE,
- DEVLINK_PORT_FN_STATE_ACTIVE),
- [DEVLINK_PORT_FN_ATTR_CAPS] =
- NLA_POLICY_BITFIELD32(DEVLINK_PORT_FN_CAPS_VALID_MASK),
-};
-
-#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port) \
- WARN_ON_ONCE(!(devlink_port)->registered)
-#define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port) \
- WARN_ON_ONCE((devlink_port)->registered)
-#define ASSERT_DEVLINK_PORT_INITIALIZED(devlink_port) \
- WARN_ON_ONCE(!(devlink_port)->initialized)
-
-static struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
- unsigned int port_index)
-{
- return xa_load(&devlink->ports, port_index);
-}
-
-struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
- struct nlattr **attrs)
-{
- if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
- u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
- struct devlink_port *devlink_port;
-
- devlink_port = devlink_port_get_by_index(devlink, port_index);
- if (!devlink_port)
- return ERR_PTR(-ENODEV);
- return devlink_port;
- }
- return ERR_PTR(-EINVAL);
-}
-
-struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
- struct genl_info *info)
-{
- return devlink_port_get_from_attrs(devlink, info->attrs);
-}
-
-static inline bool
-devlink_rate_is_leaf(struct devlink_rate *devlink_rate)
-{
- return devlink_rate->type == DEVLINK_RATE_TYPE_LEAF;
-}
-
-static inline bool
-devlink_rate_is_node(struct devlink_rate *devlink_rate)
-{
- return devlink_rate->type == DEVLINK_RATE_TYPE_NODE;
-}
-
-static struct devlink_rate *
-devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
-{
- struct devlink_rate *devlink_rate;
- struct devlink_port *devlink_port;
-
- devlink_port = devlink_port_get_from_attrs(devlink, info->attrs);
- if (IS_ERR(devlink_port))
- return ERR_CAST(devlink_port);
- devlink_rate = devlink_port->devlink_rate;
- return devlink_rate ?: ERR_PTR(-ENODEV);
-}
-
-static struct devlink_rate *
-devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
-{
- static struct devlink_rate *devlink_rate;
-
- list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
- if (devlink_rate_is_node(devlink_rate) &&
- !strcmp(node_name, devlink_rate->name))
- return devlink_rate;
- }
- return ERR_PTR(-ENODEV);
-}
-
-static struct devlink_rate *
-devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
-{
- const char *rate_node_name;
- size_t len;
-
- if (!attrs[DEVLINK_ATTR_RATE_NODE_NAME])
- return ERR_PTR(-EINVAL);
- rate_node_name = nla_data(attrs[DEVLINK_ATTR_RATE_NODE_NAME]);
- len = strlen(rate_node_name);
- /* Name cannot be empty or decimal number */
- if (!len || strspn(rate_node_name, "0123456789") == len)
- return ERR_PTR(-EINVAL);
-
- return devlink_rate_node_get_by_name(devlink, rate_node_name);
-}
-
-static struct devlink_rate *
-devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
-{
- return devlink_rate_node_get_from_attrs(devlink, info->attrs);
-}
-
-static struct devlink_rate *
-devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
-{
- struct nlattr **attrs = info->attrs;
-
- if (attrs[DEVLINK_ATTR_PORT_INDEX])
- return devlink_rate_leaf_get_from_info(devlink, info);
- else if (attrs[DEVLINK_ATTR_RATE_NODE_NAME])
- return devlink_rate_node_get_from_info(devlink, info);
- else
- return ERR_PTR(-EINVAL);
-}
-
-static struct devlink_linecard *
-devlink_linecard_get_by_index(struct devlink *devlink,
- unsigned int linecard_index)
-{
- struct devlink_linecard *devlink_linecard;
-
- list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) {
- if (devlink_linecard->index == linecard_index)
- return devlink_linecard;
- }
- return NULL;
-}
-
-static bool devlink_linecard_index_exists(struct devlink *devlink,
- unsigned int linecard_index)
-{
- return devlink_linecard_get_by_index(devlink, linecard_index);
-}
-
-static struct devlink_linecard *
-devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
-{
- if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) {
- u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
- struct devlink_linecard *linecard;
-
- linecard = devlink_linecard_get_by_index(devlink, linecard_index);
- if (!linecard)
- return ERR_PTR(-ENODEV);
- return linecard;
- }
- return ERR_PTR(-EINVAL);
-}
-
-static struct devlink_linecard *
-devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
-{
- return devlink_linecard_get_from_attrs(devlink, info->attrs);
-}
-
-struct devlink_sb {
- struct list_head list;
- unsigned int index;
- u32 size;
- u16 ingress_pools_count;
- u16 egress_pools_count;
- u16 ingress_tc_count;
- u16 egress_tc_count;
-};
-
-static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
-{
- return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
-}
-
-static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
- unsigned int sb_index)
-{
- struct devlink_sb *devlink_sb;
-
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- if (devlink_sb->index == sb_index)
- return devlink_sb;
- }
- return NULL;
-}
-
-static bool devlink_sb_index_exists(struct devlink *devlink,
- unsigned int sb_index)
-{
- return devlink_sb_get_by_index(devlink, sb_index);
-}
-
-static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
- struct nlattr **attrs)
-{
- if (attrs[DEVLINK_ATTR_SB_INDEX]) {
- u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
- struct devlink_sb *devlink_sb;
-
- devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
- if (!devlink_sb)
- return ERR_PTR(-ENODEV);
- return devlink_sb;
- }
- return ERR_PTR(-EINVAL);
-}
-
-static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
- struct genl_info *info)
-{
- return devlink_sb_get_from_attrs(devlink, info->attrs);
-}
-
-static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
- struct nlattr **attrs,
- u16 *p_pool_index)
-{
- u16 val;
-
- if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
- return -EINVAL;
-
- val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
- if (val >= devlink_sb_pool_count(devlink_sb))
- return -EINVAL;
- *p_pool_index = val;
- return 0;
-}
-
-static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
- struct genl_info *info,
- u16 *p_pool_index)
-{
- return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
- p_pool_index);
-}
-
-static int
-devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
- enum devlink_sb_pool_type *p_pool_type)
-{
- u8 val;
-
- if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
- return -EINVAL;
-
- val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
- if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
- val != DEVLINK_SB_POOL_TYPE_EGRESS)
- return -EINVAL;
- *p_pool_type = val;
- return 0;
-}
-
-static int
-devlink_sb_pool_type_get_from_info(struct genl_info *info,
- enum devlink_sb_pool_type *p_pool_type)
-{
- return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
-}
-
-static int
-devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
- enum devlink_sb_threshold_type *p_th_type)
-{
- u8 val;
-
- if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
- return -EINVAL;
-
- val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
- if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
- val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
- return -EINVAL;
- *p_th_type = val;
- return 0;
-}
-
-static int
-devlink_sb_th_type_get_from_info(struct genl_info *info,
- enum devlink_sb_threshold_type *p_th_type)
-{
- return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
-}
-
-static int
-devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
- struct nlattr **attrs,
- enum devlink_sb_pool_type pool_type,
- u16 *p_tc_index)
-{
- u16 val;
-
- if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
- return -EINVAL;
-
- val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
- if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
- val >= devlink_sb->ingress_tc_count)
- return -EINVAL;
- if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
- val >= devlink_sb->egress_tc_count)
- return -EINVAL;
- *p_tc_index = val;
- return 0;
-}
-
-static void devlink_port_fn_cap_fill(struct nla_bitfield32 *caps,
- u32 cap, bool is_enable)
-{
- caps->selector |= cap;
- if (is_enable)
- caps->value |= cap;
-}
-
-static int devlink_port_fn_roce_fill(struct devlink_port *devlink_port,
- struct nla_bitfield32 *caps,
- struct netlink_ext_ack *extack)
-{
- bool is_enable;
- int err;
-
- if (!devlink_port->ops->port_fn_roce_get)
- return 0;
-
- err = devlink_port->ops->port_fn_roce_get(devlink_port, &is_enable,
- extack);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
-
- devlink_port_fn_cap_fill(caps, DEVLINK_PORT_FN_CAP_ROCE, is_enable);
- return 0;
-}
-
-static int devlink_port_fn_migratable_fill(struct devlink_port *devlink_port,
- struct nla_bitfield32 *caps,
- struct netlink_ext_ack *extack)
-{
- bool is_enable;
- int err;
-
- if (!devlink_port->ops->port_fn_migratable_get ||
- devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF)
- return 0;
-
- err = devlink_port->ops->port_fn_migratable_get(devlink_port,
- &is_enable, extack);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
-
- devlink_port_fn_cap_fill(caps, DEVLINK_PORT_FN_CAP_MIGRATABLE, is_enable);
- return 0;
-}
-
-static int devlink_port_fn_caps_fill(struct devlink_port *devlink_port,
- struct sk_buff *msg,
- struct netlink_ext_ack *extack,
- bool *msg_updated)
-{
- struct nla_bitfield32 caps = {};
- int err;
-
- err = devlink_port_fn_roce_fill(devlink_port, &caps, extack);
- if (err)
- return err;
-
- err = devlink_port_fn_migratable_fill(devlink_port, &caps, extack);
- if (err)
- return err;
-
- if (!caps.selector)
- return 0;
- err = nla_put_bitfield32(msg, DEVLINK_PORT_FN_ATTR_CAPS, caps.value,
- caps.selector);
- if (err)
- return err;
-
- *msg_updated = true;
- return 0;
-}
-
-static int
-devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
- struct genl_info *info,
- enum devlink_sb_pool_type pool_type,
- u16 *p_tc_index)
-{
- return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
- pool_type, p_tc_index);
-}
-
-struct devlink_region {
- struct devlink *devlink;
- struct devlink_port *port;
- struct list_head list;
- union {
- const struct devlink_region_ops *ops;
- const struct devlink_port_region_ops *port_ops;
- };
- struct mutex snapshot_lock; /* protects snapshot_list,
- * max_snapshots and cur_snapshots
- * consistency.
- */
- struct list_head snapshot_list;
- u32 max_snapshots;
- u32 cur_snapshots;
- u64 size;
-};
-
-struct devlink_snapshot {
- struct list_head list;
- struct devlink_region *region;
- u8 *data;
- u32 id;
-};
-
-static struct devlink_region *
-devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
-{
- struct devlink_region *region;
-
- list_for_each_entry(region, &devlink->region_list, list)
- if (!strcmp(region->ops->name, region_name))
- return region;
-
- return NULL;
-}
-
-static struct devlink_region *
-devlink_port_region_get_by_name(struct devlink_port *port,
- const char *region_name)
-{
- struct devlink_region *region;
-
- list_for_each_entry(region, &port->region_list, list)
- if (!strcmp(region->ops->name, region_name))
- return region;
-
- return NULL;
-}
-
-static struct devlink_snapshot *
-devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
-{
- struct devlink_snapshot *snapshot;
-
- list_for_each_entry(snapshot, &region->snapshot_list, list)
- if (snapshot->id == id)
- return snapshot;
-
- return NULL;
-}
-
-static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
-{
- struct nlattr *nested_attr;
-
- nested_attr = nla_nest_start(msg, DEVLINK_ATTR_NESTED_DEVLINK);
- if (!nested_attr)
- return -EMSGSIZE;
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- nla_nest_end(msg, nested_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, nested_attr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port)
-{
- if (devlink_nl_put_handle(msg, devlink_port->devlink))
- return -EMSGSIZE;
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
- return -EMSGSIZE;
- return 0;
-}
-
-size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port)
-{
- struct devlink *devlink = devlink_port->devlink;
-
- return nla_total_size(strlen(devlink->dev->bus->name) + 1) /* DEVLINK_ATTR_BUS_NAME */
- + nla_total_size(strlen(dev_name(devlink->dev)) + 1) /* DEVLINK_ATTR_DEV_NAME */
- + nla_total_size(4); /* DEVLINK_ATTR_PORT_INDEX */
-}
-
-static int devlink_nl_port_attrs_put(struct sk_buff *msg,
- struct devlink_port *devlink_port)
-{
- struct devlink_port_attrs *attrs = &devlink_port->attrs;
-
- if (!devlink_port->attrs_set)
- return 0;
- if (attrs->lanes) {
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes))
- return -EMSGSIZE;
- }
- if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable))
- return -EMSGSIZE;
- if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour))
- return -EMSGSIZE;
- switch (devlink_port->attrs.flavour) {
- case DEVLINK_PORT_FLAVOUR_PCI_PF:
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
- attrs->pci_pf.controller) ||
- nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf))
- return -EMSGSIZE;
- if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_pf.external))
- return -EMSGSIZE;
- break;
- case DEVLINK_PORT_FLAVOUR_PCI_VF:
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
- attrs->pci_vf.controller) ||
- nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_vf.pf) ||
- nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, attrs->pci_vf.vf))
- return -EMSGSIZE;
- if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_vf.external))
- return -EMSGSIZE;
- break;
- case DEVLINK_PORT_FLAVOUR_PCI_SF:
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
- attrs->pci_sf.controller) ||
- nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
- attrs->pci_sf.pf) ||
- nla_put_u32(msg, DEVLINK_ATTR_PORT_PCI_SF_NUMBER,
- attrs->pci_sf.sf))
- return -EMSGSIZE;
- break;
- case DEVLINK_PORT_FLAVOUR_PHYSICAL:
- case DEVLINK_PORT_FLAVOUR_CPU:
- case DEVLINK_PORT_FLAVOUR_DSA:
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
- attrs->phys.port_number))
- return -EMSGSIZE;
- if (!attrs->split)
- return 0;
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP,
- attrs->phys.port_number))
- return -EMSGSIZE;
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER,
- attrs->phys.split_subport_number))
- return -EMSGSIZE;
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int devlink_port_fn_hw_addr_fill(struct devlink_port *port,
- struct sk_buff *msg,
- struct netlink_ext_ack *extack,
- bool *msg_updated)
-{
- u8 hw_addr[MAX_ADDR_LEN];
- int hw_addr_len;
- int err;
-
- if (!port->ops->port_fn_hw_addr_get)
- return 0;
-
- err = port->ops->port_fn_hw_addr_get(port, hw_addr, &hw_addr_len,
- extack);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
- err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr);
- if (err)
- return err;
- *msg_updated = true;
- return 0;
-}
-
-static int devlink_nl_rate_fill(struct sk_buff *msg,
- struct devlink_rate *devlink_rate,
- enum devlink_command cmd, u32 portid, u32 seq,
- int flags, struct netlink_ext_ack *extack)
-{
- struct devlink *devlink = devlink_rate->devlink;
- void *hdr;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- if (nla_put_u16(msg, DEVLINK_ATTR_RATE_TYPE, devlink_rate->type))
- goto nla_put_failure;
-
- if (devlink_rate_is_leaf(devlink_rate)) {
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
- devlink_rate->devlink_port->index))
- goto nla_put_failure;
- } else if (devlink_rate_is_node(devlink_rate)) {
- if (nla_put_string(msg, DEVLINK_ATTR_RATE_NODE_NAME,
- devlink_rate->name))
- goto nla_put_failure;
- }
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
- devlink_rate->tx_share, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
- devlink_rate->tx_max, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- if (nla_put_u32(msg, DEVLINK_ATTR_RATE_TX_PRIORITY,
- devlink_rate->tx_priority))
- goto nla_put_failure;
-
- if (nla_put_u32(msg, DEVLINK_ATTR_RATE_TX_WEIGHT,
- devlink_rate->tx_weight))
- goto nla_put_failure;
-
- if (devlink_rate->parent)
- if (nla_put_string(msg, DEVLINK_ATTR_RATE_PARENT_NODE_NAME,
- devlink_rate->parent->name))
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static bool
-devlink_port_fn_state_valid(enum devlink_port_fn_state state)
-{
- return state == DEVLINK_PORT_FN_STATE_INACTIVE ||
- state == DEVLINK_PORT_FN_STATE_ACTIVE;
-}
-
-static bool
-devlink_port_fn_opstate_valid(enum devlink_port_fn_opstate opstate)
-{
- return opstate == DEVLINK_PORT_FN_OPSTATE_DETACHED ||
- opstate == DEVLINK_PORT_FN_OPSTATE_ATTACHED;
-}
-
-static int devlink_port_fn_state_fill(struct devlink_port *port,
- struct sk_buff *msg,
- struct netlink_ext_ack *extack,
- bool *msg_updated)
-{
- enum devlink_port_fn_opstate opstate;
- enum devlink_port_fn_state state;
- int err;
-
- if (!port->ops->port_fn_state_get)
- return 0;
-
- err = port->ops->port_fn_state_get(port, &state, &opstate, extack);
- if (err) {
- if (err == -EOPNOTSUPP)
- return 0;
- return err;
- }
- if (!devlink_port_fn_state_valid(state)) {
- WARN_ON_ONCE(1);
- NL_SET_ERR_MSG(extack, "Invalid state read from driver");
- return -EINVAL;
- }
- if (!devlink_port_fn_opstate_valid(opstate)) {
- WARN_ON_ONCE(1);
- NL_SET_ERR_MSG(extack, "Invalid operational state read from driver");
- return -EINVAL;
- }
- if (nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_STATE, state) ||
- nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_OPSTATE, opstate))
- return -EMSGSIZE;
- *msg_updated = true;
- return 0;
-}
-
-static int
-devlink_port_fn_mig_set(struct devlink_port *devlink_port, bool enable,
- struct netlink_ext_ack *extack)
-{
- return devlink_port->ops->port_fn_migratable_set(devlink_port, enable,
- extack);
-}
-
-static int
-devlink_port_fn_roce_set(struct devlink_port *devlink_port, bool enable,
- struct netlink_ext_ack *extack)
-{
- return devlink_port->ops->port_fn_roce_set(devlink_port, enable,
- extack);
-}
-
-static int devlink_port_fn_caps_set(struct devlink_port *devlink_port,
- const struct nlattr *attr,
- struct netlink_ext_ack *extack)
-{
- struct nla_bitfield32 caps;
- u32 caps_value;
- int err;
-
- caps = nla_get_bitfield32(attr);
- caps_value = caps.value & caps.selector;
- if (caps.selector & DEVLINK_PORT_FN_CAP_ROCE) {
- err = devlink_port_fn_roce_set(devlink_port,
- caps_value & DEVLINK_PORT_FN_CAP_ROCE,
- extack);
- if (err)
- return err;
- }
- if (caps.selector & DEVLINK_PORT_FN_CAP_MIGRATABLE) {
- err = devlink_port_fn_mig_set(devlink_port, caps_value &
- DEVLINK_PORT_FN_CAP_MIGRATABLE,
- extack);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int
-devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *function_attr;
- bool msg_updated = false;
- int err;
-
- function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION);
- if (!function_attr)
- return -EMSGSIZE;
-
- err = devlink_port_fn_hw_addr_fill(port, msg, extack, &msg_updated);
- if (err)
- goto out;
- err = devlink_port_fn_caps_fill(port, msg, extack, &msg_updated);
- if (err)
- goto out;
- err = devlink_port_fn_state_fill(port, msg, extack, &msg_updated);
-out:
- if (err || !msg_updated)
- nla_nest_cancel(msg, function_attr);
- else
- nla_nest_end(msg, function_attr);
- return err;
-}
-
-static int devlink_nl_port_fill(struct sk_buff *msg,
- struct devlink_port *devlink_port,
- enum devlink_command cmd, u32 portid, u32 seq,
- int flags, struct netlink_ext_ack *extack)
-{
- struct devlink *devlink = devlink_port->devlink;
- void *hdr;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
- goto nla_put_failure;
-
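- /* The port type and the type-specific netdev/ibdev information are
- * protected by type_lock, so read them under the lock while filling.
- */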
- spin_lock_bh(&devlink_port->type_lock);
- if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
- goto nla_put_failure_type_locked;
- if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
- nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
- devlink_port->desired_type))
- goto nla_put_failure_type_locked;
- if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
- if (devlink_port->type_eth.netdev &&
- (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
- devlink_port->type_eth.ifindex) ||
- nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
- devlink_port->type_eth.ifname)))
- goto nla_put_failure_type_locked;
- }
- if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
- struct ib_device *ibdev = devlink_port->type_ib.ibdev;
-
- if (ibdev &&
- nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
- ibdev->name))
- goto nla_put_failure_type_locked;
- }
- spin_unlock_bh(&devlink_port->type_lock);
- if (devlink_nl_port_attrs_put(msg, devlink_port))
- goto nla_put_failure;
- if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
- goto nla_put_failure;
- if (devlink_port->linecard &&
- nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
- devlink_port->linecard->index))
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure_type_locked:
- spin_unlock_bh(&devlink_port->type_lock);
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static void devlink_port_notify(struct devlink_port *devlink_port,
- enum devlink_command cmd)
-{
- struct devlink *devlink = devlink_port->devlink;
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
-
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_port_fill(msg, devlink_port, cmd, 0, 0, 0, NULL);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
- 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-static void devlink_rate_notify(struct devlink_rate *devlink_rate,
- enum devlink_command cmd)
-{
- struct devlink *devlink = devlink_rate->devlink;
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL);
-
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_rate_fill(msg, devlink_rate, cmd, 0, 0, 0, NULL);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
- 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-static int
-devlink_nl_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb, int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_rate *devlink_rate;
- int idx = 0;
- int err = 0;
-
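- /* state->idx records how far the previous dump pass got; skip
- * entries that were already emitted.
- */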
- list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
- enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
- u32 id = NETLINK_CB(cb->skb).portid;
-
- if (idx < state->idx) {
- idx++;
- continue;
- }
- err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
- cb->nlh->nlmsg_seq, flags, NULL);
- if (err) {
- state->idx = idx;
- break;
- }
- idx++;
- }
-
- return err;
-}
-
-int devlink_nl_rate_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_rate_get_dump_one);
-}
-
-int devlink_nl_rate_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_rate *devlink_rate;
- struct sk_buff *msg;
- int err;
-
- devlink_rate = devlink_rate_get_from_info(devlink, info);
- if (IS_ERR(devlink_rate))
- return PTR_ERR(devlink_rate);
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_rate_fill(msg, devlink_rate, DEVLINK_CMD_RATE_NEW,
- info->snd_portid, info->snd_seq, 0,
- info->extack);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
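- /* Return true if @devlink_rate is @parent itself or one of its
- * ancestors; used to reject re-parenting that would create a cycle.
- */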
-static bool
-devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
- struct devlink_rate *parent)
-{
- while (parent) {
- if (parent == devlink_rate)
- return true;
- parent = parent->parent;
- }
- return false;
-}
-
-int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct sk_buff *msg;
- int err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
- info->snd_portid, info->snd_seq, 0,
- info->extack);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int
-devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb, int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_port *devlink_port;
- unsigned long port_index;
- int err = 0;
-
- xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
- err = devlink_nl_port_fill(msg, devlink_port,
- DEVLINK_CMD_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags,
- cb->extack);
- if (err) {
- state->idx = port_index;
- break;
- }
- }
-
- return err;
-}
-
-int devlink_nl_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_port_get_dump_one);
-}
-
-static int devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type port_type)
-
-{
- int err;
-
- if (!devlink_port->ops->port_type_set)
- return -EOPNOTSUPP;
-
- if (port_type == devlink_port->type)
- return 0;
-
- err = devlink_port->ops->port_type_set(devlink_port, port_type);
- if (err)
- return err;
-
- devlink_port->desired_type = port_type;
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
- return 0;
-}
-
-static int devlink_port_function_hw_addr_set(struct devlink_port *port,
- const struct nlattr *attr,
- struct netlink_ext_ack *extack)
-{
- const u8 *hw_addr;
- int hw_addr_len;
-
- hw_addr = nla_data(attr);
- hw_addr_len = nla_len(attr);
- if (hw_addr_len > MAX_ADDR_LEN) {
- NL_SET_ERR_MSG(extack, "Port function hardware address too long");
- return -EINVAL;
- }
- if (port->type == DEVLINK_PORT_TYPE_ETH) {
- if (hw_addr_len != ETH_ALEN) {
- NL_SET_ERR_MSG(extack, "Address must be 6 bytes for Ethernet device");
- return -EINVAL;
- }
- if (!is_unicast_ether_addr(hw_addr)) {
- NL_SET_ERR_MSG(extack, "Non-unicast hardware address unsupported");
- return -EINVAL;
- }
- }
-
- return port->ops->port_fn_hw_addr_set(port, hw_addr, hw_addr_len,
- extack);
-}
-
-static int devlink_port_fn_state_set(struct devlink_port *port,
- const struct nlattr *attr,
- struct netlink_ext_ack *extack)
-{
- enum devlink_port_fn_state state;
-
- state = nla_get_u8(attr);
- return port->ops->port_fn_state_set(port, state, extack);
-}
-
-static int devlink_port_function_validate(struct devlink_port *devlink_port,
- struct nlattr **tb,
- struct netlink_ext_ack *extack)
-{
- const struct devlink_port_ops *ops = devlink_port->ops;
- struct nlattr *attr;
-
- if (tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] &&
- !ops->port_fn_hw_addr_set) {
- NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR],
- "Port doesn't support function attributes");
- return -EOPNOTSUPP;
- }
- if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) {
- NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FN_ATTR_STATE],
- "Function does not support state setting");
- return -EOPNOTSUPP;
- }
- attr = tb[DEVLINK_PORT_FN_ATTR_CAPS];
- if (attr) {
- struct nla_bitfield32 caps;
-
- caps = nla_get_bitfield32(attr);
- if (caps.selector & DEVLINK_PORT_FN_CAP_ROCE &&
- !ops->port_fn_roce_set) {
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "Port doesn't support RoCE function attribute");
- return -EOPNOTSUPP;
- }
- if (caps.selector & DEVLINK_PORT_FN_CAP_MIGRATABLE) {
- if (!ops->port_fn_migratable_set) {
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "Port doesn't support migratable function attribute");
- return -EOPNOTSUPP;
- }
- if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF) {
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "migratable function attribute supported for VFs only");
- return -EOPNOTSUPP;
- }
- }
- }
- return 0;
-}
-
-static int devlink_port_function_set(struct devlink_port *port,
- const struct nlattr *attr,
- struct netlink_ext_ack *extack)
-{
- struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1];
- int err;
-
- err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
- devlink_function_nl_policy, extack);
- if (err < 0) {
- NL_SET_ERR_MSG(extack, "Fail to parse port function attributes");
- return err;
- }
-
- err = devlink_port_function_validate(port, tb, extack);
- if (err)
- return err;
-
- attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR];
- if (attr) {
- err = devlink_port_function_hw_addr_set(port, attr, extack);
- if (err)
- return err;
- }
-
- attr = tb[DEVLINK_PORT_FN_ATTR_CAPS];
- if (attr) {
- err = devlink_port_fn_caps_set(port, attr, extack);
- if (err)
- return err;
- }
-
- /* Keep state as the last function attribute to be set, so that
- * when multiple port function attributes are set along with state,
- * the others are applied before the state is activated.
- */
- attr = tb[DEVLINK_PORT_FN_ATTR_STATE];
- if (attr)
- err = devlink_port_fn_state_set(port, attr, extack);
-
- if (!err)
- devlink_port_notify(port, DEVLINK_CMD_PORT_NEW);
- return err;
-}
-
-static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- int err;
-
- if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
- enum devlink_port_type port_type;
-
- port_type = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_TYPE]);
- err = devlink_port_type_set(devlink_port, port_type);
- if (err)
- return err;
- }
-
- if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) {
- struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION];
- struct netlink_ext_ack *extack = info->extack;
-
- err = devlink_port_function_set(devlink_port, attr, extack);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct devlink *devlink = info->user_ptr[0];
- u32 count;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT))
- return -EINVAL;
- if (!devlink_port->ops->port_split)
- return -EOPNOTSUPP;
-
- count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
-
- if (!devlink_port->attrs.splittable) {
- /* Split ports cannot be split. */
- if (devlink_port->attrs.split)
- NL_SET_ERR_MSG(info->extack, "Port cannot be split further");
- else
- NL_SET_ERR_MSG(info->extack, "Port cannot be split");
- return -EINVAL;
- }
-
- if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
- NL_SET_ERR_MSG(info->extack, "Invalid split count");
- return -EINVAL;
- }
-
- return devlink_port->ops->port_split(devlink, devlink_port, count,
- info->extack);
-}
-
-static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct devlink *devlink = info->user_ptr[0];
-
- if (!devlink_port->ops->port_unsplit)
- return -EOPNOTSUPP;
- return devlink_port->ops->port_unsplit(devlink, devlink_port, info->extack);
-}
-
-static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct devlink_port_new_attrs new_attrs = {};
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_port *devlink_port;
- struct sk_buff *msg;
- int err;
-
- if (!devlink->ops->port_new)
- return -EOPNOTSUPP;
-
- if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] ||
- !info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]) {
- NL_SET_ERR_MSG(extack, "Port flavour or PCI PF are not specified");
- return -EINVAL;
- }
- new_attrs.flavour = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_FLAVOUR]);
- new_attrs.pfnum =
- nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]);
-
- if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
- /* Port index of the new port being created by the driver. */
- new_attrs.port_index =
- nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
- new_attrs.port_index_valid = true;
- }
- if (info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]) {
- new_attrs.controller =
- nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]);
- new_attrs.controller_valid = true;
- }
- if (new_attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_SF &&
- info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]) {
- new_attrs.sfnum = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]);
- new_attrs.sfnum_valid = true;
- }
-
- err = devlink->ops->port_new(devlink, &new_attrs,
- extack, &devlink_port);
- if (err)
- return err;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg) {
- err = -ENOMEM;
- goto err_out_port_del;
- }
- err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
- info->snd_portid, info->snd_seq, 0, NULL);
- if (WARN_ON_ONCE(err))
- goto err_out_msg_free;
- err = genlmsg_reply(msg, info);
- if (err)
- goto err_out_port_del;
- return 0;
-
-err_out_msg_free:
- nlmsg_free(msg);
-err_out_port_del:
- devlink_port->ops->port_del(devlink, devlink_port, NULL);
- return err;
-}
-
-static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
-
- if (!devlink_port->ops->port_del)
- return -EOPNOTSUPP;
-
- return devlink_port->ops->port_del(devlink, devlink_port, extack);
-}
-
-static int
-devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
- struct genl_info *info,
- struct nlattr *nla_parent)
-{
- struct devlink *devlink = devlink_rate->devlink;
- const char *parent_name = nla_data(nla_parent);
- const struct devlink_ops *ops = devlink->ops;
- size_t len = strlen(parent_name);
- struct devlink_rate *parent;
- int err = -EOPNOTSUPP;
-
- parent = devlink_rate->parent;
-
- if (parent && !len) {
- if (devlink_rate_is_leaf(devlink_rate))
- err = ops->rate_leaf_parent_set(devlink_rate, NULL,
- devlink_rate->priv, NULL,
- info->extack);
- else if (devlink_rate_is_node(devlink_rate))
- err = ops->rate_node_parent_set(devlink_rate, NULL,
- devlink_rate->priv, NULL,
- info->extack);
- if (err)
- return err;
-
- refcount_dec(&parent->refcnt);
- devlink_rate->parent = NULL;
- } else if (len) {
- parent = devlink_rate_node_get_by_name(devlink, parent_name);
- if (IS_ERR(parent))
- return -ENODEV;
-
- if (parent == devlink_rate) {
- NL_SET_ERR_MSG(info->extack, "Parent to self is not allowed");
- return -EINVAL;
- }
-
- if (devlink_rate_is_node(devlink_rate) &&
- devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
- NL_SET_ERR_MSG(info->extack, "Node is already a parent of parent node.");
- return -EEXIST;
- }
-
- if (devlink_rate_is_leaf(devlink_rate))
- err = ops->rate_leaf_parent_set(devlink_rate, parent,
- devlink_rate->priv, parent->priv,
- info->extack);
- else if (devlink_rate_is_node(devlink_rate))
- err = ops->rate_node_parent_set(devlink_rate, parent,
- devlink_rate->priv, parent->priv,
- info->extack);
- if (err)
- return err;
-
- if (devlink_rate->parent)
- /* we're reassigning to another parent in this case */
- refcount_dec(&devlink_rate->parent->refcnt);
-
- refcount_inc(&parent->refcnt);
- devlink_rate->parent = parent;
- }
-
- return 0;
-}
-
-static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
- const struct devlink_ops *ops,
- struct genl_info *info)
-{
- struct nlattr *nla_parent, **attrs = info->attrs;
- int err = -EOPNOTSUPP;
- u32 priority;
- u32 weight;
- u64 rate;
-
- if (attrs[DEVLINK_ATTR_RATE_TX_SHARE]) {
- rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_SHARE]);
- if (devlink_rate_is_leaf(devlink_rate))
- err = ops->rate_leaf_tx_share_set(devlink_rate, devlink_rate->priv,
- rate, info->extack);
- else if (devlink_rate_is_node(devlink_rate))
- err = ops->rate_node_tx_share_set(devlink_rate, devlink_rate->priv,
- rate, info->extack);
- if (err)
- return err;
- devlink_rate->tx_share = rate;
- }
-
- if (attrs[DEVLINK_ATTR_RATE_TX_MAX]) {
- rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_MAX]);
- if (devlink_rate_is_leaf(devlink_rate))
- err = ops->rate_leaf_tx_max_set(devlink_rate, devlink_rate->priv,
- rate, info->extack);
- else if (devlink_rate_is_node(devlink_rate))
- err = ops->rate_node_tx_max_set(devlink_rate, devlink_rate->priv,
- rate, info->extack);
- if (err)
- return err;
- devlink_rate->tx_max = rate;
- }
-
- if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY]) {
- priority = nla_get_u32(attrs[DEVLINK_ATTR_RATE_TX_PRIORITY]);
- if (devlink_rate_is_leaf(devlink_rate))
- err = ops->rate_leaf_tx_priority_set(devlink_rate, devlink_rate->priv,
- priority, info->extack);
- else if (devlink_rate_is_node(devlink_rate))
- err = ops->rate_node_tx_priority_set(devlink_rate, devlink_rate->priv,
- priority, info->extack);
-
- if (err)
- return err;
- devlink_rate->tx_priority = priority;
- }
-
- if (attrs[DEVLINK_ATTR_RATE_TX_WEIGHT]) {
- weight = nla_get_u32(attrs[DEVLINK_ATTR_RATE_TX_WEIGHT]);
- if (devlink_rate_is_leaf(devlink_rate))
- err = ops->rate_leaf_tx_weight_set(devlink_rate, devlink_rate->priv,
- weight, info->extack);
- else if (devlink_rate_is_node(devlink_rate))
- err = ops->rate_node_tx_weight_set(devlink_rate, devlink_rate->priv,
- weight, info->extack);
-
- if (err)
- return err;
- devlink_rate->tx_weight = weight;
- }
-
- nla_parent = attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME];
- if (nla_parent) {
- err = devlink_nl_rate_parent_node_set(devlink_rate, info,
- nla_parent);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
- struct genl_info *info,
- enum devlink_rate_type type)
-{
- struct nlattr **attrs = info->attrs;
-
- if (type == DEVLINK_RATE_TYPE_LEAF) {
- if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
- NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the leafs");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
- NL_SET_ERR_MSG(info->extack, "TX max set isn't supported for the leafs");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
- !ops->rate_leaf_parent_set) {
- NL_SET_ERR_MSG(info->extack, "Parent set isn't supported for the leafs");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY] && !ops->rate_leaf_tx_priority_set) {
- NL_SET_ERR_MSG_ATTR(info->extack,
- attrs[DEVLINK_ATTR_RATE_TX_PRIORITY],
- "TX priority set isn't supported for the leafs");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_TX_WEIGHT] && !ops->rate_leaf_tx_weight_set) {
- NL_SET_ERR_MSG_ATTR(info->extack,
- attrs[DEVLINK_ATTR_RATE_TX_WEIGHT],
- "TX weight set isn't supported for the leafs");
- return false;
- }
- } else if (type == DEVLINK_RATE_TYPE_NODE) {
- if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
- NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the nodes");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
- NL_SET_ERR_MSG(info->extack, "TX max set isn't supported for the nodes");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
- !ops->rate_node_parent_set) {
- NL_SET_ERR_MSG(info->extack, "Parent set isn't supported for the nodes");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY] && !ops->rate_node_tx_priority_set) {
- NL_SET_ERR_MSG_ATTR(info->extack,
- attrs[DEVLINK_ATTR_RATE_TX_PRIORITY],
- "TX priority set isn't supported for the nodes");
- return false;
- }
- if (attrs[DEVLINK_ATTR_RATE_TX_WEIGHT] && !ops->rate_node_tx_weight_set) {
- NL_SET_ERR_MSG_ATTR(info->extack,
- attrs[DEVLINK_ATTR_RATE_TX_WEIGHT],
- "TX weight set isn't supported for the nodes");
- return false;
- }
- } else {
- WARN(1, "Unknown type of rate object");
- return false;
- }
-
- return true;
-}
-
-static int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_rate *devlink_rate;
- const struct devlink_ops *ops;
- int err;
-
- devlink_rate = devlink_rate_get_from_info(devlink, info);
- if (IS_ERR(devlink_rate))
- return PTR_ERR(devlink_rate);
-
- ops = devlink->ops;
- if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
- return -EOPNOTSUPP;
-
- err = devlink_nl_rate_set(devlink_rate, ops, info);
-
- if (!err)
- devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
- return err;
-}
-
-static int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_rate *rate_node;
- const struct devlink_ops *ops;
- int err;
-
- ops = devlink->ops;
- if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
- NL_SET_ERR_MSG(info->extack, "Rate nodes aren't supported");
- return -EOPNOTSUPP;
- }
-
- if (!devlink_rate_set_ops_supported(ops, info, DEVLINK_RATE_TYPE_NODE))
- return -EOPNOTSUPP;
-
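- /* A successful lookup means a node with this name already exists,
- * so creation is refused with -EEXIST; a lookup error of -EINVAL is
- * returned to the caller as-is.
- */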
- rate_node = devlink_rate_node_get_from_attrs(devlink, info->attrs);
- if (!IS_ERR(rate_node))
- return -EEXIST;
- else if (rate_node == ERR_PTR(-EINVAL))
- return -EINVAL;
-
- rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
- if (!rate_node)
- return -ENOMEM;
-
- rate_node->devlink = devlink;
- rate_node->type = DEVLINK_RATE_TYPE_NODE;
- rate_node->name = nla_strdup(info->attrs[DEVLINK_ATTR_RATE_NODE_NAME], GFP_KERNEL);
- if (!rate_node->name) {
- err = -ENOMEM;
- goto err_strdup;
- }
-
- err = ops->rate_node_new(rate_node, &rate_node->priv, info->extack);
- if (err)
- goto err_node_new;
-
- err = devlink_nl_rate_set(rate_node, ops, info);
- if (err)
- goto err_rate_set;
-
- refcount_set(&rate_node->refcnt, 1);
- list_add(&rate_node->list, &devlink->rate_list);
- devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
- return 0;
-
-err_rate_set:
- ops->rate_node_del(rate_node, rate_node->priv, info->extack);
-err_node_new:
- kfree(rate_node->name);
-err_strdup:
- kfree(rate_node);
- return err;
-}
-
-static int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_rate *rate_node;
- int err;
-
- rate_node = devlink_rate_node_get_from_info(devlink, info);
- if (IS_ERR(rate_node))
- return PTR_ERR(rate_node);
-
- if (refcount_read(&rate_node->refcnt) > 1) {
- NL_SET_ERR_MSG(info->extack, "Node has children. Cannot delete node.");
- return -EBUSY;
- }
-
- devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
- err = devlink->ops->rate_node_del(rate_node, rate_node->priv,
- info->extack);
- if (rate_node->parent)
- refcount_dec(&rate_node->parent->refcnt);
- list_del(&rate_node->list);
- kfree(rate_node->name);
- kfree(rate_node);
- return err;
-}
-
-struct devlink_linecard_type {
- const char *type;
- const void *priv;
-};
-
-static int devlink_nl_linecard_fill(struct sk_buff *msg,
- struct devlink *devlink,
- struct devlink_linecard *linecard,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags,
- struct netlink_ext_ack *extack)
-{
- struct devlink_linecard_type *linecard_type;
- struct nlattr *attr;
- void *hdr;
- int i;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
- goto nla_put_failure;
- if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state))
- goto nla_put_failure;
- if (linecard->type &&
- nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type))
- goto nla_put_failure;
-
- if (linecard->types_count) {
- attr = nla_nest_start(msg,
- DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES);
- if (!attr)
- goto nla_put_failure;
- for (i = 0; i < linecard->types_count; i++) {
- linecard_type = &linecard->types[i];
- if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE,
- linecard_type->type)) {
- nla_nest_cancel(msg, attr);
- goto nla_put_failure;
- }
- }
- nla_nest_end(msg, attr);
- }
-
- if (linecard->nested_devlink &&
- devlink_nl_put_nested_handle(msg, linecard->nested_devlink))
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static void devlink_linecard_notify(struct devlink_linecard *linecard,
- enum devlink_command cmd)
-{
- struct devlink *devlink = linecard->devlink;
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW &&
- cmd != DEVLINK_CMD_LINECARD_DEL);
-
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0,
- NULL);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_linecard *linecard;
- struct sk_buff *msg;
- int err;
-
- linecard = devlink_linecard_get_from_info(devlink, info);
- if (IS_ERR(linecard))
- return PTR_ERR(linecard);
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- mutex_lock(&linecard->state_lock);
- err = devlink_nl_linecard_fill(msg, devlink, linecard,
- DEVLINK_CMD_LINECARD_NEW,
- info->snd_portid, info->snd_seq, 0,
- info->extack);
- mutex_unlock(&linecard->state_lock);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int devlink_nl_linecard_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb,
- int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_linecard *linecard;
- int idx = 0;
- int err = 0;
-
- list_for_each_entry(linecard, &devlink->linecard_list, list) {
- if (idx < state->idx) {
- idx++;
- continue;
- }
- mutex_lock(&linecard->state_lock);
- err = devlink_nl_linecard_fill(msg, devlink, linecard,
- DEVLINK_CMD_LINECARD_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags,
- cb->extack);
- mutex_unlock(&linecard->state_lock);
- if (err) {
- state->idx = idx;
- break;
- }
- idx++;
- }
-
- return err;
-}
-
-int devlink_nl_linecard_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_linecard_get_dump_one);
-}
-
-static struct devlink_linecard_type *
-devlink_linecard_type_lookup(struct devlink_linecard *linecard,
- const char *type)
-{
- struct devlink_linecard_type *linecard_type;
- int i;
-
- for (i = 0; i < linecard->types_count; i++) {
- linecard_type = &linecard->types[i];
- if (!strcmp(type, linecard_type->type))
- return linecard_type;
- }
- return NULL;
-}
-
-static int devlink_linecard_type_set(struct devlink_linecard *linecard,
- const char *type,
- struct netlink_ext_ack *extack)
-{
- const struct devlink_linecard_ops *ops = linecard->ops;
- struct devlink_linecard_type *linecard_type;
- int err;
-
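- /* Provisioning state transitions are serialized by state_lock; the
- * driver's provision callback itself is invoked with the lock dropped.
- */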
- mutex_lock(&linecard->state_lock);
- if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
- NL_SET_ERR_MSG(extack, "Line card is currently being provisioned");
- err = -EBUSY;
- goto out;
- }
- if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
- NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned");
- err = -EBUSY;
- goto out;
- }
-
- linecard_type = devlink_linecard_type_lookup(linecard, type);
- if (!linecard_type) {
- NL_SET_ERR_MSG(extack, "Unsupported line card type provided");
- err = -EINVAL;
- goto out;
- }
-
- if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
- linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
- NL_SET_ERR_MSG(extack, "Line card already provisioned");
- err = -EBUSY;
- /* Check if the line card is already provisioned in the
- * same way the user asks. If it is, make the operation
- * return success.
- */
- if (ops->same_provision &&
- ops->same_provision(linecard, linecard->priv,
- linecard_type->type,
- linecard_type->priv))
- err = 0;
- goto out;
- }
-
- linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING;
- linecard->type = linecard_type->type;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
- err = ops->provision(linecard, linecard->priv, linecard_type->type,
- linecard_type->priv, extack);
- if (err) {
- /* Provisioning failed. Assume the linecard is unprovisioned
- * for future operations.
- */
- mutex_lock(&linecard->state_lock);
- linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
- linecard->type = NULL;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
- }
- return err;
-
-out:
- mutex_unlock(&linecard->state_lock);
- return err;
-}
-
-static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
- struct netlink_ext_ack *extack)
-{
- int err;
-
- mutex_lock(&linecard->state_lock);
- if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
- NL_SET_ERR_MSG(extack, "Line card is currently being provisioned");
- err = -EBUSY;
- goto out;
- }
- if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
- NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned");
- err = -EBUSY;
- goto out;
- }
- if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
- linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
- linecard->type = NULL;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- err = 0;
- goto out;
- }
-
- if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
- NL_SET_ERR_MSG(extack, "Line card is not provisioned");
- err = 0;
- goto out;
- }
- linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
- err = linecard->ops->unprovision(linecard, linecard->priv,
- extack);
- if (err) {
- /* Unprovisioning failed. Assume the linecard is unprovisioned
- * for future operations.
- */
- mutex_lock(&linecard->state_lock);
- linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
- linecard->type = NULL;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
- }
- return err;
-
-out:
- mutex_unlock(&linecard->state_lock);
- return err;
-}
-
-static int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_linecard *linecard;
- int err;
-
- linecard = devlink_linecard_get_from_info(devlink, info);
- if (IS_ERR(linecard))
- return PTR_ERR(linecard);
-
- if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
- const char *type;
-
- type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]);
- if (strcmp(type, "")) {
- err = devlink_linecard_type_set(linecard, type, extack);
- if (err)
- return err;
- } else {
- err = devlink_linecard_type_unset(linecard, extack);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
- struct devlink_sb *devlink_sb,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags)
-{
- void *hdr;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
- devlink_sb->ingress_pools_count))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
- devlink_sb->egress_pools_count))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
- devlink_sb->ingress_tc_count))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
- devlink_sb->egress_tc_count))
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_sb *devlink_sb;
- struct sk_buff *msg;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
- DEVLINK_CMD_SB_NEW,
- info->snd_portid, info->snd_seq, 0);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int
-devlink_nl_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb, int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_sb *devlink_sb;
- int idx = 0;
- int err = 0;
-
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- if (idx < state->idx) {
- idx++;
- continue;
- }
- err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
- DEVLINK_CMD_SB_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err) {
- state->idx = idx;
- break;
- }
- idx++;
- }
-
- return err;
-}
-
-int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_sb_get_dump_one);
-}
-
-static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
- struct devlink_sb *devlink_sb,
- u16 pool_index, enum devlink_command cmd,
- u32 portid, u32 seq, int flags)
-{
- struct devlink_sb_pool_info pool_info;
- void *hdr;
- int err;
-
- err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
- pool_index, &pool_info);
- if (err)
- return err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
- goto nla_put_failure;
- if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
- goto nla_put_failure;
- if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
- pool_info.threshold_type))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
- pool_info.cell_size))
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_sb *devlink_sb;
- struct sk_buff *msg;
- u16 pool_index;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
- &pool_index);
- if (err)
- return err;
-
- if (!devlink->ops->sb_pool_get)
- return -EOPNOTSUPP;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
- DEVLINK_CMD_SB_POOL_NEW,
- info->snd_portid, info->snd_seq, 0);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
- struct devlink *devlink,
- struct devlink_sb *devlink_sb,
- u32 portid, u32 seq, int flags)
-{
- u16 pool_count = devlink_sb_pool_count(devlink_sb);
- u16 pool_index;
- int err;
-
- for (pool_index = 0; pool_index < pool_count; pool_index++) {
- if (*p_idx < start) {
- (*p_idx)++;
- continue;
- }
- err = devlink_nl_sb_pool_fill(msg, devlink,
- devlink_sb,
- pool_index,
- DEVLINK_CMD_SB_POOL_NEW,
- portid, seq, flags);
- if (err)
- return err;
- (*p_idx)++;
- }
- return 0;
-}
-
-static int
-devlink_nl_sb_pool_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
- struct netlink_callback *cb, int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_sb *devlink_sb;
- int err = 0;
- int idx = 0;
-
- if (!devlink->ops->sb_pool_get)
- return 0;
-
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- err = __sb_pool_get_dumpit(msg, state->idx, &idx,
- devlink, devlink_sb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- state->idx = idx;
- break;
- }
- }
-
- return err;
-}
-
-int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_sb_pool_get_dump_one);
-}
-
-static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
- u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type,
- struct netlink_ext_ack *extack)
-
-{
- const struct devlink_ops *ops = devlink->ops;
-
- if (ops->sb_pool_set)
- return ops->sb_pool_set(devlink, sb_index, pool_index,
- size, threshold_type, extack);
- return -EOPNOTSUPP;
-}
-
-static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- enum devlink_sb_threshold_type threshold_type;
- struct devlink_sb *devlink_sb;
- u16 pool_index;
- u32 size;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
- &pool_index);
- if (err)
- return err;
-
- err = devlink_sb_th_type_get_from_info(info, &threshold_type);
- if (err)
- return err;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE))
- return -EINVAL;
-
- size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
- return devlink_sb_pool_set(devlink, devlink_sb->index,
- pool_index, size, threshold_type,
- info->extack);
-}
-
-static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
- struct devlink *devlink,
- struct devlink_port *devlink_port,
- struct devlink_sb *devlink_sb,
- u16 pool_index,
- enum devlink_command cmd,
- u32 portid, u32 seq, int flags)
-{
- const struct devlink_ops *ops = devlink->ops;
- u32 threshold;
- void *hdr;
- int err;
-
- err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
- pool_index, &threshold);
- if (err)
- return err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
- goto nla_put_failure;
-
- if (ops->sb_occ_port_pool_get) {
- u32 cur;
- u32 max;
-
- err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
- pool_index, &cur, &max);
- if (err && err != -EOPNOTSUPP)
- goto sb_occ_get_failure;
- if (!err) {
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
- goto nla_put_failure;
- }
- }
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- err = -EMSGSIZE;
-sb_occ_get_failure:
- genlmsg_cancel(msg, hdr);
- return err;
-}
-
-int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct devlink *devlink = devlink_port->devlink;
- struct devlink_sb *devlink_sb;
- struct sk_buff *msg;
- u16 pool_index;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
- &pool_index);
- if (err)
- return err;
-
- if (!devlink->ops->sb_port_pool_get)
- return -EOPNOTSUPP;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
- devlink_sb, pool_index,
- DEVLINK_CMD_SB_PORT_POOL_NEW,
- info->snd_portid, info->snd_seq, 0);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
- struct devlink *devlink,
- struct devlink_sb *devlink_sb,
- u32 portid, u32 seq, int flags)
-{
- struct devlink_port *devlink_port;
- u16 pool_count = devlink_sb_pool_count(devlink_sb);
- unsigned long port_index;
- u16 pool_index;
- int err;
-
- xa_for_each(&devlink->ports, port_index, devlink_port) {
- for (pool_index = 0; pool_index < pool_count; pool_index++) {
- if (*p_idx < start) {
- (*p_idx)++;
- continue;
- }
- err = devlink_nl_sb_port_pool_fill(msg, devlink,
- devlink_port,
- devlink_sb,
- pool_index,
- DEVLINK_CMD_SB_PORT_POOL_NEW,
- portid, seq, flags);
- if (err)
- return err;
- (*p_idx)++;
- }
- }
- return 0;
-}
-
-static int
-devlink_nl_sb_port_pool_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb, int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_sb *devlink_sb;
- int idx = 0;
- int err = 0;
-
- if (!devlink->ops->sb_port_pool_get)
- return 0;
-
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- err = __sb_port_pool_get_dumpit(msg, state->idx, &idx,
- devlink, devlink_sb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- state->idx = idx;
- break;
- }
- }
-
- return err;
-}
-
-int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_sb_port_pool_get_dump_one);
-}
-
-static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
- unsigned int sb_index, u16 pool_index,
- u32 threshold,
- struct netlink_ext_ack *extack)
-
-{
- const struct devlink_ops *ops = devlink_port->devlink->ops;
-
- if (ops->sb_port_pool_set)
- return ops->sb_port_pool_set(devlink_port, sb_index,
- pool_index, threshold, extack);
- return -EOPNOTSUPP;
-}
-
-static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_sb *devlink_sb;
- u16 pool_index;
- u32 threshold;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
- &pool_index);
- if (err)
- return err;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
- return -EINVAL;
-
- threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
- return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
- pool_index, threshold, info->extack);
-}
-
-static int
-devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
- struct devlink_port *devlink_port,
- struct devlink_sb *devlink_sb, u16 tc_index,
- enum devlink_sb_pool_type pool_type,
- enum devlink_command cmd,
- u32 portid, u32 seq, int flags)
-{
- const struct devlink_ops *ops = devlink->ops;
- u16 pool_index;
- u32 threshold;
- void *hdr;
- int err;
-
- err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
- tc_index, pool_type,
- &pool_index, &threshold);
- if (err)
- return err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
- goto nla_put_failure;
- if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
- goto nla_put_failure;
- if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
- goto nla_put_failure;
-
- if (ops->sb_occ_tc_port_bind_get) {
- u32 cur;
- u32 max;
-
- err = ops->sb_occ_tc_port_bind_get(devlink_port,
- devlink_sb->index,
- tc_index, pool_type,
- &cur, &max);
- if (err && err != -EOPNOTSUPP)
- return err;
- if (!err) {
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
- goto nla_put_failure;
- if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
- goto nla_put_failure;
- }
- }
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct devlink *devlink = devlink_port->devlink;
- struct devlink_sb *devlink_sb;
- struct sk_buff *msg;
- enum devlink_sb_pool_type pool_type;
- u16 tc_index;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- err = devlink_sb_pool_type_get_from_info(info, &pool_type);
- if (err)
- return err;
-
- err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
- pool_type, &tc_index);
- if (err)
- return err;
-
- if (!devlink->ops->sb_tc_pool_bind_get)
- return -EOPNOTSUPP;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
- devlink_sb, tc_index, pool_type,
- DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
- info->snd_portid,
- info->snd_seq, 0);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
- int start, int *p_idx,
- struct devlink *devlink,
- struct devlink_sb *devlink_sb,
- u32 portid, u32 seq, int flags)
-{
- struct devlink_port *devlink_port;
- unsigned long port_index;
- u16 tc_index;
- int err;
-
- xa_for_each(&devlink->ports, port_index, devlink_port) {
- for (tc_index = 0;
- tc_index < devlink_sb->ingress_tc_count; tc_index++) {
- if (*p_idx < start) {
- (*p_idx)++;
- continue;
- }
- err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
- devlink_port,
- devlink_sb,
- tc_index,
- DEVLINK_SB_POOL_TYPE_INGRESS,
- DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
- portid, seq,
- flags);
- if (err)
- return err;
- (*p_idx)++;
- }
- for (tc_index = 0;
- tc_index < devlink_sb->egress_tc_count; tc_index++) {
- if (*p_idx < start) {
- (*p_idx)++;
- continue;
- }
- err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
- devlink_port,
- devlink_sb,
- tc_index,
- DEVLINK_SB_POOL_TYPE_EGRESS,
- DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
- portid, seq,
- flags);
- if (err)
- return err;
- (*p_idx)++;
- }
- }
- return 0;
-}
-
-static int devlink_nl_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb,
- int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_sb *devlink_sb;
- int idx = 0;
- int err = 0;
-
- if (!devlink->ops->sb_tc_pool_bind_get)
- return 0;
-
- list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
- err = __sb_tc_pool_bind_get_dumpit(msg, state->idx, &idx,
- devlink, devlink_sb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- state->idx = idx;
- break;
- }
- }
-
- return err;
-}
-
-int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb,
- devlink_nl_sb_tc_pool_bind_get_dump_one);
-}
-
-static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
- unsigned int sb_index, u16 tc_index,
- enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold,
- struct netlink_ext_ack *extack)
-
-{
- const struct devlink_ops *ops = devlink_port->devlink->ops;
-
- if (ops->sb_tc_pool_bind_set)
- return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
- tc_index, pool_type,
- pool_index, threshold, extack);
- return -EOPNOTSUPP;
-}
-
-static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_port *devlink_port = info->user_ptr[1];
- struct devlink *devlink = info->user_ptr[0];
- enum devlink_sb_pool_type pool_type;
- struct devlink_sb *devlink_sb;
- u16 tc_index;
- u16 pool_index;
- u32 threshold;
- int err;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- err = devlink_sb_pool_type_get_from_info(info, &pool_type);
- if (err)
- return err;
-
- err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
- pool_type, &tc_index);
- if (err)
- return err;
-
- err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
- &pool_index);
- if (err)
- return err;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
- return -EINVAL;
-
- threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
- return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
- tc_index, pool_type,
- pool_index, threshold, info->extack);
-}
-
-static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- const struct devlink_ops *ops = devlink->ops;
- struct devlink_sb *devlink_sb;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- if (ops->sb_occ_snapshot)
- return ops->sb_occ_snapshot(devlink, devlink_sb->index);
- return -EOPNOTSUPP;
-}
-
-static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- const struct devlink_ops *ops = devlink->ops;
- struct devlink_sb *devlink_sb;
-
- devlink_sb = devlink_sb_get_from_info(devlink, info);
- if (IS_ERR(devlink_sb))
- return PTR_ERR(devlink_sb);
-
- if (ops->sb_occ_max_clear)
- return ops->sb_occ_max_clear(devlink, devlink_sb->index);
- return -EOPNOTSUPP;
-}
-
-int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
- struct netlink_ext_ack *extack)
-{
- struct devlink_rate *devlink_rate;
-
- list_for_each_entry(devlink_rate, &devlink->rate_list, list)
- if (devlink_rate_is_node(devlink_rate)) {
- NL_SET_ERR_MSG(extack, "Rate node(s) exists.");
- return -EBUSY;
- }
- return 0;
-}
-
-int devlink_dpipe_match_put(struct sk_buff *skb,
- struct devlink_dpipe_match *match)
-{
- struct devlink_dpipe_header *header = match->header;
- struct devlink_dpipe_field *field = &header->fields[match->field_id];
- struct nlattr *match_attr;
-
- match_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_MATCH);
- if (!match_attr)
- return -EMSGSIZE;
-
- if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_MATCH_TYPE, match->type) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, match->header_index) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
- nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
- goto nla_put_failure;
-
- nla_nest_end(skb, match_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, match_attr);
- return -EMSGSIZE;
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_match_put);
-
-static int devlink_dpipe_matches_put(struct devlink_dpipe_table *table,
- struct sk_buff *skb)
-{
- struct nlattr *matches_attr;
-
- matches_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_TABLE_MATCHES);
- if (!matches_attr)
- return -EMSGSIZE;
-
- if (table->table_ops->matches_dump(table->priv, skb))
- goto nla_put_failure;
-
- nla_nest_end(skb, matches_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, matches_attr);
- return -EMSGSIZE;
-}
-
-int devlink_dpipe_action_put(struct sk_buff *skb,
- struct devlink_dpipe_action *action)
-{
- struct devlink_dpipe_header *header = action->header;
- struct devlink_dpipe_field *field = &header->fields[action->field_id];
- struct nlattr *action_attr;
-
- action_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ACTION);
- if (!action_attr)
- return -EMSGSIZE;
-
- if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_ACTION_TYPE, action->type) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_INDEX, action->header_index) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
- nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
- goto nla_put_failure;
-
- nla_nest_end(skb, action_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, action_attr);
- return -EMSGSIZE;
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_action_put);
-
-static int devlink_dpipe_actions_put(struct devlink_dpipe_table *table,
- struct sk_buff *skb)
-{
- struct nlattr *actions_attr;
-
- actions_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_TABLE_ACTIONS);
- if (!actions_attr)
- return -EMSGSIZE;
-
- if (table->table_ops->actions_dump(table->priv, skb))
- goto nla_put_failure;
-
- nla_nest_end(skb, actions_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, actions_attr);
- return -EMSGSIZE;
-}
-
-static int devlink_dpipe_table_put(struct sk_buff *skb,
- struct devlink_dpipe_table *table)
-{
- struct nlattr *table_attr;
- u64 table_size;
-
- table_size = table->table_ops->size_get(table->priv);
- table_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLE);
- if (!table_attr)
- return -EMSGSIZE;
-
- if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_TABLE_NAME, table->name) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_SIZE, table_size,
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- if (nla_put_u8(skb, DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED,
- table->counters_enabled))
- goto nla_put_failure;
-
- if (table->resource_valid) {
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
- table->resource_id, DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
- table->resource_units, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- }
- if (devlink_dpipe_matches_put(table, skb))
- goto nla_put_failure;
-
- if (devlink_dpipe_actions_put(table, skb))
- goto nla_put_failure;
-
- nla_nest_end(skb, table_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, table_attr);
- return -EMSGSIZE;
-}
-
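- /* Send out any partially filled skb as a reply and allocate a fresh
- * one, so large dpipe table dumps can span several messages.
- */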
-static int devlink_dpipe_send_and_alloc_skb(struct sk_buff **pskb,
- struct genl_info *info)
-{
- int err;
-
- if (*pskb) {
- err = genlmsg_reply(*pskb, info);
- if (err)
- return err;
- }
- *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!*pskb)
- return -ENOMEM;
- return 0;
-}
-
-static int devlink_dpipe_tables_fill(struct genl_info *info,
- enum devlink_command cmd, int flags,
- struct list_head *dpipe_tables,
- const char *table_name)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_dpipe_table *table;
- struct nlattr *tables_attr;
- struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
- bool incomplete;
- void *hdr;
- int i;
- int err;
-
- table = list_first_entry(dpipe_tables,
- struct devlink_dpipe_table, list);
-start_again:
- err = devlink_dpipe_send_and_alloc_skb(&skb, info);
- if (err)
- return err;
-
- hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
- &devlink_nl_family, NLM_F_MULTI, cmd);
- if (!hdr) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- if (devlink_nl_put_handle(skb, devlink))
- goto nla_put_failure;
- tables_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_TABLES);
- if (!tables_attr)
- goto nla_put_failure;
-
- i = 0;
- incomplete = false;
- list_for_each_entry_from(table, dpipe_tables, list) {
- if (!table_name) {
- err = devlink_dpipe_table_put(skb, table);
- if (err) {
- if (!i)
- goto err_table_put;
- incomplete = true;
- break;
- }
- } else {
- if (!strcmp(table->name, table_name)) {
- err = devlink_dpipe_table_put(skb, table);
- if (err)
- break;
- }
- }
- i++;
- }
-
- nla_nest_end(skb, tables_attr);
- genlmsg_end(skb, hdr);
- if (incomplete)
- goto start_again;
-
-send_done:
- nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
- NLMSG_DONE, 0, flags | NLM_F_MULTI);
- if (!nlh) {
- err = devlink_dpipe_send_and_alloc_skb(&skb, info);
- if (err)
- return err;
- goto send_done;
- }
-
- return genlmsg_reply(skb, info);
-
-nla_put_failure:
- err = -EMSGSIZE;
-err_table_put:
- nlmsg_free(skb);
- return err;
-}
-
-static int devlink_nl_cmd_dpipe_table_get(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- const char *table_name = NULL;
-
- if (info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME])
- table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
-
- return devlink_dpipe_tables_fill(info, DEVLINK_CMD_DPIPE_TABLE_GET, 0,
- &devlink->dpipe_table_list,
- table_name);
-}
-
-static int devlink_dpipe_value_put(struct sk_buff *skb,
- struct devlink_dpipe_value *value)
-{
- if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE,
- value->value_size, value->value))
- return -EMSGSIZE;
- if (value->mask)
- if (nla_put(skb, DEVLINK_ATTR_DPIPE_VALUE_MASK,
- value->value_size, value->mask))
- return -EMSGSIZE;
- if (value->mapping_valid)
- if (nla_put_u32(skb, DEVLINK_ATTR_DPIPE_VALUE_MAPPING,
- value->mapping_value))
- return -EMSGSIZE;
- return 0;
-}
-
-static int devlink_dpipe_action_value_put(struct sk_buff *skb,
- struct devlink_dpipe_value *value)
-{
- if (!value->action)
- return -EINVAL;
- if (devlink_dpipe_action_put(skb, value->action))
- return -EMSGSIZE;
- if (devlink_dpipe_value_put(skb, value))
- return -EMSGSIZE;
- return 0;
-}
-
-static int devlink_dpipe_action_values_put(struct sk_buff *skb,
- struct devlink_dpipe_value *values,
- unsigned int values_count)
-{
- struct nlattr *action_attr;
- int i;
- int err;
-
- for (i = 0; i < values_count; i++) {
- action_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_ACTION_VALUE);
- if (!action_attr)
- return -EMSGSIZE;
- err = devlink_dpipe_action_value_put(skb, &values[i]);
- if (err)
- goto err_action_value_put;
- nla_nest_end(skb, action_attr);
- }
- return 0;
-
-err_action_value_put:
- nla_nest_cancel(skb, action_attr);
- return err;
-}
-
-static int devlink_dpipe_match_value_put(struct sk_buff *skb,
- struct devlink_dpipe_value *value)
-{
- if (!value->match)
- return -EINVAL;
- if (devlink_dpipe_match_put(skb, value->match))
- return -EMSGSIZE;
- if (devlink_dpipe_value_put(skb, value))
- return -EMSGSIZE;
- return 0;
-}
-
-static int devlink_dpipe_match_values_put(struct sk_buff *skb,
- struct devlink_dpipe_value *values,
- unsigned int values_count)
-{
- struct nlattr *match_attr;
- int i;
- int err;
-
- for (i = 0; i < values_count; i++) {
- match_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_MATCH_VALUE);
- if (!match_attr)
- return -EMSGSIZE;
- err = devlink_dpipe_match_value_put(skb, &values[i]);
- if (err)
- goto err_match_value_put;
- nla_nest_end(skb, match_attr);
- }
- return 0;
-
-err_match_value_put:
- nla_nest_cancel(skb, match_attr);
- return err;
-}
-
-static int devlink_dpipe_entry_put(struct sk_buff *skb,
- struct devlink_dpipe_entry *entry)
-{
- struct nlattr *entry_attr, *matches_attr, *actions_attr;
- int err;
-
- entry_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_ENTRY);
- if (!entry_attr)
- return -EMSGSIZE;
-
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_INDEX, entry->index,
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- if (entry->counter_valid)
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_ENTRY_COUNTER,
- entry->counter, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- matches_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES);
- if (!matches_attr)
- goto nla_put_failure;
-
- err = devlink_dpipe_match_values_put(skb, entry->match_values,
- entry->match_values_count);
- if (err) {
- nla_nest_cancel(skb, matches_attr);
- goto err_match_values_put;
- }
- nla_nest_end(skb, matches_attr);
-
- actions_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES);
- if (!actions_attr)
- goto nla_put_failure;
-
- err = devlink_dpipe_action_values_put(skb, entry->action_values,
- entry->action_values_count);
- if (err) {
- nla_nest_cancel(skb, actions_attr);
- goto err_action_values_put;
- }
- nla_nest_end(skb, actions_attr);
-
- nla_nest_end(skb, entry_attr);
- return 0;
-
-nla_put_failure:
- err = -EMSGSIZE;
-err_match_values_put:
-err_action_values_put:
- nla_nest_cancel(skb, entry_attr);
- return err;
-}
-
-static struct devlink_dpipe_table *
-devlink_dpipe_table_find(struct list_head *dpipe_tables,
- const char *table_name, struct devlink *devlink)
-{
- struct devlink_dpipe_table *table;
- list_for_each_entry_rcu(table, dpipe_tables, list,
- lockdep_is_held(&devlink->lock)) {
- if (!strcmp(table->name, table_name))
- return table;
- }
- return NULL;
-}
-
-int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
-{
- struct devlink *devlink;
- int err;
-
- err = devlink_dpipe_send_and_alloc_skb(&dump_ctx->skb,
- dump_ctx->info);
- if (err)
- return err;
-
- dump_ctx->hdr = genlmsg_put(dump_ctx->skb,
- dump_ctx->info->snd_portid,
- dump_ctx->info->snd_seq,
- &devlink_nl_family, NLM_F_MULTI,
- dump_ctx->cmd);
- if (!dump_ctx->hdr)
- goto nla_put_failure;
-
- devlink = dump_ctx->info->user_ptr[0];
- if (devlink_nl_put_handle(dump_ctx->skb, devlink))
- goto nla_put_failure;
- dump_ctx->nest = nla_nest_start_noflag(dump_ctx->skb,
- DEVLINK_ATTR_DPIPE_ENTRIES);
- if (!dump_ctx->nest)
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- nlmsg_free(dump_ctx->skb);
- return -EMSGSIZE;
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_prepare);
-
-int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
- struct devlink_dpipe_entry *entry)
-{
- return devlink_dpipe_entry_put(dump_ctx->skb, entry);
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_append);
-
-int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
-{
- nla_nest_end(dump_ctx->skb, dump_ctx->nest);
- genlmsg_end(dump_ctx->skb, dump_ctx->hdr);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_ctx_close);
-
-void devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
-
-{
- unsigned int value_count, value_index;
- struct devlink_dpipe_value *value;
-
- value = entry->action_values;
- value_count = entry->action_values_count;
- for (value_index = 0; value_index < value_count; value_index++) {
- kfree(value[value_index].value);
- kfree(value[value_index].mask);
- }
-
- value = entry->match_values;
- value_count = entry->match_values_count;
- for (value_index = 0; value_index < value_count; value_index++) {
- kfree(value[value_index].value);
- kfree(value[value_index].mask);
- }
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_entry_clear);
-
-static int devlink_dpipe_entries_fill(struct genl_info *info,
- enum devlink_command cmd, int flags,
- struct devlink_dpipe_table *table)
-{
- struct devlink_dpipe_dump_ctx dump_ctx;
- struct nlmsghdr *nlh;
- int err;
-
- dump_ctx.skb = NULL;
- dump_ctx.cmd = cmd;
- dump_ctx.info = info;
-
- err = table->table_ops->entries_dump(table->priv,
- table->counters_enabled,
- &dump_ctx);
- if (err)
- return err;
-
-send_done:
- nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
- NLMSG_DONE, 0, flags | NLM_F_MULTI);
- if (!nlh) {
- err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
- if (err)
- return err;
- goto send_done;
- }
- return genlmsg_reply(dump_ctx.skb, info);
-}
-
-static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_dpipe_table *table;
- const char *table_name;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME))
- return -EINVAL;
-
- table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
- table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name, devlink);
- if (!table)
- return -EINVAL;
-
- if (!table->table_ops->entries_dump)
- return -EINVAL;
-
- return devlink_dpipe_entries_fill(info, DEVLINK_CMD_DPIPE_ENTRIES_GET,
- 0, table);
-}
-
-static int devlink_dpipe_fields_put(struct sk_buff *skb,
- const struct devlink_dpipe_header *header)
-{
- struct devlink_dpipe_field *field;
- struct nlattr *field_attr;
- int i;
-
- for (i = 0; i < header->fields_count; i++) {
- field = &header->fields[i];
- field_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_FIELD);
- if (!field_attr)
- return -EMSGSIZE;
- if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_FIELD_NAME, field->name) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_ID, field->id) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, field->bitwidth) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, field->mapping_type))
- goto nla_put_failure;
- nla_nest_end(skb, field_attr);
- }
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(skb, field_attr);
- return -EMSGSIZE;
-}
-
-static int devlink_dpipe_header_put(struct sk_buff *skb,
- struct devlink_dpipe_header *header)
-{
- struct nlattr *fields_attr, *header_attr;
- int err;
-
- header_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADER);
- if (!header_attr)
- return -EMSGSIZE;
-
- if (nla_put_string(skb, DEVLINK_ATTR_DPIPE_HEADER_NAME, header->name) ||
- nla_put_u32(skb, DEVLINK_ATTR_DPIPE_HEADER_ID, header->id) ||
- nla_put_u8(skb, DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, header->global))
- goto nla_put_failure;
-
- fields_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_DPIPE_HEADER_FIELDS);
- if (!fields_attr)
- goto nla_put_failure;
-
- err = devlink_dpipe_fields_put(skb, header);
- if (err) {
- nla_nest_cancel(skb, fields_attr);
- goto nla_put_failure;
- }
- nla_nest_end(skb, fields_attr);
- nla_nest_end(skb, header_attr);
- return 0;
-
-nla_put_failure:
- err = -EMSGSIZE;
- nla_nest_cancel(skb, header_attr);
- return err;
-}
-
-static int devlink_dpipe_headers_fill(struct genl_info *info,
- enum devlink_command cmd, int flags,
- struct devlink_dpipe_headers *
- dpipe_headers)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct nlattr *headers_attr;
- struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
- void *hdr;
- int i, j;
- int err;
-
- i = 0;
-start_again:
- err = devlink_dpipe_send_and_alloc_skb(&skb, info);
- if (err)
- return err;
-
- hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
- &devlink_nl_family, NLM_F_MULTI, cmd);
- if (!hdr) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- if (devlink_nl_put_handle(skb, devlink))
- goto nla_put_failure;
- headers_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_DPIPE_HEADERS);
- if (!headers_attr)
- goto nla_put_failure;
-
- j = 0;
- for (; i < dpipe_headers->headers_count; i++) {
- err = devlink_dpipe_header_put(skb, dpipe_headers->headers[i]);
- if (err) {
- if (!j)
- goto err_table_put;
- break;
- }
- j++;
- }
- nla_nest_end(skb, headers_attr);
- genlmsg_end(skb, hdr);
- if (i != dpipe_headers->headers_count)
- goto start_again;
-
-send_done:
- nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
- NLMSG_DONE, 0, flags | NLM_F_MULTI);
- if (!nlh) {
- err = devlink_dpipe_send_and_alloc_skb(&skb, info);
- if (err)
- return err;
- goto send_done;
- }
- return genlmsg_reply(skb, info);
-
-nla_put_failure:
- err = -EMSGSIZE;
-err_table_put:
- nlmsg_free(skb);
- return err;
-}
-
-static int devlink_nl_cmd_dpipe_headers_get(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
-
- if (!devlink->dpipe_headers)
- return -EOPNOTSUPP;
- return devlink_dpipe_headers_fill(info, DEVLINK_CMD_DPIPE_HEADERS_GET,
- 0, devlink->dpipe_headers);
-}
-
-static int devlink_dpipe_table_counters_set(struct devlink *devlink,
- const char *table_name,
- bool enable)
-{
- struct devlink_dpipe_table *table;
-
- table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name, devlink);
- if (!table)
- return -EINVAL;
-
- if (table->counter_control_extern)
- return -EOPNOTSUPP;
-
- if (!(table->counters_enabled ^ enable))
- return 0;
-
- table->counters_enabled = enable;
- if (table->table_ops->counters_set_update)
- table->table_ops->counters_set_update(table->priv, enable);
- return 0;
-}
-
-static int devlink_nl_cmd_dpipe_table_counters_set(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- const char *table_name;
- bool counters_enable;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_DPIPE_TABLE_NAME) ||
- GENL_REQ_ATTR_CHECK(info,
- DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED))
- return -EINVAL;
-
- table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
- counters_enable = !!nla_get_u8(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED]);
-
- return devlink_dpipe_table_counters_set(devlink, table_name,
- counters_enable);
-}
-
-static struct devlink_resource *
-devlink_resource_find(struct devlink *devlink,
- struct devlink_resource *resource, u64 resource_id)
-{
- struct list_head *resource_list;
-
- if (resource)
- resource_list = &resource->resource_list;
- else
- resource_list = &devlink->resource_list;
-
- list_for_each_entry(resource, resource_list, list) {
- struct devlink_resource *child_resource;
-
- if (resource->id == resource_id)
- return resource;
-
- child_resource = devlink_resource_find(devlink, resource,
- resource_id);
- if (child_resource)
- return child_resource;
- }
- return NULL;
-}
-
-static void
-devlink_resource_validate_children(struct devlink_resource *resource)
-{
- struct devlink_resource *child_resource;
- bool size_valid = true;
- u64 parts_size = 0;
-
- if (list_empty(&resource->resource_list))
- goto out;
-
- list_for_each_entry(child_resource, &resource->resource_list, list)
- parts_size += child_resource->size_new;
-
- if (parts_size > resource->size_new)
- size_valid = false;
-out:
- resource->size_valid = size_valid;
-}
-
-static int
-devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
- struct netlink_ext_ack *extack)
-{
- u64 reminder;
- int err = 0;
-
- if (size > resource->size_params.size_max) {
- NL_SET_ERR_MSG(extack, "Size larger than maximum");
- err = -EINVAL;
- }
-
- if (size < resource->size_params.size_min) {
- NL_SET_ERR_MSG(extack, "Size smaller than minimum");
- err = -EINVAL;
- }
-
- div64_u64_rem(size, resource->size_params.size_granularity, &reminder);
- if (reminder) {
- NL_SET_ERR_MSG(extack, "Wrong granularity");
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_resource *resource;
- u64 resource_id;
- u64 size;
- int err;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_ID) ||
- GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_SIZE))
- return -EINVAL;
- resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
-
- resource = devlink_resource_find(devlink, NULL, resource_id);
- if (!resource)
- return -EINVAL;
-
- size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
- err = devlink_resource_validate_size(resource, size, info->extack);
- if (err)
- return err;
-
- resource->size_new = size;
- devlink_resource_validate_children(resource);
- if (resource->parent)
- devlink_resource_validate_children(resource->parent);
- return 0;
-}
-
-static int
-devlink_resource_size_params_put(struct devlink_resource *resource,
- struct sk_buff *skb)
-{
- struct devlink_resource_size_params *size_params;
-
- size_params = &resource->size_params;
- if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
- size_params->size_granularity, DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
- size_params->size_max, DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
- size_params->size_min, DEVLINK_ATTR_PAD) ||
- nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
- return -EMSGSIZE;
- return 0;
-}
-
-static int devlink_resource_occ_put(struct devlink_resource *resource,
- struct sk_buff *skb)
-{
- if (!resource->occ_get)
- return 0;
- return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
- resource->occ_get(resource->occ_get_priv),
- DEVLINK_ATTR_PAD);
-}
-
-static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
- struct devlink_resource *resource)
-{
- struct devlink_resource *child_resource;
- struct nlattr *child_resource_attr;
- struct nlattr *resource_attr;
-
- resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
- if (!resource_attr)
- return -EMSGSIZE;
-
- if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
- DEVLINK_ATTR_PAD) ||
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- if (resource->size != resource->size_new &&
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
- resource->size_new, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
- if (devlink_resource_occ_put(resource, skb))
- goto nla_put_failure;
- if (devlink_resource_size_params_put(resource, skb))
- goto nla_put_failure;
- if (list_empty(&resource->resource_list))
- goto out;
-
- if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
- resource->size_valid))
- goto nla_put_failure;
-
- child_resource_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_RESOURCE_LIST);
- if (!child_resource_attr)
- goto nla_put_failure;
-
- list_for_each_entry(child_resource, &resource->resource_list, list) {
- if (devlink_resource_put(devlink, skb, child_resource))
- goto resource_put_failure;
- }
-
- nla_nest_end(skb, child_resource_attr);
-out:
- nla_nest_end(skb, resource_attr);
- return 0;
-
-resource_put_failure:
- nla_nest_cancel(skb, child_resource_attr);
-nla_put_failure:
- nla_nest_cancel(skb, resource_attr);
- return -EMSGSIZE;
-}
-
-static int devlink_resource_fill(struct genl_info *info,
- enum devlink_command cmd, int flags)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_resource *resource;
- struct nlattr *resources_attr;
- struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
- bool incomplete;
- void *hdr;
- int i;
- int err;
-
- resource = list_first_entry(&devlink->resource_list,
- struct devlink_resource, list);
-start_again:
- err = devlink_dpipe_send_and_alloc_skb(&skb, info);
- if (err)
- return err;
-
- hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
- &devlink_nl_family, NLM_F_MULTI, cmd);
- if (!hdr) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- if (devlink_nl_put_handle(skb, devlink))
- goto nla_put_failure;
-
- resources_attr = nla_nest_start_noflag(skb,
- DEVLINK_ATTR_RESOURCE_LIST);
- if (!resources_attr)
- goto nla_put_failure;
-
- incomplete = false;
- i = 0;
- list_for_each_entry_from(resource, &devlink->resource_list, list) {
- err = devlink_resource_put(devlink, skb, resource);
- if (err) {
- if (!i)
- goto err_resource_put;
- incomplete = true;
- break;
- }
- i++;
- }
- nla_nest_end(skb, resources_attr);
- genlmsg_end(skb, hdr);
- if (incomplete)
- goto start_again;
-send_done:
- nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
- NLMSG_DONE, 0, flags | NLM_F_MULTI);
- if (!nlh) {
- err = devlink_dpipe_send_and_alloc_skb(&skb, info);
- if (err)
- return err;
- goto send_done;
- }
- return genlmsg_reply(skb, info);
-
-nla_put_failure:
- err = -EMSGSIZE;
-err_resource_put:
- nlmsg_free(skb);
- return err;
-}
-
-static int devlink_nl_cmd_resource_dump(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
-
- if (list_empty(&devlink->resource_list))
- return -EOPNOTSUPP;
-
- return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
-}
-
-int devlink_resources_validate(struct devlink *devlink,
- struct devlink_resource *resource,
- struct genl_info *info)
-{
- struct list_head *resource_list;
- int err = 0;
-
- if (resource)
- resource_list = &resource->resource_list;
- else
- resource_list = &devlink->resource_list;
-
- list_for_each_entry(resource, resource_list, list) {
- if (!resource->size_valid)
- return -EINVAL;
- err = devlink_resources_validate(devlink, resource, info);
- if (err)
- return err;
- }
- return err;
-}
-
-static const struct devlink_param devlink_param_generic[] = {
- {
- .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
- .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
- .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
- .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
- .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
- .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
- .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
- .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
- .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
- .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
- .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
- .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
- .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
- .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
- .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
- .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
- .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
- },
- {
- .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
- .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
- .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
- },
-};
-
-static int devlink_param_generic_verify(const struct devlink_param *param)
-{
- /* verify it matches a generic parameter by id and name */
- if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
- return -EINVAL;
- if (strcmp(param->name, devlink_param_generic[param->id].name))
- return -ENOENT;
-
- WARN_ON(param->type != devlink_param_generic[param->id].type);
-
- return 0;
-}
-
-static int devlink_param_driver_verify(const struct devlink_param *param)
-{
- int i;
-
- if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
- return -EINVAL;
- /* verify no such name in generic params */
- for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
- if (!strcmp(param->name, devlink_param_generic[i].name))
- return -EEXIST;
-
- return 0;
-}
-
-static struct devlink_param_item *
-devlink_param_find_by_name(struct xarray *params, const char *param_name)
-{
- struct devlink_param_item *param_item;
- unsigned long param_id;
-
- xa_for_each(params, param_id, param_item) {
- if (!strcmp(param_item->param->name, param_name))
- return param_item;
- }
- return NULL;
-}
-
-static struct devlink_param_item *
-devlink_param_find_by_id(struct xarray *params, u32 param_id)
-{
- return xa_load(params, param_id);
-}
-
-static bool
-devlink_param_cmode_is_supported(const struct devlink_param *param,
- enum devlink_param_cmode cmode)
-{
- return test_bit(cmode, &param->supported_cmodes);
-}
-
-static int devlink_param_get(struct devlink *devlink,
- const struct devlink_param *param,
- struct devlink_param_gset_ctx *ctx)
-{
- if (!param->get)
- return -EOPNOTSUPP;
- return param->get(devlink, param->id, ctx);
-}
-
-static int devlink_param_set(struct devlink *devlink,
- const struct devlink_param *param,
- struct devlink_param_gset_ctx *ctx)
-{
- if (!param->set)
- return -EOPNOTSUPP;
- return param->set(devlink, param->id, ctx);
-}
-
-static int
-devlink_param_type_to_nla_type(enum devlink_param_type param_type)
-{
- switch (param_type) {
- case DEVLINK_PARAM_TYPE_U8:
- return NLA_U8;
- case DEVLINK_PARAM_TYPE_U16:
- return NLA_U16;
- case DEVLINK_PARAM_TYPE_U32:
- return NLA_U32;
- case DEVLINK_PARAM_TYPE_STRING:
- return NLA_STRING;
- case DEVLINK_PARAM_TYPE_BOOL:
- return NLA_FLAG;
- default:
- return -EINVAL;
- }
-}
-
-static int
-devlink_nl_param_value_fill_one(struct sk_buff *msg,
- enum devlink_param_type type,
- enum devlink_param_cmode cmode,
- union devlink_param_value val)
-{
- struct nlattr *param_value_attr;
-
- param_value_attr = nla_nest_start_noflag(msg,
- DEVLINK_ATTR_PARAM_VALUE);
- if (!param_value_attr)
- goto nla_put_failure;
-
- if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
- goto value_nest_cancel;
-
- switch (type) {
- case DEVLINK_PARAM_TYPE_U8:
- if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
- goto value_nest_cancel;
- break;
- case DEVLINK_PARAM_TYPE_U16:
- if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
- goto value_nest_cancel;
- break;
- case DEVLINK_PARAM_TYPE_U32:
- if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
- goto value_nest_cancel;
- break;
- case DEVLINK_PARAM_TYPE_STRING:
- if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
- val.vstr))
- goto value_nest_cancel;
- break;
- case DEVLINK_PARAM_TYPE_BOOL:
- if (val.vbool &&
- nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
- goto value_nest_cancel;
- break;
- }
-
- nla_nest_end(msg, param_value_attr);
- return 0;
-
-value_nest_cancel:
- nla_nest_cancel(msg, param_value_attr);
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
- unsigned int port_index,
- struct devlink_param_item *param_item,
- enum devlink_command cmd,
- u32 portid, u32 seq, int flags)
-{
- union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
- bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
- const struct devlink_param *param = param_item->param;
- struct devlink_param_gset_ctx ctx;
- struct nlattr *param_values_list;
- struct nlattr *param_attr;
- int nla_type;
- void *hdr;
- int err;
- int i;
-
- /* Get the value for each supported configuration mode; for driverinit, use the stored value */
- for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
- if (!devlink_param_cmode_is_supported(param, i))
- continue;
- if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- if (param_item->driverinit_value_new_valid)
- param_value[i] = param_item->driverinit_value_new;
- else if (param_item->driverinit_value_valid)
- param_value[i] = param_item->driverinit_value;
- else
- return -EOPNOTSUPP;
- } else {
- ctx.cmode = i;
- err = devlink_param_get(devlink, param, &ctx);
- if (err)
- return err;
- param_value[i] = ctx.val;
- }
- param_value_set[i] = true;
- }
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto genlmsg_cancel;
-
- if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
- cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
- cmd == DEVLINK_CMD_PORT_PARAM_DEL)
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
- goto genlmsg_cancel;
-
- param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM);
- if (!param_attr)
- goto genlmsg_cancel;
- if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
- goto param_nest_cancel;
- if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
- goto param_nest_cancel;
-
- nla_type = devlink_param_type_to_nla_type(param->type);
- if (nla_type < 0)
- goto param_nest_cancel;
- if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
- goto param_nest_cancel;
-
- param_values_list = nla_nest_start_noflag(msg,
- DEVLINK_ATTR_PARAM_VALUES_LIST);
- if (!param_values_list)
- goto param_nest_cancel;
-
- for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
- if (!param_value_set[i])
- continue;
- err = devlink_nl_param_value_fill_one(msg, param->type,
- i, param_value[i]);
- if (err)
- goto values_list_nest_cancel;
- }
-
- nla_nest_end(msg, param_values_list);
- nla_nest_end(msg, param_attr);
- genlmsg_end(msg, hdr);
- return 0;
-
-values_list_nest_cancel:
- nla_nest_end(msg, param_values_list);
-param_nest_cancel:
- nla_nest_cancel(msg, param_attr);
-genlmsg_cancel:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-static void devlink_param_notify(struct devlink *devlink,
- unsigned int port_index,
- struct devlink_param_item *param_item,
- enum devlink_command cmd)
-{
- struct sk_buff *msg;
- int err;
-
- WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
- cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
- cmd != DEVLINK_CMD_PORT_PARAM_DEL);
-
- /* devlink_notify_register() / devlink_notify_unregister()
- * will replay the notifications if the params are added/removed
- * outside of the lifetime of the instance.
- */
- if (!devl_is_registered(devlink))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
- err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
- 0, 0, 0);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-static int devlink_nl_param_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb,
- int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_param_item *param_item;
- unsigned long param_id;
- int err = 0;
-
- xa_for_each_start(&devlink->params, param_id, param_item, state->idx) {
- err = devlink_nl_param_fill(msg, devlink, 0, param_item,
- DEVLINK_CMD_PARAM_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err == -EOPNOTSUPP) {
- err = 0;
- } else if (err) {
- state->idx = param_id;
- break;
- }
- }
-
- return err;
-}
-
-int devlink_nl_param_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_param_get_dump_one);
-}
-
-static int
-devlink_param_type_get_from_info(struct genl_info *info,
- enum devlink_param_type *param_type)
-{
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE))
- return -EINVAL;
-
- switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
- case NLA_U8:
- *param_type = DEVLINK_PARAM_TYPE_U8;
- break;
- case NLA_U16:
- *param_type = DEVLINK_PARAM_TYPE_U16;
- break;
- case NLA_U32:
- *param_type = DEVLINK_PARAM_TYPE_U32;
- break;
- case NLA_STRING:
- *param_type = DEVLINK_PARAM_TYPE_STRING;
- break;
- case NLA_FLAG:
- *param_type = DEVLINK_PARAM_TYPE_BOOL;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-devlink_param_value_get_from_info(const struct devlink_param *param,
- struct genl_info *info,
- union devlink_param_value *value)
-{
- struct nlattr *param_data;
- int len;
-
- param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
-
- if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
- return -EINVAL;
-
- switch (param->type) {
- case DEVLINK_PARAM_TYPE_U8:
- if (nla_len(param_data) != sizeof(u8))
- return -EINVAL;
- value->vu8 = nla_get_u8(param_data);
- break;
- case DEVLINK_PARAM_TYPE_U16:
- if (nla_len(param_data) != sizeof(u16))
- return -EINVAL;
- value->vu16 = nla_get_u16(param_data);
- break;
- case DEVLINK_PARAM_TYPE_U32:
- if (nla_len(param_data) != sizeof(u32))
- return -EINVAL;
- value->vu32 = nla_get_u32(param_data);
- break;
- case DEVLINK_PARAM_TYPE_STRING:
- len = strnlen(nla_data(param_data), nla_len(param_data));
- if (len == nla_len(param_data) ||
- len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
- return -EINVAL;
- strcpy(value->vstr, nla_data(param_data));
- break;
- case DEVLINK_PARAM_TYPE_BOOL:
- if (param_data && nla_len(param_data))
- return -EINVAL;
- value->vbool = nla_get_flag(param_data);
- break;
- }
- return 0;
-}
-
-static struct devlink_param_item *
-devlink_param_get_from_info(struct xarray *params, struct genl_info *info)
-{
- char *param_name;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME))
- return NULL;
-
- param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
- return devlink_param_find_by_name(params, param_name);
-}
-
-int devlink_nl_param_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_param_item *param_item;
- struct sk_buff *msg;
- int err;
-
- param_item = devlink_param_get_from_info(&devlink->params, info);
- if (!param_item)
- return -EINVAL;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_param_fill(msg, devlink, 0, param_item,
- DEVLINK_CMD_PARAM_GET,
- info->snd_portid, info->snd_seq, 0);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
- unsigned int port_index,
- struct xarray *params,
- struct genl_info *info,
- enum devlink_command cmd)
-{
- enum devlink_param_type param_type;
- struct devlink_param_gset_ctx ctx;
- enum devlink_param_cmode cmode;
- struct devlink_param_item *param_item;
- const struct devlink_param *param;
- union devlink_param_value value;
- int err = 0;
-
- param_item = devlink_param_get_from_info(params, info);
- if (!param_item)
- return -EINVAL;
- param = param_item->param;
- err = devlink_param_type_get_from_info(info, &param_type);
- if (err)
- return err;
- if (param_type != param->type)
- return -EINVAL;
- err = devlink_param_value_get_from_info(param, info, &value);
- if (err)
- return err;
- if (param->validate) {
- err = param->validate(devlink, param->id, value, info->extack);
- if (err)
- return err;
- }
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE))
- return -EINVAL;
- cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
- if (!devlink_param_cmode_is_supported(param, cmode))
- return -EOPNOTSUPP;
-
- if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- param_item->driverinit_value_new = value;
- param_item->driverinit_value_new_valid = true;
- } else {
- if (!param->set)
- return -EOPNOTSUPP;
- ctx.val = value;
- ctx.cmode = cmode;
- err = devlink_param_set(devlink, param, &ctx);
- if (err)
- return err;
- }
-
- devlink_param_notify(devlink, port_index, param_item, cmd);
- return 0;
-}
-
-static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
-
- return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->params,
- info, DEVLINK_CMD_PARAM_NEW);
-}
-
-static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb)
-{
- NL_SET_ERR_MSG(cb->extack, "Port params are not supported");
- return msg->len;
-}
-
-static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- NL_SET_ERR_MSG(info->extack, "Port params are not supported");
- return -EINVAL;
-}
-
-static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- NL_SET_ERR_MSG(info->extack, "Port params are not supported");
- return -EINVAL;
-}
-
-static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
- struct devlink *devlink,
- struct devlink_snapshot *snapshot)
-{
- struct nlattr *snap_attr;
- int err;
-
- snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
- if (!snap_attr)
- return -EINVAL;
-
- err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
- if (err)
- goto nla_put_failure;
-
- nla_nest_end(msg, snap_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, snap_attr);
- return err;
-}
-
-static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
- struct devlink *devlink,
- struct devlink_region *region)
-{
- struct devlink_snapshot *snapshot;
- struct nlattr *snapshots_attr;
- int err;
-
- snapshots_attr = nla_nest_start_noflag(msg,
- DEVLINK_ATTR_REGION_SNAPSHOTS);
- if (!snapshots_attr)
- return -EINVAL;
-
- list_for_each_entry(snapshot, &region->snapshot_list, list) {
- err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
- if (err)
- goto nla_put_failure;
- }
-
- nla_nest_end(msg, snapshots_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, snapshots_attr);
- return err;
-}
-
-static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
- enum devlink_command cmd, u32 portid,
- u32 seq, int flags,
- struct devlink_region *region)
-{
- void *hdr;
- int err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- err = devlink_nl_put_handle(msg, devlink);
- if (err)
- goto nla_put_failure;
-
- if (region->port) {
- err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
- region->port->index);
- if (err)
- goto nla_put_failure;
- }
-
- err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
- if (err)
- goto nla_put_failure;
-
- err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
- region->size,
- DEVLINK_ATTR_PAD);
- if (err)
- goto nla_put_failure;
-
- err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS,
- region->max_snapshots);
- if (err)
- goto nla_put_failure;
-
- err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
- if (err)
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return err;
-}
-
-static struct sk_buff *
-devlink_nl_region_notify_build(struct devlink_region *region,
- struct devlink_snapshot *snapshot,
- enum devlink_command cmd, u32 portid, u32 seq)
-{
- struct devlink *devlink = region->devlink;
- struct sk_buff *msg;
- void *hdr;
- int err;
-
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return ERR_PTR(-ENOMEM);
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, 0, cmd);
- if (!hdr) {
- err = -EMSGSIZE;
- goto out_free_msg;
- }
-
- err = devlink_nl_put_handle(msg, devlink);
- if (err)
- goto out_cancel_msg;
-
- if (region->port) {
- err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
- region->port->index);
- if (err)
- goto out_cancel_msg;
- }
-
- err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
- region->ops->name);
- if (err)
- goto out_cancel_msg;
-
- if (snapshot) {
- err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
- snapshot->id);
- if (err)
- goto out_cancel_msg;
- } else {
- err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
- region->size, DEVLINK_ATTR_PAD);
- if (err)
- goto out_cancel_msg;
- }
- genlmsg_end(msg, hdr);
-
- return msg;
-
-out_cancel_msg:
- genlmsg_cancel(msg, hdr);
-out_free_msg:
- nlmsg_free(msg);
- return ERR_PTR(err);
-}
-
-static void devlink_nl_region_notify(struct devlink_region *region,
- struct devlink_snapshot *snapshot,
- enum devlink_command cmd)
-{
- struct devlink *devlink = region->devlink;
- struct sk_buff *msg;
-
- WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0);
- if (IS_ERR(msg))
- return;
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
- 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-/**
- * __devlink_snapshot_id_increment - Increment number of snapshots using an id
- * @devlink: devlink instance
- * @id: the snapshot id
- *
- * Track when a new snapshot begins using an id. Load the count for the
- * given id from the snapshot xarray, increment it, and store it back.
- *
- * Called when a new snapshot is created with the given id.
- *
- * The id *must* have been previously allocated by
- * devlink_region_snapshot_id_get().
- *
- * Returns 0 on success, or an error on failure.
- */
-static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
-{
- unsigned long count;
- void *p;
- int err;
-
- xa_lock(&devlink->snapshot_ids);
- p = xa_load(&devlink->snapshot_ids, id);
- if (WARN_ON(!p)) {
- err = -EINVAL;
- goto unlock;
- }
-
- if (WARN_ON(!xa_is_value(p))) {
- err = -EINVAL;
- goto unlock;
- }
-
- count = xa_to_value(p);
- count++;
-
- err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
- GFP_ATOMIC));
-unlock:
- xa_unlock(&devlink->snapshot_ids);
- return err;
-}
-
-/**
- * __devlink_snapshot_id_decrement - Decrease number of snapshots using an id
- * @devlink: devlink instance
- * @id: the snapshot id
- *
- * Track when a snapshot is deleted and stops using an id. Load the count
- * for the given id from the snapshot xarray, decrement it, and store it
- * back.
- *
- * If the count reaches zero, erase this id from the xarray, freeing it
- * up for future re-use by devlink_region_snapshot_id_get().
- *
- * Called when a snapshot using the given id is deleted, and when the
- * initial allocator of the id is finished using it.
- */
-static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
-{
- unsigned long count;
- void *p;
-
- xa_lock(&devlink->snapshot_ids);
- p = xa_load(&devlink->snapshot_ids, id);
- if (WARN_ON(!p))
- goto unlock;
-
- if (WARN_ON(!xa_is_value(p)))
- goto unlock;
-
- count = xa_to_value(p);
-
- if (count > 1) {
- count--;
- __xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
- GFP_ATOMIC);
- } else {
- /* If this was the last user, we can erase this id */
- __xa_erase(&devlink->snapshot_ids, id);
- }
-unlock:
- xa_unlock(&devlink->snapshot_ids);
-}
-
-/**
- * __devlink_snapshot_id_insert - Insert a specific snapshot ID
- * @devlink: devlink instance
- * @id: the snapshot id
- *
- * Mark the given snapshot id as used by inserting a zero value into the
- * snapshot xarray.
- *
- * This must be called while holding the devlink instance lock. Unlike
- * devlink_snapshot_id_get, the initial reference count is zero, not one.
- * It is expected that the id will immediately be used before
- * releasing the devlink instance lock.
- *
- * Returns zero on success, or an error code if the snapshot id could not
- * be inserted.
- */
-static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
-{
- int err;
-
- xa_lock(&devlink->snapshot_ids);
- if (xa_load(&devlink->snapshot_ids, id)) {
- xa_unlock(&devlink->snapshot_ids);
- return -EEXIST;
- }
- err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
- GFP_ATOMIC));
- xa_unlock(&devlink->snapshot_ids);
- return err;
-}
-
-/**
- * __devlink_region_snapshot_id_get - get snapshot ID
- * @devlink: devlink instance
- * @id: storage to return snapshot id
- *
- * Allocates a new snapshot id. Returns zero on success, or a negative
- * error on failure. Must be called while holding the devlink instance
- * lock.
- *
- * Snapshot IDs are tracked using an xarray which stores the number of
- * users of the snapshot id.
- *
- * Note that the caller of this function counts as a 'user', in order to
- * avoid race conditions. The caller must release its hold on the
- * snapshot by using devlink_region_snapshot_id_put.
- */
-static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
-{
- return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
- xa_limit_32b, GFP_KERNEL);
-}
-
-/**
- * __devlink_region_snapshot_create - create a new snapshot
- * This will add a new snapshot of a region. The snapshot
- * will be stored on the region struct and can be accessed
- * from devlink. This is useful for future analyses of snapshots.
- * Multiple snapshots can be created on a region.
- * The @snapshot_id should be obtained using the getter function.
- *
- * Must be called only while holding the region snapshot lock.
- *
- * @region: devlink region of the snapshot
- * @data: snapshot data
- * @snapshot_id: snapshot id to be created
- */
-static int
-__devlink_region_snapshot_create(struct devlink_region *region,
- u8 *data, u32 snapshot_id)
-{
- struct devlink *devlink = region->devlink;
- struct devlink_snapshot *snapshot;
- int err;
-
- lockdep_assert_held(&region->snapshot_lock);
-
- /* check if region can hold one more snapshot */
- if (region->cur_snapshots == region->max_snapshots)
- return -ENOSPC;
-
- if (devlink_region_snapshot_get_by_id(region, snapshot_id))
- return -EEXIST;
-
- snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
- if (!snapshot)
- return -ENOMEM;
-
- err = __devlink_snapshot_id_increment(devlink, snapshot_id);
- if (err)
- goto err_snapshot_id_increment;
-
- snapshot->id = snapshot_id;
- snapshot->region = region;
- snapshot->data = data;
-
- list_add_tail(&snapshot->list, &region->snapshot_list);
-
- region->cur_snapshots++;
-
- devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
- return 0;
-
-err_snapshot_id_increment:
- kfree(snapshot);
- return err;
-}
-
-static void devlink_region_snapshot_del(struct devlink_region *region,
- struct devlink_snapshot *snapshot)
-{
- struct devlink *devlink = region->devlink;
-
- lockdep_assert_held(&region->snapshot_lock);
-
- devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
- region->cur_snapshots--;
- list_del(&snapshot->list);
- region->ops->destructor(snapshot->data);
- __devlink_snapshot_id_decrement(devlink, snapshot->id);
- kfree(snapshot);
-}
-
-int devlink_nl_region_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_port *port = NULL;
- struct devlink_region *region;
- const char *region_name;
- struct sk_buff *msg;
- unsigned int index;
- int err;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME))
- return -EINVAL;
-
- if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
- index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
-
- port = devlink_port_get_by_index(devlink, index);
- if (!port)
- return -ENODEV;
- }
-
- region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
- if (port)
- region = devlink_port_region_get_by_name(port, region_name);
- else
- region = devlink_region_get_by_name(devlink, region_name);
-
- if (!region)
- return -EINVAL;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
- info->snd_portid, info->snd_seq, 0,
- region);
- if (err) {
- nlmsg_free(msg);
- return err;
- }
-
- return genlmsg_reply(msg, info);
-}
-
-static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
- struct netlink_callback *cb,
- struct devlink_port *port,
- int *idx, int start, int flags)
-{
- struct devlink_region *region;
- int err = 0;
-
- list_for_each_entry(region, &port->region_list, list) {
- if (*idx < start) {
- (*idx)++;
- continue;
- }
- err = devlink_nl_region_fill(msg, port->devlink,
- DEVLINK_CMD_REGION_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- flags, region);
- if (err)
- goto out;
- (*idx)++;
- }
-
-out:
- return err;
-}
-
-static int devlink_nl_region_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb,
- int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_region *region;
- struct devlink_port *port;
- unsigned long port_index;
- int idx = 0;
- int err;
-
- list_for_each_entry(region, &devlink->region_list, list) {
- if (idx < state->idx) {
- idx++;
- continue;
- }
- err = devlink_nl_region_fill(msg, devlink,
- DEVLINK_CMD_REGION_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags,
- region);
- if (err) {
- state->idx = idx;
- return err;
- }
- idx++;
- }
-
- xa_for_each(&devlink->ports, port_index, port) {
- err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, &idx,
- state->idx, flags);
- if (err) {
- state->idx = idx;
- return err;
- }
- }
-
- return 0;
-}
-
-int devlink_nl_region_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_region_get_dump_one);
-}
-
-static int devlink_nl_cmd_region_del(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_snapshot *snapshot;
- struct devlink_port *port = NULL;
- struct devlink_region *region;
- const char *region_name;
- unsigned int index;
- u32 snapshot_id;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME) ||
- GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_SNAPSHOT_ID))
- return -EINVAL;
-
- region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
- snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
-
- if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
- index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
-
- port = devlink_port_get_by_index(devlink, index);
- if (!port)
- return -ENODEV;
- }
-
- if (port)
- region = devlink_port_region_get_by_name(port, region_name);
- else
- region = devlink_region_get_by_name(devlink, region_name);
-
- if (!region)
- return -EINVAL;
-
- mutex_lock(&region->snapshot_lock);
- snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
- if (!snapshot) {
- mutex_unlock(&region->snapshot_lock);
- return -EINVAL;
- }
-
- devlink_region_snapshot_del(region, snapshot);
- mutex_unlock(&region->snapshot_lock);
- return 0;
-}
-
-static int
-devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
-{
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_snapshot *snapshot;
- struct devlink_port *port = NULL;
- struct nlattr *snapshot_id_attr;
- struct devlink_region *region;
- const char *region_name;
- unsigned int index;
- u32 snapshot_id;
- u8 *data;
- int err;
-
- if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) {
- NL_SET_ERR_MSG(info->extack, "No region name provided");
- return -EINVAL;
- }
-
- region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
-
- if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
- index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
-
- port = devlink_port_get_by_index(devlink, index);
- if (!port)
- return -ENODEV;
- }
-
- if (port)
- region = devlink_port_region_get_by_name(port, region_name);
- else
- region = devlink_region_get_by_name(devlink, region_name);
-
- if (!region) {
- NL_SET_ERR_MSG(info->extack, "The requested region does not exist");
- return -EINVAL;
- }
-
- if (!region->ops->snapshot) {
- NL_SET_ERR_MSG(info->extack, "The requested region does not support taking an immediate snapshot");
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&region->snapshot_lock);
-
- if (region->cur_snapshots == region->max_snapshots) {
- NL_SET_ERR_MSG(info->extack, "The region has reached the maximum number of stored snapshots");
- err = -ENOSPC;
- goto unlock;
- }
-
- snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
- if (snapshot_id_attr) {
- snapshot_id = nla_get_u32(snapshot_id_attr);
-
- if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
- NL_SET_ERR_MSG(info->extack, "The requested snapshot id is already in use");
- err = -EEXIST;
- goto unlock;
- }
-
- err = __devlink_snapshot_id_insert(devlink, snapshot_id);
- if (err)
- goto unlock;
- } else {
- err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
- if (err) {
- NL_SET_ERR_MSG(info->extack, "Failed to allocate a new snapshot id");
- goto unlock;
- }
- }
-
- if (port)
- err = region->port_ops->snapshot(port, region->port_ops,
- info->extack, &data);
- else
- err = region->ops->snapshot(devlink, region->ops,
- info->extack, &data);
- if (err)
- goto err_snapshot_capture;
-
- err = __devlink_region_snapshot_create(region, data, snapshot_id);
- if (err)
- goto err_snapshot_create;
-
- if (!snapshot_id_attr) {
- struct sk_buff *msg;
-
- snapshot = devlink_region_snapshot_get_by_id(region,
- snapshot_id);
- if (WARN_ON(!snapshot)) {
- err = -EINVAL;
- goto unlock;
- }
-
- msg = devlink_nl_region_notify_build(region, snapshot,
- DEVLINK_CMD_REGION_NEW,
- info->snd_portid,
- info->snd_seq);
- err = PTR_ERR_OR_ZERO(msg);
- if (err)
- goto err_notify;
-
- err = genlmsg_reply(msg, info);
- if (err)
- goto err_notify;
- }
-
- mutex_unlock(&region->snapshot_lock);
- return 0;
-
-err_snapshot_create:
- region->ops->destructor(data);
-err_snapshot_capture:
- __devlink_snapshot_id_decrement(devlink, snapshot_id);
- mutex_unlock(&region->snapshot_lock);
- return err;
-
-err_notify:
- devlink_region_snapshot_del(region, snapshot);
-unlock:
- mutex_unlock(&region->snapshot_lock);
- return err;
-}
-
-static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
- u8 *chunk, u32 chunk_size,
- u64 addr)
-{
- struct nlattr *chunk_attr;
- int err;
-
- chunk_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_CHUNK);
- if (!chunk_attr)
- return -EINVAL;
-
- err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
- if (err)
- goto nla_put_failure;
-
- err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
- DEVLINK_ATTR_PAD);
- if (err)
- goto nla_put_failure;
-
- nla_nest_end(msg, chunk_attr);
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, chunk_attr);
- return err;
-}
-
-#define DEVLINK_REGION_READ_CHUNK_SIZE 256
-
-typedef int devlink_chunk_fill_t(void *cb_priv, u8 *chunk, u32 chunk_size,
- u64 curr_offset,
- struct netlink_ext_ack *extack);
-
-static int
-devlink_nl_region_read_fill(struct sk_buff *skb, devlink_chunk_fill_t *cb,
- void *cb_priv, u64 start_offset, u64 end_offset,
- u64 *new_offset, struct netlink_ext_ack *extack)
-{
- u64 curr_offset = start_offset;
- int err = 0;
- u8 *data;
-
- /* Allocate and re-use a single buffer */
- data = kmalloc(DEVLINK_REGION_READ_CHUNK_SIZE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- *new_offset = start_offset;
-
- while (curr_offset < end_offset) {
- u32 data_size;
-
- data_size = min_t(u32, end_offset - curr_offset,
- DEVLINK_REGION_READ_CHUNK_SIZE);
-
- err = cb(cb_priv, data, data_size, curr_offset, extack);
- if (err)
- break;
-
- err = devlink_nl_cmd_region_read_chunk_fill(skb, data, data_size, curr_offset);
- if (err)
- break;
-
- curr_offset += data_size;
- }
- *new_offset = curr_offset;
-
- kfree(data);
-
- return err;
-}
-
-static int
-devlink_region_snapshot_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
- u64 curr_offset,
- struct netlink_ext_ack __always_unused *extack)
-{
- struct devlink_snapshot *snapshot = cb_priv;
-
- memcpy(chunk, &snapshot->data[curr_offset], chunk_size);
-
- return 0;
-}
-
-static int
-devlink_region_port_direct_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
- u64 curr_offset, struct netlink_ext_ack *extack)
-{
- struct devlink_region *region = cb_priv;
-
- return region->port_ops->read(region->port, region->port_ops, extack,
- curr_offset, chunk_size, chunk);
-}
-
-static int
-devlink_region_direct_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
- u64 curr_offset, struct netlink_ext_ack *extack)
-{
- struct devlink_region *region = cb_priv;
-
- return region->ops->read(region->devlink, region->ops, extack,
- curr_offset, chunk_size, chunk);
-}
-
-static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct nlattr *chunks_attr, *region_attr, *snapshot_attr;
- u64 ret_offset, start_offset, end_offset = U64_MAX;
- struct nlattr **attrs = info->info.attrs;
- struct devlink_port *port = NULL;
- devlink_chunk_fill_t *region_cb;
- struct devlink_region *region;
- const char *region_name;
- struct devlink *devlink;
- unsigned int index;
- void *region_cb_priv;
- void *hdr;
- int err;
-
- start_offset = state->start_offset;
-
- devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
- if (IS_ERR(devlink))
- return PTR_ERR(devlink);
-
- if (!attrs[DEVLINK_ATTR_REGION_NAME]) {
- NL_SET_ERR_MSG(cb->extack, "No region name provided");
- err = -EINVAL;
- goto out_unlock;
- }
-
- if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
- index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
-
- port = devlink_port_get_by_index(devlink, index);
- if (!port) {
- err = -ENODEV;
- goto out_unlock;
- }
- }
-
- region_attr = attrs[DEVLINK_ATTR_REGION_NAME];
- region_name = nla_data(region_attr);
-
- if (port)
- region = devlink_port_region_get_by_name(port, region_name);
- else
- region = devlink_region_get_by_name(devlink, region_name);
-
- if (!region) {
- NL_SET_ERR_MSG_ATTR(cb->extack, region_attr, "Requested region does not exist");
- err = -EINVAL;
- goto out_unlock;
- }
-
- snapshot_attr = attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
- if (!snapshot_attr) {
- if (!nla_get_flag(attrs[DEVLINK_ATTR_REGION_DIRECT])) {
- NL_SET_ERR_MSG(cb->extack, "No snapshot id provided");
- err = -EINVAL;
- goto out_unlock;
- }
-
- if (!region->ops->read) {
- NL_SET_ERR_MSG(cb->extack, "Requested region does not support direct read");
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
-
- if (port)
- region_cb = &devlink_region_port_direct_fill;
- else
- region_cb = &devlink_region_direct_fill;
- region_cb_priv = region;
- } else {
- struct devlink_snapshot *snapshot;
- u32 snapshot_id;
-
- if (nla_get_flag(attrs[DEVLINK_ATTR_REGION_DIRECT])) {
- NL_SET_ERR_MSG_ATTR(cb->extack, snapshot_attr, "Direct region read does not use snapshot");
- err = -EINVAL;
- goto out_unlock;
- }
-
- snapshot_id = nla_get_u32(snapshot_attr);
- snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
- if (!snapshot) {
- NL_SET_ERR_MSG_ATTR(cb->extack, snapshot_attr, "Requested snapshot does not exist");
- err = -EINVAL;
- goto out_unlock;
- }
- region_cb = &devlink_region_snapshot_fill;
- region_cb_priv = snapshot;
- }
-
- if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
- attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
- if (!start_offset)
- start_offset =
- nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
-
- end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
- end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
- }
-
- if (end_offset > region->size)
- end_offset = region->size;
-
- /* return 0 if there is no further data to read */
- if (start_offset == end_offset) {
- err = 0;
- goto out_unlock;
- }
-
- hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
- DEVLINK_CMD_REGION_READ);
- if (!hdr) {
- err = -EMSGSIZE;
- goto out_unlock;
- }
-
- err = devlink_nl_put_handle(skb, devlink);
- if (err)
- goto nla_put_failure;
-
- if (region->port) {
- err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
- region->port->index);
- if (err)
- goto nla_put_failure;
- }
-
- err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
- if (err)
- goto nla_put_failure;
-
- chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
- if (!chunks_attr) {
- err = -EMSGSIZE;
- goto nla_put_failure;
- }
-
- err = devlink_nl_region_read_fill(skb, region_cb, region_cb_priv,
- start_offset, end_offset, &ret_offset,
- cb->extack);
-
- if (err && err != -EMSGSIZE)
- goto nla_put_failure;
-
-	/* Check if any progress was made, to prevent an infinite loop */
- if (ret_offset == start_offset) {
- err = -EINVAL;
- goto nla_put_failure;
- }
-
- state->start_offset = ret_offset;
-
- nla_nest_end(skb, chunks_attr);
- genlmsg_end(skb, hdr);
- devl_unlock(devlink);
- devlink_put(devlink);
- return skb->len;
-
-nla_put_failure:
- genlmsg_cancel(skb, hdr);
-out_unlock:
- devl_unlock(devlink);
- devlink_put(devlink);
- return err;
-}
-
-struct devlink_stats {
- u64_stats_t rx_bytes;
- u64_stats_t rx_packets;
- struct u64_stats_sync syncp;
-};
-
-/**
- * struct devlink_trap_policer_item - Packet trap policer attributes.
- * @policer: Immutable packet trap policer attributes.
- * @rate: Rate in packets / sec.
- * @burst: Burst size in packets.
- * @list: trap_policer_list member.
- *
- * Describes packet trap policer attributes. Created by devlink during trap
- * policer registration.
- */
-struct devlink_trap_policer_item {
- const struct devlink_trap_policer *policer;
- u64 rate;
- u64 burst;
- struct list_head list;
-};
-
-/**
- * struct devlink_trap_group_item - Packet trap group attributes.
- * @group: Immutable packet trap group attributes.
- * @policer_item: Associated policer item. Can be NULL.
- * @list: trap_group_list member.
- * @stats: Trap group statistics.
- *
- * Describes packet trap group attributes. Created by devlink during trap
- * group registration.
- */
-struct devlink_trap_group_item {
- const struct devlink_trap_group *group;
- struct devlink_trap_policer_item *policer_item;
- struct list_head list;
- struct devlink_stats __percpu *stats;
-};
-
-/**
- * struct devlink_trap_item - Packet trap attributes.
- * @trap: Immutable packet trap attributes.
- * @group_item: Associated group item.
- * @list: trap_list member.
- * @action: Trap action.
- * @stats: Trap statistics.
- * @priv: Driver private information.
- *
- * Describes both mutable and immutable packet trap attributes. Created by
- * devlink during trap registration and used for all trap related operations.
- */
-struct devlink_trap_item {
- const struct devlink_trap *trap;
- struct devlink_trap_group_item *group_item;
- struct list_head list;
- enum devlink_trap_action action;
- struct devlink_stats __percpu *stats;
- void *priv;
-};
-
-static struct devlink_trap_policer_item *
-devlink_trap_policer_item_lookup(struct devlink *devlink, u32 id)
-{
- struct devlink_trap_policer_item *policer_item;
-
- list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
- if (policer_item->policer->id == id)
- return policer_item;
- }
-
- return NULL;
-}
-
-static struct devlink_trap_item *
-devlink_trap_item_lookup(struct devlink *devlink, const char *name)
-{
- struct devlink_trap_item *trap_item;
-
- list_for_each_entry(trap_item, &devlink->trap_list, list) {
- if (!strcmp(trap_item->trap->name, name))
- return trap_item;
- }
-
- return NULL;
-}
-
-static struct devlink_trap_item *
-devlink_trap_item_get_from_info(struct devlink *devlink,
- struct genl_info *info)
-{
- struct nlattr *attr;
-
- if (!info->attrs[DEVLINK_ATTR_TRAP_NAME])
- return NULL;
- attr = info->attrs[DEVLINK_ATTR_TRAP_NAME];
-
- return devlink_trap_item_lookup(devlink, nla_data(attr));
-}
-
-static int
-devlink_trap_action_get_from_info(struct genl_info *info,
- enum devlink_trap_action *p_trap_action)
-{
- u8 val;
-
- val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
- switch (val) {
- case DEVLINK_TRAP_ACTION_DROP:
- case DEVLINK_TRAP_ACTION_TRAP:
- case DEVLINK_TRAP_ACTION_MIRROR:
- *p_trap_action = val;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int devlink_trap_metadata_put(struct sk_buff *msg,
- const struct devlink_trap *trap)
-{
- struct nlattr *attr;
-
- attr = nla_nest_start(msg, DEVLINK_ATTR_TRAP_METADATA);
- if (!attr)
- return -EMSGSIZE;
-
- if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) &&
- nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT))
- goto nla_put_failure;
- if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) &&
- nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE))
- goto nla_put_failure;
-
- nla_nest_end(msg, attr);
-
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, attr);
- return -EMSGSIZE;
-}
-
-static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
- struct devlink_stats *stats)
-{
- int i;
-
- memset(stats, 0, sizeof(*stats));
- for_each_possible_cpu(i) {
- struct devlink_stats *cpu_stats;
- u64 rx_packets, rx_bytes;
- unsigned int start;
-
- cpu_stats = per_cpu_ptr(trap_stats, i);
- do {
- start = u64_stats_fetch_begin(&cpu_stats->syncp);
- rx_packets = u64_stats_read(&cpu_stats->rx_packets);
- rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
- } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
-
- u64_stats_add(&stats->rx_packets, rx_packets);
- u64_stats_add(&stats->rx_bytes, rx_bytes);
- }
-}
-
-static int
-devlink_trap_group_stats_put(struct sk_buff *msg,
- struct devlink_stats __percpu *trap_stats)
-{
- struct devlink_stats stats;
- struct nlattr *attr;
-
- devlink_trap_stats_read(trap_stats, &stats);
-
- attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
- if (!attr)
- return -EMSGSIZE;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
- u64_stats_read(&stats.rx_packets),
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
- u64_stats_read(&stats.rx_bytes),
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- nla_nest_end(msg, attr);
-
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, attr);
- return -EMSGSIZE;
-}
-
-static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
- const struct devlink_trap_item *trap_item)
-{
- struct devlink_stats stats;
- struct nlattr *attr;
- u64 drops = 0;
- int err;
-
- if (devlink->ops->trap_drop_counter_get) {
- err = devlink->ops->trap_drop_counter_get(devlink,
- trap_item->trap,
- &drops);
- if (err)
- return err;
- }
-
- devlink_trap_stats_read(trap_item->stats, &stats);
-
- attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
- if (!attr)
- return -EMSGSIZE;
-
- if (devlink->ops->trap_drop_counter_get &&
- nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
- u64_stats_read(&stats.rx_packets),
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
- u64_stats_read(&stats.rx_bytes),
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- nla_nest_end(msg, attr);
-
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, attr);
- return -EMSGSIZE;
-}
-
-static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
- const struct devlink_trap_item *trap_item,
- enum devlink_command cmd, u32 portid, u32 seq,
- int flags)
-{
- struct devlink_trap_group_item *group_item = trap_item->group_item;
- void *hdr;
- int err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
- group_item->group->name))
- goto nla_put_failure;
-
- if (nla_put_string(msg, DEVLINK_ATTR_TRAP_NAME, trap_item->trap->name))
- goto nla_put_failure;
-
- if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_TYPE, trap_item->trap->type))
- goto nla_put_failure;
-
- if (trap_item->trap->generic &&
- nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
- goto nla_put_failure;
-
- if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_ACTION, trap_item->action))
- goto nla_put_failure;
-
- err = devlink_trap_metadata_put(msg, trap_item->trap);
- if (err)
- goto nla_put_failure;
-
- err = devlink_trap_stats_put(msg, devlink, trap_item);
- if (err)
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
-
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_trap_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_trap_item *trap_item;
- struct sk_buff *msg;
- int err;
-
- if (list_empty(&devlink->trap_list))
- return -EOPNOTSUPP;
-
- trap_item = devlink_trap_item_get_from_info(devlink, info);
- if (!trap_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap");
- return -ENOENT;
- }
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_trap_fill(msg, devlink, trap_item,
- DEVLINK_CMD_TRAP_NEW, info->snd_portid,
- info->snd_seq, 0);
- if (err)
- goto err_trap_fill;
-
- return genlmsg_reply(msg, info);
-
-err_trap_fill:
- nlmsg_free(msg);
- return err;
-}
-
-static int devlink_nl_trap_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb, int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_trap_item *trap_item;
- int idx = 0;
- int err = 0;
-
- list_for_each_entry(trap_item, &devlink->trap_list, list) {
- if (idx < state->idx) {
- idx++;
- continue;
- }
- err = devlink_nl_trap_fill(msg, devlink, trap_item,
- DEVLINK_CMD_TRAP_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err) {
- state->idx = idx;
- break;
- }
- idx++;
- }
-
- return err;
-}
-
-int devlink_nl_trap_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_trap_get_dump_one);
-}
-
-static int __devlink_trap_action_set(struct devlink *devlink,
- struct devlink_trap_item *trap_item,
- enum devlink_trap_action trap_action,
- struct netlink_ext_ack *extack)
-{
- int err;
-
- if (trap_item->action != trap_action &&
- trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) {
- NL_SET_ERR_MSG(extack, "Cannot change action of non-drop traps. Skipping");
- return 0;
- }
-
- err = devlink->ops->trap_action_set(devlink, trap_item->trap,
- trap_action, extack);
- if (err)
- return err;
-
- trap_item->action = trap_action;
-
- return 0;
-}
-
-static int devlink_trap_action_set(struct devlink *devlink,
- struct devlink_trap_item *trap_item,
- struct genl_info *info)
-{
- enum devlink_trap_action trap_action;
- int err;
-
- if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
- return 0;
-
- err = devlink_trap_action_get_from_info(info, &trap_action);
- if (err) {
- NL_SET_ERR_MSG(info->extack, "Invalid trap action");
- return -EINVAL;
- }
-
- return __devlink_trap_action_set(devlink, trap_item, trap_action,
- info->extack);
-}
-
-static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_trap_item *trap_item;
-
- if (list_empty(&devlink->trap_list))
- return -EOPNOTSUPP;
-
- trap_item = devlink_trap_item_get_from_info(devlink, info);
- if (!trap_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap");
- return -ENOENT;
- }
-
- return devlink_trap_action_set(devlink, trap_item, info);
-}
-
-static struct devlink_trap_group_item *
-devlink_trap_group_item_lookup(struct devlink *devlink, const char *name)
-{
- struct devlink_trap_group_item *group_item;
-
- list_for_each_entry(group_item, &devlink->trap_group_list, list) {
- if (!strcmp(group_item->group->name, name))
- return group_item;
- }
-
- return NULL;
-}
-
-static struct devlink_trap_group_item *
-devlink_trap_group_item_lookup_by_id(struct devlink *devlink, u16 id)
-{
- struct devlink_trap_group_item *group_item;
-
- list_for_each_entry(group_item, &devlink->trap_group_list, list) {
- if (group_item->group->id == id)
- return group_item;
- }
-
- return NULL;
-}
-
-static struct devlink_trap_group_item *
-devlink_trap_group_item_get_from_info(struct devlink *devlink,
- struct genl_info *info)
-{
- char *name;
-
- if (!info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME])
- return NULL;
- name = nla_data(info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]);
-
- return devlink_trap_group_item_lookup(devlink, name);
-}
-
-static int
-devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
- const struct devlink_trap_group_item *group_item,
- enum devlink_command cmd, u32 portid, u32 seq,
- int flags)
-{
- void *hdr;
- int err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
- group_item->group->name))
- goto nla_put_failure;
-
- if (group_item->group->generic &&
- nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
- goto nla_put_failure;
-
- if (group_item->policer_item &&
- nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
- group_item->policer_item->policer->id))
- goto nla_put_failure;
-
- err = devlink_trap_group_stats_put(msg, group_item->stats);
- if (err)
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
-
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_trap_group_get_doit(struct sk_buff *skb, struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_trap_group_item *group_item;
- struct sk_buff *msg;
- int err;
-
- if (list_empty(&devlink->trap_group_list))
- return -EOPNOTSUPP;
-
- group_item = devlink_trap_group_item_get_from_info(devlink, info);
- if (!group_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap group");
- return -ENOENT;
- }
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_trap_group_fill(msg, devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_NEW,
- info->snd_portid, info->snd_seq, 0);
- if (err)
- goto err_trap_group_fill;
-
- return genlmsg_reply(msg, info);
-
-err_trap_group_fill:
- nlmsg_free(msg);
- return err;
-}
-
-static int devlink_nl_trap_group_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb,
- int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_trap_group_item *group_item;
- int idx = 0;
- int err = 0;
-
- list_for_each_entry(group_item, &devlink->trap_group_list, list) {
- if (idx < state->idx) {
- idx++;
- continue;
- }
- err = devlink_nl_trap_group_fill(msg, devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err) {
- state->idx = idx;
- break;
- }
- idx++;
- }
-
- return err;
-}
-
-int devlink_nl_trap_group_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_trap_group_get_dump_one);
-}
-
-static int
-__devlink_trap_group_action_set(struct devlink *devlink,
- struct devlink_trap_group_item *group_item,
- enum devlink_trap_action trap_action,
- struct netlink_ext_ack *extack)
-{
- const char *group_name = group_item->group->name;
- struct devlink_trap_item *trap_item;
- int err;
-
- if (devlink->ops->trap_group_action_set) {
- err = devlink->ops->trap_group_action_set(devlink, group_item->group,
- trap_action, extack);
- if (err)
- return err;
-
- list_for_each_entry(trap_item, &devlink->trap_list, list) {
- if (strcmp(trap_item->group_item->group->name, group_name))
- continue;
- if (trap_item->action != trap_action &&
- trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP)
- continue;
- trap_item->action = trap_action;
- }
-
- return 0;
- }
-
- list_for_each_entry(trap_item, &devlink->trap_list, list) {
- if (strcmp(trap_item->group_item->group->name, group_name))
- continue;
- err = __devlink_trap_action_set(devlink, trap_item,
- trap_action, extack);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int
-devlink_trap_group_action_set(struct devlink *devlink,
- struct devlink_trap_group_item *group_item,
- struct genl_info *info, bool *p_modified)
-{
- enum devlink_trap_action trap_action;
- int err;
-
- if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
- return 0;
-
- err = devlink_trap_action_get_from_info(info, &trap_action);
- if (err) {
- NL_SET_ERR_MSG(info->extack, "Invalid trap action");
- return -EINVAL;
- }
-
- err = __devlink_trap_group_action_set(devlink, group_item, trap_action,
- info->extack);
- if (err)
- return err;
-
- *p_modified = true;
-
- return 0;
-}
-
-static int devlink_trap_group_set(struct devlink *devlink,
- struct devlink_trap_group_item *group_item,
- struct genl_info *info)
-{
- struct devlink_trap_policer_item *policer_item;
- struct netlink_ext_ack *extack = info->extack;
- const struct devlink_trap_policer *policer;
- struct nlattr **attrs = info->attrs;
- u32 policer_id;
- int err;
-
- if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
- return 0;
-
- if (!devlink->ops->trap_group_set)
- return -EOPNOTSUPP;
-
- policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
- policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
- if (policer_id && !policer_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
- return -ENOENT;
- }
- policer = policer_item ? policer_item->policer : NULL;
-
- err = devlink->ops->trap_group_set(devlink, group_item->group, policer,
- extack);
- if (err)
- return err;
-
- group_item->policer_item = policer_item;
-
- return 0;
-}
-
-static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
- struct devlink_trap_group_item *group_item;
- bool modified = false;
- int err;
-
- if (list_empty(&devlink->trap_group_list))
- return -EOPNOTSUPP;
-
- group_item = devlink_trap_group_item_get_from_info(devlink, info);
- if (!group_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap group");
- return -ENOENT;
- }
-
- err = devlink_trap_group_action_set(devlink, group_item, info,
- &modified);
- if (err)
- return err;
-
- err = devlink_trap_group_set(devlink, group_item, info);
- if (err)
- goto err_trap_group_set;
-
- return 0;
-
-err_trap_group_set:
- if (modified)
- NL_SET_ERR_MSG(extack, "Trap group set failed, but some changes were committed already");
- return err;
-}
-
-static struct devlink_trap_policer_item *
-devlink_trap_policer_item_get_from_info(struct devlink *devlink,
- struct genl_info *info)
-{
- u32 id;
-
- if (!info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
- return NULL;
- id = nla_get_u32(info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
-
- return devlink_trap_policer_item_lookup(devlink, id);
-}
-
-static int
-devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
- const struct devlink_trap_policer *policer)
-{
- struct nlattr *attr;
- u64 drops;
- int err;
-
- if (!devlink->ops->trap_policer_counter_get)
- return 0;
-
- err = devlink->ops->trap_policer_counter_get(devlink, policer, &drops);
- if (err)
- return err;
-
- attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
- if (!attr)
- return -EMSGSIZE;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
- DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- nla_nest_end(msg, attr);
-
- return 0;
-
-nla_put_failure:
- nla_nest_cancel(msg, attr);
- return -EMSGSIZE;
-}
-
-static int
-devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
- const struct devlink_trap_policer_item *policer_item,
- enum devlink_command cmd, u32 portid, u32 seq,
- int flags)
-{
- void *hdr;
- int err;
-
- hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
- if (!hdr)
- return -EMSGSIZE;
-
- if (devlink_nl_put_handle(msg, devlink))
- goto nla_put_failure;
-
- if (nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
- policer_item->policer->id))
- goto nla_put_failure;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
- policer_item->rate, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
- policer_item->burst, DEVLINK_ATTR_PAD))
- goto nla_put_failure;
-
- err = devlink_trap_policer_stats_put(msg, devlink,
- policer_item->policer);
- if (err)
- goto nla_put_failure;
-
- genlmsg_end(msg, hdr);
-
- return 0;
-
-nla_put_failure:
- genlmsg_cancel(msg, hdr);
- return -EMSGSIZE;
-}
-
-int devlink_nl_trap_policer_get_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_trap_policer_item *policer_item;
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
- struct sk_buff *msg;
- int err;
-
- if (list_empty(&devlink->trap_policer_list))
- return -EOPNOTSUPP;
-
- policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
- if (!policer_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
- return -ENOENT;
- }
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
- err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
- DEVLINK_CMD_TRAP_POLICER_NEW,
- info->snd_portid, info->snd_seq, 0);
- if (err)
- goto err_trap_policer_fill;
-
- return genlmsg_reply(msg, info);
-
-err_trap_policer_fill:
- nlmsg_free(msg);
- return err;
-}
-
-static int devlink_nl_trap_policer_get_dump_one(struct sk_buff *msg,
- struct devlink *devlink,
- struct netlink_callback *cb,
- int flags)
-{
- struct devlink_nl_dump_state *state = devlink_dump_state(cb);
- struct devlink_trap_policer_item *policer_item;
- int idx = 0;
- int err = 0;
-
- list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
- if (idx < state->idx) {
- idx++;
- continue;
- }
- err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
- DEVLINK_CMD_TRAP_POLICER_NEW,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, flags);
- if (err) {
- state->idx = idx;
- break;
- }
- idx++;
- }
-
- return err;
-}
-
-int devlink_nl_trap_policer_get_dumpit(struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- return devlink_nl_dumpit(skb, cb, devlink_nl_trap_policer_get_dump_one);
-}
-
-static int
-devlink_trap_policer_set(struct devlink *devlink,
- struct devlink_trap_policer_item *policer_item,
- struct genl_info *info)
-{
- struct netlink_ext_ack *extack = info->extack;
- struct nlattr **attrs = info->attrs;
- u64 rate, burst;
- int err;
-
- rate = policer_item->rate;
- burst = policer_item->burst;
-
- if (attrs[DEVLINK_ATTR_TRAP_POLICER_RATE])
- rate = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_RATE]);
-
- if (attrs[DEVLINK_ATTR_TRAP_POLICER_BURST])
- burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
-
- if (rate < policer_item->policer->min_rate) {
- NL_SET_ERR_MSG(extack, "Policer rate lower than limit");
- return -EINVAL;
- }
-
- if (rate > policer_item->policer->max_rate) {
- NL_SET_ERR_MSG(extack, "Policer rate higher than limit");
- return -EINVAL;
- }
-
- if (burst < policer_item->policer->min_burst) {
- NL_SET_ERR_MSG(extack, "Policer burst size lower than limit");
- return -EINVAL;
- }
-
- if (burst > policer_item->policer->max_burst) {
- NL_SET_ERR_MSG(extack, "Policer burst size higher than limit");
- return -EINVAL;
- }
-
- err = devlink->ops->trap_policer_set(devlink, policer_item->policer,
- rate, burst, info->extack);
- if (err)
- return err;
-
- policer_item->rate = rate;
- policer_item->burst = burst;
-
- return 0;
-}
-
-static int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
- struct genl_info *info)
-{
- struct devlink_trap_policer_item *policer_item;
- struct netlink_ext_ack *extack = info->extack;
- struct devlink *devlink = info->user_ptr[0];
-
- if (list_empty(&devlink->trap_policer_list))
- return -EOPNOTSUPP;
-
- if (!devlink->ops->trap_policer_set)
- return -EOPNOTSUPP;
-
- policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
- if (!policer_item) {
- NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
- return -ENOENT;
- }
-
- return devlink_trap_policer_set(devlink, policer_item, info);
-}
-
-const struct genl_small_ops devlink_nl_small_ops[40] = {
- {
- .cmd = DEVLINK_CMD_PORT_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_RATE_SET,
- .doit = devlink_nl_cmd_rate_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RATE_NEW,
- .doit = devlink_nl_cmd_rate_new_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RATE_DEL,
- .doit = devlink_nl_cmd_rate_del_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PORT_SPLIT,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_split_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_PORT_UNSPLIT,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_unsplit_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_PORT_NEW,
- .doit = devlink_nl_cmd_port_new_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PORT_DEL,
- .doit = devlink_nl_cmd_port_del_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
-
- {
- .cmd = DEVLINK_CMD_LINECARD_SET,
- .doit = devlink_nl_cmd_linecard_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SB_POOL_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_pool_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_port_pool_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_ESWITCH_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_eswitch_get_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_ESWITCH_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_eswitch_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_table_get,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_entries_get,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_headers_get,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_dpipe_table_counters_set,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RESOURCE_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_resource_set,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_RESOURCE_DUMP,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_resource_dump,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_RELOAD,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_reload,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PARAM_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_param_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_PORT_PARAM_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_param_get_doit,
- .dumpit = devlink_nl_cmd_port_param_get_dumpit,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- /* can be retrieved by unprivileged users */
- },
- {
- .cmd = DEVLINK_CMD_PORT_PARAM_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_port_param_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
- },
- {
- .cmd = DEVLINK_CMD_REGION_NEW,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_region_new,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_REGION_DEL,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_region_del,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_REGION_READ,
- .validate = GENL_DONT_VALIDATE_STRICT |
- GENL_DONT_VALIDATE_DUMP_STRICT,
- .dumpit = devlink_nl_cmd_region_read_dumpit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_set_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_recover_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
- .validate = GENL_DONT_VALIDATE_STRICT |
- GENL_DONT_VALIDATE_DUMP_STRICT,
- .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_health_reporter_test_doit,
- .flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
- },
- {
- .cmd = DEVLINK_CMD_FLASH_UPDATE,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = devlink_nl_cmd_flash_update,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_TRAP_SET,
- .doit = devlink_nl_cmd_trap_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_TRAP_GROUP_SET,
- .doit = devlink_nl_cmd_trap_group_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
- .doit = devlink_nl_cmd_trap_policer_set_doit,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = DEVLINK_CMD_SELFTESTS_RUN,
- .doit = devlink_nl_cmd_selftests_run,
- .flags = GENL_ADMIN_PERM,
- },
- /* -- No new ops here! Use split ops going forward! -- */
-};
-
-static void
-devlink_trap_policer_notify(struct devlink *devlink,
- const struct devlink_trap_policer_item *policer_item,
- enum devlink_command cmd);
-static void
-devlink_trap_group_notify(struct devlink *devlink,
- const struct devlink_trap_group_item *group_item,
- enum devlink_command cmd);
-static void devlink_trap_notify(struct devlink *devlink,
- const struct devlink_trap_item *trap_item,
- enum devlink_command cmd);
-
-void devlink_notify_register(struct devlink *devlink)
-{
- struct devlink_trap_policer_item *policer_item;
- struct devlink_trap_group_item *group_item;
- struct devlink_param_item *param_item;
- struct devlink_trap_item *trap_item;
- struct devlink_port *devlink_port;
- struct devlink_linecard *linecard;
- struct devlink_rate *rate_node;
- struct devlink_region *region;
- unsigned long port_index;
- unsigned long param_id;
-
- devlink_notify(devlink, DEVLINK_CMD_NEW);
- list_for_each_entry(linecard, &devlink->linecard_list, list)
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
-
- xa_for_each(&devlink->ports, port_index, devlink_port)
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
-
- list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
- devlink_trap_policer_notify(devlink, policer_item,
- DEVLINK_CMD_TRAP_POLICER_NEW);
-
- list_for_each_entry(group_item, &devlink->trap_group_list, list)
- devlink_trap_group_notify(devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_NEW);
-
- list_for_each_entry(trap_item, &devlink->trap_list, list)
- devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
-
- list_for_each_entry(rate_node, &devlink->rate_list, list)
- devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
-
- list_for_each_entry(region, &devlink->region_list, list)
- devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
-
- xa_for_each(&devlink->params, param_id, param_item)
- devlink_param_notify(devlink, 0, param_item,
- DEVLINK_CMD_PARAM_NEW);
-}
-
-void devlink_notify_unregister(struct devlink *devlink)
-{
- struct devlink_trap_policer_item *policer_item;
- struct devlink_trap_group_item *group_item;
- struct devlink_param_item *param_item;
- struct devlink_trap_item *trap_item;
- struct devlink_port *devlink_port;
- struct devlink_linecard *linecard;
- struct devlink_rate *rate_node;
- struct devlink_region *region;
- unsigned long port_index;
- unsigned long param_id;
-
- xa_for_each(&devlink->params, param_id, param_item)
- devlink_param_notify(devlink, 0, param_item,
- DEVLINK_CMD_PARAM_DEL);
-
- list_for_each_entry_reverse(region, &devlink->region_list, list)
- devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
-
- list_for_each_entry_reverse(rate_node, &devlink->rate_list, list)
- devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
-
- list_for_each_entry_reverse(trap_item, &devlink->trap_list, list)
- devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
-
- list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list)
- devlink_trap_group_notify(devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_DEL);
- list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list,
- list)
- devlink_trap_policer_notify(devlink, policer_item,
- DEVLINK_CMD_TRAP_POLICER_DEL);
-
- xa_for_each(&devlink->ports, port_index, devlink_port)
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
- list_for_each_entry_reverse(linecard, &devlink->linecard_list, list)
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
- devlink_notify(devlink, DEVLINK_CMD_DEL);
-}
-
-static void devlink_port_type_warn(struct work_struct *work)
-{
- struct devlink_port *port = container_of(to_delayed_work(work),
- struct devlink_port,
- type_warn_dw);
- dev_warn(port->devlink->dev, "Type was not set for devlink port.");
-}
-
-static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
-{
- /* Ignore CPU and DSA flavours. */
- return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
- devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA &&
- devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_UNUSED;
-}
-
-#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
-
-static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
-{
- if (!devlink_port_type_should_warn(devlink_port))
- return;
- /* Schedule a work to WARN in case driver does not set port
- * type within timeout.
- */
- schedule_delayed_work(&devlink_port->type_warn_dw,
- DEVLINK_PORT_TYPE_WARN_TIMEOUT);
-}
-
-static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
-{
- if (!devlink_port_type_should_warn(devlink_port))
- return;
- cancel_delayed_work_sync(&devlink_port->type_warn_dw);
-}
-
-/**
- * devlink_port_init() - Init devlink port
- *
- * @devlink: devlink
- * @devlink_port: devlink port
- *
- * Initialize essential stuff that is needed for functions
- * that may be called before devlink port registration.
- * Call to this function is optional and not needed
- * in case the driver does not use such functions.
- */
-void devlink_port_init(struct devlink *devlink,
- struct devlink_port *devlink_port)
-{
- if (devlink_port->initialized)
- return;
- devlink_port->devlink = devlink;
- INIT_LIST_HEAD(&devlink_port->region_list);
- devlink_port->initialized = true;
-}
-EXPORT_SYMBOL_GPL(devlink_port_init);
-
-/**
- * devlink_port_fini() - Deinitialize devlink port
- *
- * @devlink_port: devlink port
- *
- * Deinitialize essential stuff that is in use for functions
- * that may be called after devlink port unregistration.
- * Call to this function is optional and not needed
- * in case the driver does not use such functions.
- */
-void devlink_port_fini(struct devlink_port *devlink_port)
-{
- WARN_ON(!list_empty(&devlink_port->region_list));
-}
-EXPORT_SYMBOL_GPL(devlink_port_fini);
-
-static const struct devlink_port_ops devlink_port_dummy_ops = {};
-
-/**
- * devl_port_register_with_ops() - Register devlink port
- *
- * @devlink: devlink
- * @devlink_port: devlink port
- * @port_index: driver-specific numerical identifier of the port
- * @ops: port ops
- *
- * Register devlink port with provided port index. User can use
- * any indexing, even hw-related one. devlink_port structure
- * is convenient to be embedded inside user driver private structure.
- * Note that the caller should take care of zeroing the devlink_port
- * structure.
- */
-int devl_port_register_with_ops(struct devlink *devlink,
- struct devlink_port *devlink_port,
- unsigned int port_index,
- const struct devlink_port_ops *ops)
-{
- int err;
-
- devl_assert_locked(devlink);
-
- ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
-
- devlink_port_init(devlink, devlink_port);
- devlink_port->registered = true;
- devlink_port->index = port_index;
- devlink_port->ops = ops ? ops : &devlink_port_dummy_ops;
- spin_lock_init(&devlink_port->type_lock);
- INIT_LIST_HEAD(&devlink_port->reporter_list);
- err = xa_insert(&devlink->ports, port_index, devlink_port, GFP_KERNEL);
- if (err) {
- devlink_port->registered = false;
- return err;
- }
-
- INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
- devlink_port_type_warn_schedule(devlink_port);
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_port_register_with_ops);
-
-/**
- * devlink_port_register_with_ops - Register devlink port
- *
- * @devlink: devlink
- * @devlink_port: devlink port
- * @port_index: driver-specific numerical identifier of the port
- * @ops: port ops
- *
- * Register devlink port with provided port index. User can use
- * any indexing, even hw-related one. devlink_port structure
- * is convenient to be embedded inside user driver private structure.
- * Note that the caller should take care of zeroing the devlink_port
- * structure.
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-int devlink_port_register_with_ops(struct devlink *devlink,
- struct devlink_port *devlink_port,
- unsigned int port_index,
- const struct devlink_port_ops *ops)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_port_register_with_ops(devlink, devlink_port,
- port_index, ops);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_port_register_with_ops);
-
-/**
- * devl_port_unregister() - Unregister devlink port
- *
- * @devlink_port: devlink port
- */
-void devl_port_unregister(struct devlink_port *devlink_port)
-{
- lockdep_assert_held(&devlink_port->devlink->lock);
- WARN_ON(devlink_port->type != DEVLINK_PORT_TYPE_NOTSET);
-
- devlink_port_type_warn_cancel(devlink_port);
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
- xa_erase(&devlink_port->devlink->ports, devlink_port->index);
- WARN_ON(!list_empty(&devlink_port->reporter_list));
- devlink_port->registered = false;
-}
-EXPORT_SYMBOL_GPL(devl_port_unregister);
-
-/**
- * devlink_port_unregister - Unregister devlink port
- *
- * @devlink_port: devlink port
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_port_unregister(struct devlink_port *devlink_port)
-{
- struct devlink *devlink = devlink_port->devlink;
-
- devl_lock(devlink);
- devl_port_unregister(devlink_port);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_port_unregister);
-
-static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
- struct net_device *netdev)
-{
- const struct net_device_ops *ops = netdev->netdev_ops;
-
- /* If driver registers devlink port, it should set devlink port
- * attributes accordingly so the compat functions are called
- * and the original ops are not used.
- */
- if (ops->ndo_get_phys_port_name) {
- /* Some drivers use the same set of ndos for netdevs
- * that have devlink_port registered and also for
- * those who don't. Make sure that ndo_get_phys_port_name
- * returns -EOPNOTSUPP here in case it is defined.
- * Warn if not.
- */
- char name[IFNAMSIZ];
- int err;
-
- err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
- WARN_ON(err != -EOPNOTSUPP);
- }
- if (ops->ndo_get_port_parent_id) {
- /* Some drivers use the same set of ndos for netdevs
- * that have devlink_port registered and also for
- * those who don't. Make sure that ndo_get_port_parent_id
- * returns -EOPNOTSUPP here in case it is defined.
- * Warn if not.
- */
- struct netdev_phys_item_id ppid;
- int err;
-
- err = ops->ndo_get_port_parent_id(netdev, &ppid);
- WARN_ON(err != -EOPNOTSUPP);
- }
-}
-
-static void __devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type type,
- void *type_dev)
-{
- struct net_device *netdev = type_dev;
-
- ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
-
- if (type == DEVLINK_PORT_TYPE_NOTSET) {
- devlink_port_type_warn_schedule(devlink_port);
- } else {
- devlink_port_type_warn_cancel(devlink_port);
- if (type == DEVLINK_PORT_TYPE_ETH && netdev)
- devlink_port_type_netdev_checks(devlink_port, netdev);
- }
-
- spin_lock_bh(&devlink_port->type_lock);
- devlink_port->type = type;
- switch (type) {
- case DEVLINK_PORT_TYPE_ETH:
- devlink_port->type_eth.netdev = netdev;
- if (netdev) {
- ASSERT_RTNL();
- devlink_port->type_eth.ifindex = netdev->ifindex;
- BUILD_BUG_ON(sizeof(devlink_port->type_eth.ifname) !=
- sizeof(netdev->name));
- strcpy(devlink_port->type_eth.ifname, netdev->name);
- }
- break;
- case DEVLINK_PORT_TYPE_IB:
- devlink_port->type_ib.ibdev = type_dev;
- break;
- default:
- break;
- }
- spin_unlock_bh(&devlink_port->type_lock);
- devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
-}
-
-/**
- * devlink_port_type_eth_set - Set port type to Ethernet
- *
- * @devlink_port: devlink port
- *
- * If driver is calling this, most likely it is doing something wrong.
- */
-void devlink_port_type_eth_set(struct devlink_port *devlink_port)
-{
- dev_warn(devlink_port->devlink->dev,
- "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
- devlink_port->index);
- __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, NULL);
-}
-EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
-
-/**
- * devlink_port_type_ib_set - Set port type to InfiniBand
- *
- * @devlink_port: devlink port
- * @ibdev: related IB device
- */
-void devlink_port_type_ib_set(struct devlink_port *devlink_port,
- struct ib_device *ibdev)
-{
- __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_IB, ibdev);
-}
-EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
-
-/**
- * devlink_port_type_clear - Clear port type
- *
- * @devlink_port: devlink port
- *
- * If driver is calling this for clearing Ethernet type, most likely
- * it is doing something wrong.
- */
-void devlink_port_type_clear(struct devlink_port *devlink_port)
-{
- if (devlink_port->type == DEVLINK_PORT_TYPE_ETH)
- dev_warn(devlink_port->devlink->dev,
- "devlink port type for port %d cleared without a software interface reference, device type not supported by the kernel?\n",
- devlink_port->index);
- __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
-}
-EXPORT_SYMBOL_GPL(devlink_port_type_clear);
-
-int devlink_port_netdevice_event(struct notifier_block *nb,
- unsigned long event, void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct devlink_port *devlink_port = netdev->devlink_port;
- struct devlink *devlink;
-
- if (!devlink_port)
- return NOTIFY_OK;
- devlink = devlink_port->devlink;
-
- switch (event) {
- case NETDEV_POST_INIT:
- /* Set the type but not netdev pointer. It is going to be set
- * later on by NETDEV_REGISTER event. Happens once during
- * netdevice register
- */
- __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH,
- NULL);
- break;
- case NETDEV_REGISTER:
- case NETDEV_CHANGENAME:
- if (devlink_net(devlink) != dev_net(netdev))
- return NOTIFY_OK;
- /* Set the netdev on top of previously set type. Note this
- * event happens also during net namespace change so here
- * we take into account netdev pointer appearing in this
- * namespace.
- */
- __devlink_port_type_set(devlink_port, devlink_port->type,
- netdev);
- break;
- case NETDEV_UNREGISTER:
- if (devlink_net(devlink) != dev_net(netdev))
- return NOTIFY_OK;
- /* Clear netdev pointer, but not the type. This event happens
- * also during net namespace change so we need to clear
- * pointer to netdev that is going to another net namespace.
- */
- __devlink_port_type_set(devlink_port, devlink_port->type,
- NULL);
- break;
- case NETDEV_PRE_UNINIT:
-		/* Clear the type and the netdev pointer. Happens once during
- * netdevice unregister.
- */
- __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET,
- NULL);
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
- enum devlink_port_flavour flavour)
-{
- struct devlink_port_attrs *attrs = &devlink_port->attrs;
-
- devlink_port->attrs_set = true;
- attrs->flavour = flavour;
- if (attrs->switch_id.id_len) {
- devlink_port->switch_port = true;
- if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN))
- attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN;
- } else {
- devlink_port->switch_port = false;
- }
- return 0;
-}
-
-/**
- * devlink_port_attrs_set - Set port attributes
- *
- * @devlink_port: devlink port
- * @attrs: devlink port attrs
- */
-void devlink_port_attrs_set(struct devlink_port *devlink_port,
- struct devlink_port_attrs *attrs)
-{
- int ret;
-
- ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
-
- devlink_port->attrs = *attrs;
- ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
- if (ret)
- return;
- WARN_ON(attrs->splittable && attrs->split);
-}
-EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
-
-/**
- * devlink_port_attrs_pci_pf_set - Set PCI PF port attributes
- *
- * @devlink_port: devlink port
- * @controller: associated controller number for the devlink port instance
- * @pf: associated PF for the devlink port instance
- * @external: indicates if the port is for an external controller
- */
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
- u16 pf, bool external)
-{
- struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int ret;
-
- ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
-
- ret = __devlink_port_attrs_set(devlink_port,
- DEVLINK_PORT_FLAVOUR_PCI_PF);
- if (ret)
- return;
- attrs->pci_pf.controller = controller;
- attrs->pci_pf.pf = pf;
- attrs->pci_pf.external = external;
-}
-EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
-
-/**
- * devlink_port_attrs_pci_vf_set - Set PCI VF port attributes
- *
- * @devlink_port: devlink port
- * @controller: associated controller number for the devlink port instance
- * @pf: associated PF for the devlink port instance
- * @vf: associated VF of a PF for the devlink port instance
- * @external: indicates if the port is for an external controller
- */
-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
- u16 pf, u16 vf, bool external)
-{
- struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int ret;
-
- ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
-
- ret = __devlink_port_attrs_set(devlink_port,
- DEVLINK_PORT_FLAVOUR_PCI_VF);
- if (ret)
- return;
- attrs->pci_vf.controller = controller;
- attrs->pci_vf.pf = pf;
- attrs->pci_vf.vf = vf;
- attrs->pci_vf.external = external;
-}
-EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
-
-/**
- * devlink_port_attrs_pci_sf_set - Set PCI SF port attributes
- *
- * @devlink_port: devlink port
- * @controller: associated controller number for the devlink port instance
- * @pf: associated PF for the devlink port instance
- * @sf: associated SF of a PF for the devlink port instance
- * @external: indicates if the port is for an external controller
- */
-void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
- u16 pf, u32 sf, bool external)
-{
- struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int ret;
-
- ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
-
- ret = __devlink_port_attrs_set(devlink_port,
- DEVLINK_PORT_FLAVOUR_PCI_SF);
- if (ret)
- return;
- attrs->pci_sf.controller = controller;
- attrs->pci_sf.pf = pf;
- attrs->pci_sf.sf = sf;
- attrs->pci_sf.external = external;
-}
-EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
-
-/**
- * devl_rate_node_create - create devlink rate node
- * @devlink: devlink instance
- * @priv: driver private data
- * @node_name: name of the resulting node
- * @parent: parent devlink_rate struct
- *
- * Create devlink rate object of type node
- */
-struct devlink_rate *
-devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name,
- struct devlink_rate *parent)
-{
- struct devlink_rate *rate_node;
-
- rate_node = devlink_rate_node_get_by_name(devlink, node_name);
- if (!IS_ERR(rate_node))
- return ERR_PTR(-EEXIST);
-
- rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
- if (!rate_node)
- return ERR_PTR(-ENOMEM);
-
- if (parent) {
- rate_node->parent = parent;
- refcount_inc(&rate_node->parent->refcnt);
- }
-
- rate_node->type = DEVLINK_RATE_TYPE_NODE;
- rate_node->devlink = devlink;
- rate_node->priv = priv;
-
- rate_node->name = kstrdup(node_name, GFP_KERNEL);
- if (!rate_node->name) {
- kfree(rate_node);
- return ERR_PTR(-ENOMEM);
- }
-
- refcount_set(&rate_node->refcnt, 1);
- list_add(&rate_node->list, &devlink->rate_list);
- devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
- return rate_node;
-}
-EXPORT_SYMBOL_GPL(devl_rate_node_create);
-
-/**
- * devl_rate_leaf_create - create devlink rate leaf
- * @devlink_port: devlink port object to create rate object on
- * @priv: driver private data
- * @parent: parent devlink_rate struct
- *
- * Create devlink rate object of type leaf on provided @devlink_port.
- */
-int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv,
- struct devlink_rate *parent)
-{
- struct devlink *devlink = devlink_port->devlink;
- struct devlink_rate *devlink_rate;
-
- devl_assert_locked(devlink_port->devlink);
-
- if (WARN_ON(devlink_port->devlink_rate))
- return -EBUSY;
-
- devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
- if (!devlink_rate)
- return -ENOMEM;
-
- if (parent) {
- devlink_rate->parent = parent;
- refcount_inc(&devlink_rate->parent->refcnt);
- }
-
- devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
- devlink_rate->devlink = devlink;
- devlink_rate->devlink_port = devlink_port;
- devlink_rate->priv = priv;
- list_add_tail(&devlink_rate->list, &devlink->rate_list);
- devlink_port->devlink_rate = devlink_rate;
- devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
-
-/**
- * devl_rate_leaf_destroy - destroy devlink rate leaf
- *
- * @devlink_port: devlink port linked to the rate object
- *
- * Destroy the devlink rate object of type leaf on provided @devlink_port.
- */
-void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
-{
- struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
-
- devl_assert_locked(devlink_port->devlink);
- if (!devlink_rate)
- return;
-
- devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
- if (devlink_rate->parent)
- refcount_dec(&devlink_rate->parent->refcnt);
- list_del(&devlink_rate->list);
- devlink_port->devlink_rate = NULL;
- kfree(devlink_rate);
-}
-EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
-
-/**
- * devl_rate_nodes_destroy - destroy all devlink rate nodes on device
- * @devlink: devlink instance
- *
- * Unset parent for all rate objects and destroy all rate nodes
- * on the specified device.
- */
-void devl_rate_nodes_destroy(struct devlink *devlink)
-{
- static struct devlink_rate *devlink_rate, *tmp;
- const struct devlink_ops *ops = devlink->ops;
-
- devl_assert_locked(devlink);
-
- list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
- if (!devlink_rate->parent)
- continue;
-
- refcount_dec(&devlink_rate->parent->refcnt);
- if (devlink_rate_is_leaf(devlink_rate))
- ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
- NULL, NULL);
- else if (devlink_rate_is_node(devlink_rate))
- ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
- NULL, NULL);
- }
- list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
- if (devlink_rate_is_node(devlink_rate)) {
- ops->rate_node_del(devlink_rate, devlink_rate->priv, NULL);
- list_del(&devlink_rate->list);
- kfree(devlink_rate->name);
- kfree(devlink_rate);
- }
- }
-}
-EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
-
-/**
- * devlink_port_linecard_set - Link port with a linecard
- *
- * @devlink_port: devlink port
- * @linecard: devlink linecard
- */
-void devlink_port_linecard_set(struct devlink_port *devlink_port,
- struct devlink_linecard *linecard)
-{
- ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
-
- devlink_port->linecard = linecard;
-}
-EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
-
-static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
- char *name, size_t len)
-{
- struct devlink_port_attrs *attrs = &devlink_port->attrs;
- int n = 0;
-
- if (!devlink_port->attrs_set)
- return -EOPNOTSUPP;
-
- switch (attrs->flavour) {
- case DEVLINK_PORT_FLAVOUR_PHYSICAL:
- if (devlink_port->linecard)
- n = snprintf(name, len, "l%u",
- devlink_port->linecard->index);
- if (n < len)
- n += snprintf(name + n, len - n, "p%u",
- attrs->phys.port_number);
- if (n < len && attrs->split)
- n += snprintf(name + n, len - n, "s%u",
- attrs->phys.split_subport_number);
- break;
- case DEVLINK_PORT_FLAVOUR_CPU:
- case DEVLINK_PORT_FLAVOUR_DSA:
- case DEVLINK_PORT_FLAVOUR_UNUSED:
- /* CPU and DSA ports do not have a netdevice associated
- * with them, so this case should never happen.
- */
- WARN_ON(1);
- return -EINVAL;
- case DEVLINK_PORT_FLAVOUR_PCI_PF:
- if (attrs->pci_pf.external) {
- n = snprintf(name, len, "c%u", attrs->pci_pf.controller);
- if (n >= len)
- return -EINVAL;
- len -= n;
- name += n;
- }
- n = snprintf(name, len, "pf%u", attrs->pci_pf.pf);
- break;
- case DEVLINK_PORT_FLAVOUR_PCI_VF:
- if (attrs->pci_vf.external) {
- n = snprintf(name, len, "c%u", attrs->pci_vf.controller);
- if (n >= len)
- return -EINVAL;
- len -= n;
- name += n;
- }
- n = snprintf(name, len, "pf%uvf%u",
- attrs->pci_vf.pf, attrs->pci_vf.vf);
- break;
- case DEVLINK_PORT_FLAVOUR_PCI_SF:
- if (attrs->pci_sf.external) {
- n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
- if (n >= len)
- return -EINVAL;
- len -= n;
- name += n;
- }
- n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
- attrs->pci_sf.sf);
- break;
- case DEVLINK_PORT_FLAVOUR_VIRTUAL:
- return -EOPNOTSUPP;
- }
-
- if (n >= len)
- return -EINVAL;
-
- return 0;
-}
-
-static int devlink_linecard_types_init(struct devlink_linecard *linecard)
-{
- struct devlink_linecard_type *linecard_type;
- unsigned int count;
- int i;
-
- count = linecard->ops->types_count(linecard, linecard->priv);
- linecard->types = kmalloc_array(count, sizeof(*linecard_type),
- GFP_KERNEL);
- if (!linecard->types)
- return -ENOMEM;
- linecard->types_count = count;
-
- for (i = 0; i < count; i++) {
- linecard_type = &linecard->types[i];
- linecard->ops->types_get(linecard, linecard->priv, i,
- &linecard_type->type,
- &linecard_type->priv);
- }
- return 0;
-}
-
-static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
-{
- kfree(linecard->types);
-}
-
-/**
- * devl_linecard_create - Create devlink linecard
- *
- * @devlink: devlink
- * @linecard_index: driver-specific numerical identifier of the linecard
- * @ops: linecard ops
- * @priv: user priv pointer
- *
- * Create devlink linecard instance with provided linecard index.
- * Caller can use any indexing, even hw-related one.
- *
- * Return: Line card structure or an ERR_PTR() encoded error code.
- */
-struct devlink_linecard *
-devl_linecard_create(struct devlink *devlink, unsigned int linecard_index,
- const struct devlink_linecard_ops *ops, void *priv)
-{
- struct devlink_linecard *linecard;
- int err;
-
- if (WARN_ON(!ops || !ops->provision || !ops->unprovision ||
- !ops->types_count || !ops->types_get))
- return ERR_PTR(-EINVAL);
-
- if (devlink_linecard_index_exists(devlink, linecard_index))
- return ERR_PTR(-EEXIST);
-
- linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
- if (!linecard)
- return ERR_PTR(-ENOMEM);
-
- linecard->devlink = devlink;
- linecard->index = linecard_index;
- linecard->ops = ops;
- linecard->priv = priv;
- linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
- mutex_init(&linecard->state_lock);
-
- err = devlink_linecard_types_init(linecard);
- if (err) {
- mutex_destroy(&linecard->state_lock);
- kfree(linecard);
- return ERR_PTR(err);
- }
-
- list_add_tail(&linecard->list, &devlink->linecard_list);
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- return linecard;
-}
-EXPORT_SYMBOL_GPL(devl_linecard_create);
-
-/**
- * devl_linecard_destroy - Destroy devlink linecard
- *
- * @linecard: devlink linecard
- */
-void devl_linecard_destroy(struct devlink_linecard *linecard)
-{
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
- list_del(&linecard->list);
- devlink_linecard_types_fini(linecard);
- mutex_destroy(&linecard->state_lock);
- kfree(linecard);
-}
-EXPORT_SYMBOL_GPL(devl_linecard_destroy);
-
-/**
- * devlink_linecard_provision_set - Set provisioning on linecard
- *
- * @linecard: devlink linecard
- * @type: linecard type
- *
- * This is either called directly from the provision() op call or
- * as a result of the provision() op call asynchronously.
- */
-void devlink_linecard_provision_set(struct devlink_linecard *linecard,
- const char *type)
-{
- mutex_lock(&linecard->state_lock);
- WARN_ON(linecard->type && strcmp(linecard->type, type));
- linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
- linecard->type = type;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
-}
-EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
-
-/**
- * devlink_linecard_provision_clear - Clear provisioning on linecard
- *
- * @linecard: devlink linecard
- *
- * This is either called directly from the unprovision() op call or
- * as a result of the unprovision() op call asynchronously.
- */
-void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
-{
- mutex_lock(&linecard->state_lock);
- WARN_ON(linecard->nested_devlink);
- linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
- linecard->type = NULL;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
-}
-EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
-
-/**
- * devlink_linecard_provision_fail - Fail provisioning on linecard
- *
- * @linecard: devlink linecard
- *
- * This is either called directly from the provision() op call or
- * as a result of the provision() op call asynchronously.
- */
-void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
-{
- mutex_lock(&linecard->state_lock);
- WARN_ON(linecard->nested_devlink);
- linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
-}
-EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail);
-
-/**
- * devlink_linecard_activate - Set linecard active
- *
- * @linecard: devlink linecard
- */
-void devlink_linecard_activate(struct devlink_linecard *linecard)
-{
- mutex_lock(&linecard->state_lock);
- WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED);
- linecard->state = DEVLINK_LINECARD_STATE_ACTIVE;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
-}
-EXPORT_SYMBOL_GPL(devlink_linecard_activate);
-
-/**
- * devlink_linecard_deactivate - Set linecard inactive
- *
- * @linecard: devlink linecard
- */
-void devlink_linecard_deactivate(struct devlink_linecard *linecard)
-{
- mutex_lock(&linecard->state_lock);
- switch (linecard->state) {
- case DEVLINK_LINECARD_STATE_ACTIVE:
- linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- break;
- case DEVLINK_LINECARD_STATE_UNPROVISIONING:
- /* Line card is being deactivated as part
- * of the unprovisioning flow.
- */
- break;
- default:
- WARN_ON(1);
- break;
- }
- mutex_unlock(&linecard->state_lock);
-}
-EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
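/*
 * Editor's illustrative sketch (not part of this diff): a hypothetical
 * hot-plug handler driving the state helpers above once the hardware
 * reports that a line card of the given type finished provisioning and,
 * optionally, came up. The foo_* name is made up.
 */
#include <net/devlink.h>

static void foo_linecard_event(struct devlink_linecard *linecard,
			       const char *type, bool up)
{
	/* Asynchronous completion of the provision() op ... */
	devlink_linecard_provision_set(linecard, type);
	/* ... and, if reported, the card is already carrying traffic. */
	if (up)
		devlink_linecard_activate(linecard);
}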
-
-/**
- * devlink_linecard_nested_dl_set - Attach/detach nested devlink
- * instance to linecard.
- *
- * @linecard: devlink linecard
- * @nested_devlink: devlink instance to attach or NULL to detach
- */
-void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
- struct devlink *nested_devlink)
-{
- mutex_lock(&linecard->state_lock);
- linecard->nested_devlink = nested_devlink;
- devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
- mutex_unlock(&linecard->state_lock);
-}
-EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
-
-int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
- u32 size, u16 ingress_pools_count,
- u16 egress_pools_count, u16 ingress_tc_count,
- u16 egress_tc_count)
-{
- struct devlink_sb *devlink_sb;
-
- lockdep_assert_held(&devlink->lock);
-
- if (devlink_sb_index_exists(devlink, sb_index))
- return -EEXIST;
-
- devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
- if (!devlink_sb)
- return -ENOMEM;
- devlink_sb->index = sb_index;
- devlink_sb->size = size;
- devlink_sb->ingress_pools_count = ingress_pools_count;
- devlink_sb->egress_pools_count = egress_pools_count;
- devlink_sb->ingress_tc_count = ingress_tc_count;
- devlink_sb->egress_tc_count = egress_tc_count;
- list_add_tail(&devlink_sb->list, &devlink->sb_list);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_sb_register);
-
-int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
- u32 size, u16 ingress_pools_count,
- u16 egress_pools_count, u16 ingress_tc_count,
- u16 egress_tc_count)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
- egress_pools_count, ingress_tc_count,
- egress_tc_count);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_sb_register);
-
-void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
-{
- struct devlink_sb *devlink_sb;
-
- lockdep_assert_held(&devlink->lock);
-
- devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
- WARN_ON(!devlink_sb);
- list_del(&devlink_sb->list);
- kfree(devlink_sb);
-}
-EXPORT_SYMBOL_GPL(devl_sb_unregister);
-
-void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
-{
- devl_lock(devlink);
- devl_sb_unregister(devlink, sb_index);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_sb_unregister);
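/*
 * Editor's illustrative sketch (not part of this diff): registering a single
 * shared buffer through the locked wrapper above. The size and the pool/TC
 * counts are made-up example values.
 */
#include <net/devlink.h>

static int foo_sb_init(struct devlink *devlink)
{
	/* One 16 MB shared buffer with 4 ingress and 4 egress pools and
	 * 8 traffic classes in each direction.
	 */
	return devlink_sb_register(devlink, 0, 16 * 1024 * 1024, 4, 4, 8, 8);
}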
-
-/**
- * devl_dpipe_headers_register - register dpipe headers
- *
- * @devlink: devlink
- * @dpipe_headers: dpipe header array
- *
- * Register the headers supported by hardware.
- */
-void devl_dpipe_headers_register(struct devlink *devlink,
- struct devlink_dpipe_headers *dpipe_headers)
-{
- lockdep_assert_held(&devlink->lock);
-
- devlink->dpipe_headers = dpipe_headers;
-}
-EXPORT_SYMBOL_GPL(devl_dpipe_headers_register);
-
-/**
- * devl_dpipe_headers_unregister - unregister dpipe headers
- *
- * @devlink: devlink
- *
- * Unregister the headers supported by hardware.
- */
-void devl_dpipe_headers_unregister(struct devlink *devlink)
-{
- lockdep_assert_held(&devlink->lock);
-
- devlink->dpipe_headers = NULL;
-}
-EXPORT_SYMBOL_GPL(devl_dpipe_headers_unregister);
-
-/**
- * devlink_dpipe_table_counter_enabled - check if counter allocation is
- * required
- * @devlink: devlink
- * @table_name: table name
- *
- * Used by the driver to check if counter allocation is required.
- * After counter allocation is turned on, the table entries
- * are updated to include counter statistics.
- *
- * From that point on, the driver must respect the counter
- * state so that each entry added to the table is added
- * with a counter.
- */
-bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
- const char *table_name)
-{
- struct devlink_dpipe_table *table;
- bool enabled;
-
- rcu_read_lock();
- table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name, devlink);
- enabled = false;
- if (table)
- enabled = table->counters_enabled;
- rcu_read_unlock();
- return enabled;
-}
-EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled);
-
-/**
- * devl_dpipe_table_register - register dpipe table
- *
- * @devlink: devlink
- * @table_name: table name
- * @table_ops: table ops
- * @priv: driver private data
- * @counter_control_extern: external control for counters
- */
-int devl_dpipe_table_register(struct devlink *devlink,
- const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
- void *priv, bool counter_control_extern)
-{
- struct devlink_dpipe_table *table;
-
- lockdep_assert_held(&devlink->lock);
-
- if (WARN_ON(!table_ops->size_get))
- return -EINVAL;
-
- if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
- devlink))
- return -EEXIST;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- table->name = table_name;
- table->table_ops = table_ops;
- table->priv = priv;
- table->counter_control_extern = counter_control_extern;
-
- list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_dpipe_table_register);
-
-/**
- * devl_dpipe_table_unregister - unregister dpipe table
- *
- * @devlink: devlink
- * @table_name: table name
- */
-void devl_dpipe_table_unregister(struct devlink *devlink,
- const char *table_name)
-{
- struct devlink_dpipe_table *table;
-
- lockdep_assert_held(&devlink->lock);
-
- table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name, devlink);
- if (!table)
- return;
- list_del_rcu(&table->list);
- kfree_rcu(table, rcu);
-}
-EXPORT_SYMBOL_GPL(devl_dpipe_table_unregister);
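/*
 * Editor's illustrative sketch (not part of this diff): registering and
 * removing a dpipe table while holding the devlink instance lock.
 * foo_dpipe_table_ops is a placeholder; a real driver fills it in elsewhere
 * and must at least provide the size_get() callback checked above.
 */
#include <net/devlink.h>

static struct devlink_dpipe_table_ops foo_dpipe_table_ops; /* filled in elsewhere */

static int foo_dpipe_init(struct devlink *devlink, void *priv)
{
	/* Counters are toggled through devlink, not externally by the driver. */
	return devl_dpipe_table_register(devlink, "foo_host_table",
					 &foo_dpipe_table_ops, priv, false);
}

static void foo_dpipe_fini(struct devlink *devlink)
{
	devl_dpipe_table_unregister(devlink, "foo_host_table");
}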
-
-/**
- * devl_resource_register - devlink resource register
- *
- * @devlink: devlink
- * @resource_name: resource's name
- * @resource_size: resource's size
- * @resource_id: resource's id
- * @parent_resource_id: resource's parent id
- * @size_params: size parameters
- *
- * Generic resources should reuse the same names across drivers.
- * Please see the generic resources list at:
- * Documentation/networking/devlink/devlink-resource.rst
- */
-int devl_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params)
-{
- struct devlink_resource *resource;
- struct list_head *resource_list;
- bool top_hierarchy;
-
- lockdep_assert_held(&devlink->lock);
-
- top_hierarchy = parent_resource_id == DEVLINK_RESOURCE_ID_PARENT_TOP;
-
- resource = devlink_resource_find(devlink, NULL, resource_id);
- if (resource)
- return -EINVAL;
-
- resource = kzalloc(sizeof(*resource), GFP_KERNEL);
- if (!resource)
- return -ENOMEM;
-
- if (top_hierarchy) {
- resource_list = &devlink->resource_list;
- } else {
- struct devlink_resource *parent_resource;
-
- parent_resource = devlink_resource_find(devlink, NULL,
- parent_resource_id);
- if (parent_resource) {
- resource_list = &parent_resource->resource_list;
- resource->parent = parent_resource;
- } else {
- kfree(resource);
- return -EINVAL;
- }
- }
-
- resource->name = resource_name;
- resource->size = resource_size;
- resource->size_new = resource_size;
- resource->id = resource_id;
- resource->size_valid = true;
- memcpy(&resource->size_params, size_params,
- sizeof(resource->size_params));
- INIT_LIST_HEAD(&resource->resource_list);
- list_add_tail(&resource->list, resource_list);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_resource_register);
-
-/**
- * devlink_resource_register - devlink resource register
- *
- * @devlink: devlink
- * @resource_name: resource's name
- * @resource_size: resource's size
- * @resource_id: resource's id
- * @parent_resource_id: resource's parent id
- * @size_params: size parameters
- *
- * Generic resources should reuse the same names across drivers.
- * Please see the generic resources list at:
- * Documentation/networking/devlink/devlink-resource.rst
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-int devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_resource_register(devlink, resource_name, resource_size,
- resource_id, parent_resource_id, size_params);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_resource_register);
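/*
 * Editor's illustrative sketch (not part of this diff): registering one
 * top-level resource. The devlink_resource_size_params_init() helper and
 * DEVLINK_RESOURCE_UNIT_ENTRY are assumed to come from <net/devlink.h>;
 * the resource name, id and sizes are hypothetical example values.
 */
#include <net/devlink.h>

#define FOO_RESOURCE_ID_HOST_TABLE 1

static int foo_resources_init(struct devlink *devlink)
{
	struct devlink_resource_size_params size_params;

	/* The user may resize between 1K and 32K entries in 1K steps. */
	devlink_resource_size_params_init(&size_params, 1024, 32768, 1024,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	return devlink_resource_register(devlink, "host_table", 16384,
					 FOO_RESOURCE_ID_HOST_TABLE,
					 DEVLINK_RESOURCE_ID_PARENT_TOP,
					 &size_params);
}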
-
-static void devlink_resource_unregister(struct devlink *devlink,
- struct devlink_resource *resource)
-{
- struct devlink_resource *tmp, *child_resource;
-
- list_for_each_entry_safe(child_resource, tmp, &resource->resource_list,
- list) {
- devlink_resource_unregister(devlink, child_resource);
- list_del(&child_resource->list);
- kfree(child_resource);
- }
-}
-
-/**
- * devl_resources_unregister - free all resources
- *
- * @devlink: devlink
- */
-void devl_resources_unregister(struct devlink *devlink)
-{
- struct devlink_resource *tmp, *child_resource;
-
- lockdep_assert_held(&devlink->lock);
-
- list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
- list) {
- devlink_resource_unregister(devlink, child_resource);
- list_del(&child_resource->list);
- kfree(child_resource);
- }
-}
-EXPORT_SYMBOL_GPL(devl_resources_unregister);
-
-/**
- * devlink_resources_unregister - free all resources
- *
- * @devlink: devlink
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_resources_unregister(struct devlink *devlink)
-{
- devl_lock(devlink);
- devl_resources_unregister(devlink);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_resources_unregister);
-
-/**
- * devl_resource_size_get - get and update size
- *
- * @devlink: devlink
- * @resource_id: the requested resource id
- * @p_resource_size: ptr to update
- */
-int devl_resource_size_get(struct devlink *devlink,
- u64 resource_id,
- u64 *p_resource_size)
-{
- struct devlink_resource *resource;
-
- lockdep_assert_held(&devlink->lock);
-
- resource = devlink_resource_find(devlink, NULL, resource_id);
- if (!resource)
- return -EINVAL;
- *p_resource_size = resource->size_new;
- resource->size = resource->size_new;
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_resource_size_get);
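/*
 * Editor's illustrative sketch (not part of this diff): after a reload, the
 * driver fetches the (possibly user-changed) size it should provision for a
 * resource registered earlier. The id is hypothetical and the devlink
 * instance lock is assumed to be held.
 */
#include <net/devlink.h>

#define FOO_RESOURCE_ID_HOST_TABLE 1	/* same id used at registration */

static int foo_host_table_size_get(struct devlink *devlink, u64 *size)
{
	/* Also commits size_new as the current size of the resource. */
	return devl_resource_size_get(devlink, FOO_RESOURCE_ID_HOST_TABLE,
				      size);
}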
-
-/**
- * devl_dpipe_table_resource_set - set the resource id
- *
- * @devlink: devlink
- * @table_name: table name
- * @resource_id: resource id
- * @resource_units: number of resource's units consumed per table's entry
- */
-int devl_dpipe_table_resource_set(struct devlink *devlink,
- const char *table_name, u64 resource_id,
- u64 resource_units)
-{
- struct devlink_dpipe_table *table;
-
- table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name, devlink);
- if (!table)
- return -EINVAL;
-
- table->resource_id = resource_id;
- table->resource_units = resource_units;
- table->resource_valid = true;
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_dpipe_table_resource_set);
-
-/**
- * devl_resource_occ_get_register - register occupancy getter
- *
- * @devlink: devlink
- * @resource_id: resource id
- * @occ_get: occupancy getter callback
- * @occ_get_priv: occupancy getter callback priv
- */
-void devl_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv)
-{
- struct devlink_resource *resource;
-
- lockdep_assert_held(&devlink->lock);
-
- resource = devlink_resource_find(devlink, NULL, resource_id);
- if (WARN_ON(!resource))
- return;
- WARN_ON(resource->occ_get);
-
- resource->occ_get = occ_get;
- resource->occ_get_priv = occ_get_priv;
-}
-EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
-
-/**
- * devlink_resource_occ_get_register - register occupancy getter
- *
- * @devlink: devlink
- * @resource_id: resource id
- * @occ_get: occupancy getter callback
- * @occ_get_priv: occupancy getter callback priv
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv)
-{
- devl_lock(devlink);
- devl_resource_occ_get_register(devlink, resource_id,
- occ_get, occ_get_priv);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
-
-/**
- * devl_resource_occ_get_unregister - unregister occupancy getter
- *
- * @devlink: devlink
- * @resource_id: resource id
- */
-void devl_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id)
-{
- struct devlink_resource *resource;
-
- lockdep_assert_held(&devlink->lock);
-
- resource = devlink_resource_find(devlink, NULL, resource_id);
- if (WARN_ON(!resource))
- return;
- WARN_ON(!resource->occ_get);
-
- resource->occ_get = NULL;
- resource->occ_get_priv = NULL;
-}
-EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
-
-/**
- * devlink_resource_occ_get_unregister - unregister occupancy getter
- *
- * @devlink: devlink
- * @resource_id: resource id
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id)
-{
- devl_lock(devlink);
- devl_resource_occ_get_unregister(devlink, resource_id);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
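/*
 * Editor's illustrative sketch (not part of this diff): wiring an occupancy
 * getter to a resource. The callback is assumed to take the priv pointer and
 * return the current occupancy (devlink_resource_occ_get_t); the foo_* names
 * and the resource id are hypothetical, and the devl_* variant expects the
 * devlink instance lock to be held.
 */
#include <linux/atomic.h>
#include <net/devlink.h>

struct foo_priv {
	atomic64_t host_table_entries;
};

static u64 foo_host_table_occ_get(void *priv)
{
	struct foo_priv *foo = priv;

	return atomic64_read(&foo->host_table_entries);
}

static void foo_occ_getters_init(struct devlink *devlink, struct foo_priv *foo)
{
	devl_resource_occ_get_register(devlink, 1 /* resource id */,
				       foo_host_table_occ_get, foo);
}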
-
-static int devlink_param_verify(const struct devlink_param *param)
-{
- if (!param || !param->name || !param->supported_cmodes)
- return -EINVAL;
- if (param->generic)
- return devlink_param_generic_verify(param);
- else
- return devlink_param_driver_verify(param);
-}
-
-static int devlink_param_register(struct devlink *devlink,
- const struct devlink_param *param)
-{
- struct devlink_param_item *param_item;
- int err;
-
- WARN_ON(devlink_param_verify(param));
- WARN_ON(devlink_param_find_by_name(&devlink->params, param->name));
-
- if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
- WARN_ON(param->get || param->set);
- else
- WARN_ON(!param->get || !param->set);
-
- param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
- if (!param_item)
- return -ENOMEM;
-
- param_item->param = param;
-
- err = xa_insert(&devlink->params, param->id, param_item, GFP_KERNEL);
- if (err)
- goto err_xa_insert;
-
- devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
- return 0;
-
-err_xa_insert:
- kfree(param_item);
- return err;
-}
-
-static void devlink_param_unregister(struct devlink *devlink,
- const struct devlink_param *param)
-{
- struct devlink_param_item *param_item;
-
- param_item = devlink_param_find_by_id(&devlink->params, param->id);
- if (WARN_ON(!param_item))
- return;
- devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_DEL);
- xa_erase(&devlink->params, param->id);
- kfree(param_item);
-}
-
-/**
- * devl_params_register - register configuration parameters
- *
- * @devlink: devlink
- * @params: configuration parameters array
- * @params_count: number of parameters provided
- *
- * Register the configuration parameters supported by the driver.
- */
-int devl_params_register(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
-{
- const struct devlink_param *param = params;
- int i, err;
-
- lockdep_assert_held(&devlink->lock);
-
- for (i = 0; i < params_count; i++, param++) {
- err = devlink_param_register(devlink, param);
- if (err)
- goto rollback;
- }
- return 0;
-
-rollback:
- if (!i)
- return err;
-
- for (param--; i > 0; i--, param--)
- devlink_param_unregister(devlink, param);
- return err;
-}
-EXPORT_SYMBOL_GPL(devl_params_register);
-
-int devlink_params_register(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_params_register(devlink, params, params_count);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_params_register);
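/*
 * Editor's illustrative sketch (not part of this diff): a driver-specific
 * driverinit-only parameter registered through the wrapper above. The
 * DEVLINK_PARAM_DRIVER() initializer is assumed to come from
 * <net/devlink.h>; the id and name are hypothetical, and the lack of
 * get/set callbacks matches the driverinit-only rule enforced in
 * devlink_param_register() above.
 */
#include <linux/kernel.h>
#include <net/devlink.h>

enum { FOO_PARAM_ID_FLASH_TIMEOUT = DEVLINK_PARAM_GENERIC_ID_MAX + 1 };

static const struct devlink_param foo_params[] = {
	DEVLINK_PARAM_DRIVER(FOO_PARAM_ID_FLASH_TIMEOUT, "flash_timeout",
			     DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL, NULL),
};

static int foo_params_init(struct devlink *devlink)
{
	return devlink_params_register(devlink, foo_params,
				       ARRAY_SIZE(foo_params));
}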
-
-/**
- * devl_params_unregister - unregister configuration parameters
- * @devlink: devlink
- * @params: configuration parameters to unregister
- * @params_count: number of parameters provided
- */
-void devl_params_unregister(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
-{
- const struct devlink_param *param = params;
- int i;
-
- lockdep_assert_held(&devlink->lock);
-
- for (i = 0; i < params_count; i++, param++)
- devlink_param_unregister(devlink, param);
-}
-EXPORT_SYMBOL_GPL(devl_params_unregister);
-
-void devlink_params_unregister(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
-{
- devl_lock(devlink);
- devl_params_unregister(devlink, params, params_count);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_params_unregister);
-
-/**
- * devl_param_driverinit_value_get - get configuration parameter
- * value for driver initialization
- *
- * @devlink: devlink
- * @param_id: parameter ID
- * @val: pointer to store the value of parameter in driverinit
- * configuration mode
- *
- * This function should be used by the driver to get driverinit
- * configuration for initialization after a reload command.
- *
- * Note that lockless call of this function relies on the
- * driver to maintain the following basic sane behavior:
- * 1) Driver ensures a call to this function cannot race with
- * registering/unregistering the parameter with the same parameter ID.
- * 2) Driver ensures a call to this function cannot race with
- * devl_param_driverinit_value_set() call with the same parameter ID.
- * 3) Driver ensures a call to this function cannot race with
- * reload operation.
- * If the driver is not able to comply, it has to take the devlink->lock
- * while calling this.
- */
-int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *val)
-{
- struct devlink_param_item *param_item;
-
- if (WARN_ON(!devlink_reload_supported(devlink->ops)))
- return -EOPNOTSUPP;
-
- param_item = devlink_param_find_by_id(&devlink->params, param_id);
- if (!param_item)
- return -EINVAL;
-
- if (!param_item->driverinit_value_valid)
- return -EOPNOTSUPP;
-
- if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT)))
- return -EOPNOTSUPP;
-
- *val = param_item->driverinit_value;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devl_param_driverinit_value_get);
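/*
 * Editor's illustrative sketch (not part of this diff): reading a driverinit
 * value back while (re)initializing after reload. The parameter id and the
 * fallback default are hypothetical; vu32 is the u32 member of
 * union devlink_param_value.
 */
#include <net/devlink.h>

static u32 foo_flash_timeout_get(struct devlink *devlink, u32 param_id)
{
	union devlink_param_value val;

	if (devl_param_driverinit_value_get(devlink, param_id, &val))
		return 60;	/* fall back to a built-in default */
	return val.vu32;
}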
-
-/**
- * devl_param_driverinit_value_set - set value of configuration
- * parameter for driverinit
- * configuration mode
- *
- * @devlink: devlink
- * @param_id: parameter ID
- * @init_val: value of parameter to set for driverinit configuration mode
- *
- * This function should be used by the driver to set driverinit
- * configuration mode default value.
- */
-void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
-{
- struct devlink_param_item *param_item;
-
- devl_assert_locked(devlink);
-
- param_item = devlink_param_find_by_id(&devlink->params, param_id);
- if (WARN_ON(!param_item))
- return;
-
- if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT)))
- return;
-
- param_item->driverinit_value = init_val;
- param_item->driverinit_value_valid = true;
-
- devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
-}
-EXPORT_SYMBOL_GPL(devl_param_driverinit_value_set);
-
-void devlink_params_driverinit_load_new(struct devlink *devlink)
-{
- struct devlink_param_item *param_item;
- unsigned long param_id;
-
- xa_for_each(&devlink->params, param_id, param_item) {
- if (!devlink_param_cmode_is_supported(param_item->param,
- DEVLINK_PARAM_CMODE_DRIVERINIT) ||
- !param_item->driverinit_value_new_valid)
- continue;
- param_item->driverinit_value = param_item->driverinit_value_new;
- param_item->driverinit_value_valid = true;
- param_item->driverinit_value_new_valid = false;
- }
-}
-
-/**
- * devl_param_value_changed - notify devlink on a parameter's value
- * change. Should be called by the driver
- * right after the change.
- *
- * @devlink: devlink
- * @param_id: parameter ID
- *
- * This function should be used by the driver to notify devlink of a value
- * change, excluding driverinit configuration mode.
- * For driverinit configuration mode, the driver should use
- * devl_param_driverinit_value_set() instead.
- */
-void devl_param_value_changed(struct devlink *devlink, u32 param_id)
-{
- struct devlink_param_item *param_item;
-
- param_item = devlink_param_find_by_id(&devlink->params, param_id);
- WARN_ON(!param_item);
-
- devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
-}
-EXPORT_SYMBOL_GPL(devl_param_value_changed);
-
-/**
- * devl_region_create - create a new address region
- *
- * @devlink: devlink
- * @ops: region operations and name
- * @region_max_snapshots: Maximum supported number of snapshots for region
- * @region_size: size of region
- */
-struct devlink_region *devl_region_create(struct devlink *devlink,
- const struct devlink_region_ops *ops,
- u32 region_max_snapshots,
- u64 region_size)
-{
- struct devlink_region *region;
-
- devl_assert_locked(devlink);
-
- if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
- return ERR_PTR(-EINVAL);
-
- if (devlink_region_get_by_name(devlink, ops->name))
- return ERR_PTR(-EEXIST);
-
- region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region)
- return ERR_PTR(-ENOMEM);
-
- region->devlink = devlink;
- region->max_snapshots = region_max_snapshots;
- region->ops = ops;
- region->size = region_size;
- INIT_LIST_HEAD(&region->snapshot_list);
- mutex_init(&region->snapshot_lock);
- list_add_tail(&region->list, &devlink->region_list);
- devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
-
- return region;
-}
-EXPORT_SYMBOL_GPL(devl_region_create);
-
-/**
- * devlink_region_create - create a new address region
- *
- * @devlink: devlink
- * @ops: region operations and name
- * @region_max_snapshots: Maximum supported number of snapshots for region
- * @region_size: size of region
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-struct devlink_region *
-devlink_region_create(struct devlink *devlink,
- const struct devlink_region_ops *ops,
- u32 region_max_snapshots, u64 region_size)
-{
- struct devlink_region *region;
-
- devl_lock(devlink);
- region = devl_region_create(devlink, ops, region_max_snapshots,
- region_size);
- devl_unlock(devlink);
- return region;
-}
-EXPORT_SYMBOL_GPL(devlink_region_create);
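/*
 * Editor's illustrative sketch (not part of this diff): a minimal region.
 * Only the name and the mandatory destructor checked above are filled in;
 * the optional snapshot/read callbacks are left out, so snapshots would be
 * taken by the driver itself via devlink_region_snapshot_create(). The
 * region name and sizes are hypothetical.
 */
#include <linux/slab.h>
#include <net/devlink.h>

static const struct devlink_region_ops foo_crdump_ops = {
	.name = "crdump",
	.destructor = kfree,	/* snapshot data is a plain kmalloc'ed buffer */
};

static struct devlink_region *foo_region_init(struct devlink *devlink)
{
	/* Up to 8 snapshots of a 1 MB register space. */
	return devlink_region_create(devlink, &foo_crdump_ops, 8, 1 << 20);
}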
-
-/**
- * devlink_port_region_create - create a new address region for a port
- *
- * @port: devlink port
- * @ops: region operations and name
- * @region_max_snapshots: Maximum supported number of snapshots for region
- * @region_size: size of region
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-struct devlink_region *
-devlink_port_region_create(struct devlink_port *port,
- const struct devlink_port_region_ops *ops,
- u32 region_max_snapshots, u64 region_size)
-{
- struct devlink *devlink = port->devlink;
- struct devlink_region *region;
- int err = 0;
-
- ASSERT_DEVLINK_PORT_INITIALIZED(port);
-
- if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
- return ERR_PTR(-EINVAL);
-
- devl_lock(devlink);
-
- if (devlink_port_region_get_by_name(port, ops->name)) {
- err = -EEXIST;
- goto unlock;
- }
-
- region = kzalloc(sizeof(*region), GFP_KERNEL);
- if (!region) {
- err = -ENOMEM;
- goto unlock;
- }
-
- region->devlink = devlink;
- region->port = port;
- region->max_snapshots = region_max_snapshots;
- region->port_ops = ops;
- region->size = region_size;
- INIT_LIST_HEAD(&region->snapshot_list);
- mutex_init(&region->snapshot_lock);
- list_add_tail(&region->list, &port->region_list);
- devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
-
- devl_unlock(devlink);
- return region;
-
-unlock:
- devl_unlock(devlink);
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(devlink_port_region_create);
-
-/**
- * devl_region_destroy - destroy address region
- *
- * @region: devlink region to destroy
- */
-void devl_region_destroy(struct devlink_region *region)
-{
- struct devlink *devlink = region->devlink;
- struct devlink_snapshot *snapshot, *ts;
-
- devl_assert_locked(devlink);
-
- /* Free all snapshots of region */
- mutex_lock(&region->snapshot_lock);
- list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
- devlink_region_snapshot_del(region, snapshot);
- mutex_unlock(&region->snapshot_lock);
-
- list_del(&region->list);
- mutex_destroy(&region->snapshot_lock);
-
- devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
- kfree(region);
-}
-EXPORT_SYMBOL_GPL(devl_region_destroy);
-
-/**
- * devlink_region_destroy - destroy address region
- *
- * @region: devlink region to destroy
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_region_destroy(struct devlink_region *region)
-{
- struct devlink *devlink = region->devlink;
-
- devl_lock(devlink);
- devl_region_destroy(region);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_region_destroy);
-
-/**
- * devlink_region_snapshot_id_get - get snapshot ID
- *
- * This function should be called when adding a new snapshot.
- * The driver should use the same id for multiple snapshots taken
- * on multiple regions at the same time/by the same trigger.
- *
- * The caller of this function must use devlink_region_snapshot_id_put
- * when finished creating snapshots using this id.
- *
- * Returns zero on success, or a negative error code on failure.
- *
- * @devlink: devlink
- * @id: storage to return id
- */
-int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
-{
- return __devlink_region_snapshot_id_get(devlink, id);
-}
-EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
-
-/**
- * devlink_region_snapshot_id_put - put snapshot ID reference
- *
- * This should be called by a driver after finishing creating snapshots
- * with an id. Doing so ensures that the ID can later be released in the
- * event that all snapshots using it have been destroyed.
- *
- * @devlink: devlink
- * @id: id to release reference on
- */
-void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
-{
- __devlink_snapshot_id_decrement(devlink, id);
-}
-EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
-
-/**
- * devlink_region_snapshot_create - create a new snapshot
- *
- * This will add a new snapshot of a region. The snapshot
- * will be stored on the region struct and can be accessed
- * from devlink. This is useful for future analyses of snapshots.
- * Multiple snapshots can be created on a region.
- * The @snapshot_id should be obtained using the getter function.
- *
- * @region: devlink region of the snapshot
- * @data: snapshot data
- * @snapshot_id: snapshot id to be created
- */
-int devlink_region_snapshot_create(struct devlink_region *region,
- u8 *data, u32 snapshot_id)
-{
- int err;
-
- mutex_lock(&region->snapshot_lock);
- err = __devlink_region_snapshot_create(region, data, snapshot_id);
- mutex_unlock(&region->snapshot_lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
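/*
 * Editor's illustrative sketch (not part of this diff): the id get / create /
 * put sequence around a driver-initiated snapshot, e.g. on a health event.
 * The data buffer is assumed to be allocated in a way the region destructor
 * can free once the snapshot is deleted.
 */
#include <net/devlink.h>

static int foo_take_snapshot(struct devlink *devlink,
			     struct devlink_region *region, u8 *data)
{
	u32 snapshot_id;
	int err;

	err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
	if (err)
		return err;

	err = devlink_region_snapshot_create(region, data, snapshot_id);

	/* Drop our reference; the snapshot itself keeps the id alive. */
	devlink_region_snapshot_id_put(devlink, snapshot_id);
	return err;
}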
-
-#define DEVLINK_TRAP(_id, _type) \
- { \
- .type = DEVLINK_TRAP_TYPE_##_type, \
- .id = DEVLINK_TRAP_GENERIC_ID_##_id, \
- .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \
- }
-
-static const struct devlink_trap devlink_trap_generic[] = {
- DEVLINK_TRAP(SMAC_MC, DROP),
- DEVLINK_TRAP(VLAN_TAG_MISMATCH, DROP),
- DEVLINK_TRAP(INGRESS_VLAN_FILTER, DROP),
- DEVLINK_TRAP(INGRESS_STP_FILTER, DROP),
- DEVLINK_TRAP(EMPTY_TX_LIST, DROP),
- DEVLINK_TRAP(PORT_LOOPBACK_FILTER, DROP),
- DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
- DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
- DEVLINK_TRAP(TAIL_DROP, DROP),
- DEVLINK_TRAP(NON_IP_PACKET, DROP),
- DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
- DEVLINK_TRAP(DIP_LB, DROP),
- DEVLINK_TRAP(SIP_MC, DROP),
- DEVLINK_TRAP(SIP_LB, DROP),
- DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
- DEVLINK_TRAP(IPV4_SIP_BC, DROP),
- DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
- DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
- DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
- DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
- DEVLINK_TRAP(RPF, EXCEPTION),
- DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
- DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
- DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
- DEVLINK_TRAP(NON_ROUTABLE, DROP),
- DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
- DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
- DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP),
- DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP),
- DEVLINK_TRAP(STP, CONTROL),
- DEVLINK_TRAP(LACP, CONTROL),
- DEVLINK_TRAP(LLDP, CONTROL),
- DEVLINK_TRAP(IGMP_QUERY, CONTROL),
- DEVLINK_TRAP(IGMP_V1_REPORT, CONTROL),
- DEVLINK_TRAP(IGMP_V2_REPORT, CONTROL),
- DEVLINK_TRAP(IGMP_V3_REPORT, CONTROL),
- DEVLINK_TRAP(IGMP_V2_LEAVE, CONTROL),
- DEVLINK_TRAP(MLD_QUERY, CONTROL),
- DEVLINK_TRAP(MLD_V1_REPORT, CONTROL),
- DEVLINK_TRAP(MLD_V2_REPORT, CONTROL),
- DEVLINK_TRAP(MLD_V1_DONE, CONTROL),
- DEVLINK_TRAP(IPV4_DHCP, CONTROL),
- DEVLINK_TRAP(IPV6_DHCP, CONTROL),
- DEVLINK_TRAP(ARP_REQUEST, CONTROL),
- DEVLINK_TRAP(ARP_RESPONSE, CONTROL),
- DEVLINK_TRAP(ARP_OVERLAY, CONTROL),
- DEVLINK_TRAP(IPV6_NEIGH_SOLICIT, CONTROL),
- DEVLINK_TRAP(IPV6_NEIGH_ADVERT, CONTROL),
- DEVLINK_TRAP(IPV4_BFD, CONTROL),
- DEVLINK_TRAP(IPV6_BFD, CONTROL),
- DEVLINK_TRAP(IPV4_OSPF, CONTROL),
- DEVLINK_TRAP(IPV6_OSPF, CONTROL),
- DEVLINK_TRAP(IPV4_BGP, CONTROL),
- DEVLINK_TRAP(IPV6_BGP, CONTROL),
- DEVLINK_TRAP(IPV4_VRRP, CONTROL),
- DEVLINK_TRAP(IPV6_VRRP, CONTROL),
- DEVLINK_TRAP(IPV4_PIM, CONTROL),
- DEVLINK_TRAP(IPV6_PIM, CONTROL),
- DEVLINK_TRAP(UC_LB, CONTROL),
- DEVLINK_TRAP(LOCAL_ROUTE, CONTROL),
- DEVLINK_TRAP(EXTERNAL_ROUTE, CONTROL),
- DEVLINK_TRAP(IPV6_UC_DIP_LINK_LOCAL_SCOPE, CONTROL),
- DEVLINK_TRAP(IPV6_DIP_ALL_NODES, CONTROL),
- DEVLINK_TRAP(IPV6_DIP_ALL_ROUTERS, CONTROL),
- DEVLINK_TRAP(IPV6_ROUTER_SOLICIT, CONTROL),
- DEVLINK_TRAP(IPV6_ROUTER_ADVERT, CONTROL),
- DEVLINK_TRAP(IPV6_REDIRECT, CONTROL),
- DEVLINK_TRAP(IPV4_ROUTER_ALERT, CONTROL),
- DEVLINK_TRAP(IPV6_ROUTER_ALERT, CONTROL),
- DEVLINK_TRAP(PTP_EVENT, CONTROL),
- DEVLINK_TRAP(PTP_GENERAL, CONTROL),
- DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL),
- DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL),
- DEVLINK_TRAP(EARLY_DROP, DROP),
- DEVLINK_TRAP(VXLAN_PARSING, DROP),
- DEVLINK_TRAP(LLC_SNAP_PARSING, DROP),
- DEVLINK_TRAP(VLAN_PARSING, DROP),
- DEVLINK_TRAP(PPPOE_PPP_PARSING, DROP),
- DEVLINK_TRAP(MPLS_PARSING, DROP),
- DEVLINK_TRAP(ARP_PARSING, DROP),
- DEVLINK_TRAP(IP_1_PARSING, DROP),
- DEVLINK_TRAP(IP_N_PARSING, DROP),
- DEVLINK_TRAP(GRE_PARSING, DROP),
- DEVLINK_TRAP(UDP_PARSING, DROP),
- DEVLINK_TRAP(TCP_PARSING, DROP),
- DEVLINK_TRAP(IPSEC_PARSING, DROP),
- DEVLINK_TRAP(SCTP_PARSING, DROP),
- DEVLINK_TRAP(DCCP_PARSING, DROP),
- DEVLINK_TRAP(GTP_PARSING, DROP),
- DEVLINK_TRAP(ESP_PARSING, DROP),
- DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
- DEVLINK_TRAP(DMAC_FILTER, DROP),
- DEVLINK_TRAP(EAPOL, CONTROL),
- DEVLINK_TRAP(LOCKED_PORT, DROP),
-};
-
-#define DEVLINK_TRAP_GROUP(_id) \
- { \
- .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \
- .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \
- }
-
-static const struct devlink_trap_group devlink_trap_group_generic[] = {
- DEVLINK_TRAP_GROUP(L2_DROPS),
- DEVLINK_TRAP_GROUP(L3_DROPS),
- DEVLINK_TRAP_GROUP(L3_EXCEPTIONS),
- DEVLINK_TRAP_GROUP(BUFFER_DROPS),
- DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
- DEVLINK_TRAP_GROUP(ACL_DROPS),
- DEVLINK_TRAP_GROUP(STP),
- DEVLINK_TRAP_GROUP(LACP),
- DEVLINK_TRAP_GROUP(LLDP),
- DEVLINK_TRAP_GROUP(MC_SNOOPING),
- DEVLINK_TRAP_GROUP(DHCP),
- DEVLINK_TRAP_GROUP(NEIGH_DISCOVERY),
- DEVLINK_TRAP_GROUP(BFD),
- DEVLINK_TRAP_GROUP(OSPF),
- DEVLINK_TRAP_GROUP(BGP),
- DEVLINK_TRAP_GROUP(VRRP),
- DEVLINK_TRAP_GROUP(PIM),
- DEVLINK_TRAP_GROUP(UC_LB),
- DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
- DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
- DEVLINK_TRAP_GROUP(IPV6),
- DEVLINK_TRAP_GROUP(PTP_EVENT),
- DEVLINK_TRAP_GROUP(PTP_GENERAL),
- DEVLINK_TRAP_GROUP(ACL_SAMPLE),
- DEVLINK_TRAP_GROUP(ACL_TRAP),
- DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
- DEVLINK_TRAP_GROUP(EAPOL),
-};
-
-static int devlink_trap_generic_verify(const struct devlink_trap *trap)
-{
- if (trap->id > DEVLINK_TRAP_GENERIC_ID_MAX)
- return -EINVAL;
-
- if (strcmp(trap->name, devlink_trap_generic[trap->id].name))
- return -EINVAL;
-
- if (trap->type != devlink_trap_generic[trap->id].type)
- return -EINVAL;
-
- return 0;
-}
-
-static int devlink_trap_driver_verify(const struct devlink_trap *trap)
-{
- int i;
-
- if (trap->id <= DEVLINK_TRAP_GENERIC_ID_MAX)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(devlink_trap_generic); i++) {
- if (!strcmp(trap->name, devlink_trap_generic[i].name))
- return -EEXIST;
- }
-
- return 0;
-}
-
-static int devlink_trap_verify(const struct devlink_trap *trap)
-{
- if (!trap || !trap->name)
- return -EINVAL;
-
- if (trap->generic)
- return devlink_trap_generic_verify(trap);
- else
- return devlink_trap_driver_verify(trap);
-}
-
-static int
-devlink_trap_group_generic_verify(const struct devlink_trap_group *group)
-{
- if (group->id > DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
- return -EINVAL;
-
- if (strcmp(group->name, devlink_trap_group_generic[group->id].name))
- return -EINVAL;
-
- return 0;
-}
-
-static int
-devlink_trap_group_driver_verify(const struct devlink_trap_group *group)
-{
- int i;
-
- if (group->id <= DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(devlink_trap_group_generic); i++) {
- if (!strcmp(group->name, devlink_trap_group_generic[i].name))
- return -EEXIST;
- }
-
- return 0;
-}
-
-static int devlink_trap_group_verify(const struct devlink_trap_group *group)
-{
- if (group->generic)
- return devlink_trap_group_generic_verify(group);
- else
- return devlink_trap_group_driver_verify(group);
-}
-
-static void
-devlink_trap_group_notify(struct devlink *devlink,
- const struct devlink_trap_group_item *group_item,
- enum devlink_command cmd)
-{
- struct sk_buff *msg;
- int err;
-
- WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW &&
- cmd != DEVLINK_CMD_TRAP_GROUP_DEL);
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_trap_group_fill(msg, devlink, group_item, cmd, 0, 0,
- 0);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-static int
-devlink_trap_item_group_link(struct devlink *devlink,
- struct devlink_trap_item *trap_item)
-{
- u16 group_id = trap_item->trap->init_group_id;
- struct devlink_trap_group_item *group_item;
-
- group_item = devlink_trap_group_item_lookup_by_id(devlink, group_id);
- if (WARN_ON_ONCE(!group_item))
- return -EINVAL;
-
- trap_item->group_item = group_item;
-
- return 0;
-}
-
-static void devlink_trap_notify(struct devlink *devlink,
- const struct devlink_trap_item *trap_item,
- enum devlink_command cmd)
-{
- struct sk_buff *msg;
- int err;
-
- WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW &&
- cmd != DEVLINK_CMD_TRAP_DEL);
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_trap_fill(msg, devlink, trap_item, cmd, 0, 0, 0);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-static int
-devlink_trap_register(struct devlink *devlink,
- const struct devlink_trap *trap, void *priv)
-{
- struct devlink_trap_item *trap_item;
- int err;
-
- if (devlink_trap_item_lookup(devlink, trap->name))
- return -EEXIST;
-
- trap_item = kzalloc(sizeof(*trap_item), GFP_KERNEL);
- if (!trap_item)
- return -ENOMEM;
-
- trap_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
- if (!trap_item->stats) {
- err = -ENOMEM;
- goto err_stats_alloc;
- }
-
- trap_item->trap = trap;
- trap_item->action = trap->init_action;
- trap_item->priv = priv;
-
- err = devlink_trap_item_group_link(devlink, trap_item);
- if (err)
- goto err_group_link;
-
- err = devlink->ops->trap_init(devlink, trap, trap_item);
- if (err)
- goto err_trap_init;
-
- list_add_tail(&trap_item->list, &devlink->trap_list);
- devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
-
- return 0;
-
-err_trap_init:
-err_group_link:
- free_percpu(trap_item->stats);
-err_stats_alloc:
- kfree(trap_item);
- return err;
-}
-
-static void devlink_trap_unregister(struct devlink *devlink,
- const struct devlink_trap *trap)
-{
- struct devlink_trap_item *trap_item;
-
- trap_item = devlink_trap_item_lookup(devlink, trap->name);
- if (WARN_ON_ONCE(!trap_item))
- return;
-
- devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
- list_del(&trap_item->list);
- if (devlink->ops->trap_fini)
- devlink->ops->trap_fini(devlink, trap, trap_item);
- free_percpu(trap_item->stats);
- kfree(trap_item);
-}
-
-static void devlink_trap_disable(struct devlink *devlink,
- const struct devlink_trap *trap)
-{
- struct devlink_trap_item *trap_item;
-
- trap_item = devlink_trap_item_lookup(devlink, trap->name);
- if (WARN_ON_ONCE(!trap_item))
- return;
-
- devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP,
- NULL);
- trap_item->action = DEVLINK_TRAP_ACTION_DROP;
-}
-
-/**
- * devl_traps_register - Register packet traps with devlink.
- * @devlink: devlink.
- * @traps: Packet traps.
- * @traps_count: Count of provided packet traps.
- * @priv: Driver private information.
- *
- * Return: Non-zero value on failure.
- */
-int devl_traps_register(struct devlink *devlink,
- const struct devlink_trap *traps,
- size_t traps_count, void *priv)
-{
- int i, err;
-
- if (!devlink->ops->trap_init || !devlink->ops->trap_action_set)
- return -EINVAL;
-
- devl_assert_locked(devlink);
- for (i = 0; i < traps_count; i++) {
- const struct devlink_trap *trap = &traps[i];
-
- err = devlink_trap_verify(trap);
- if (err)
- goto err_trap_verify;
-
- err = devlink_trap_register(devlink, trap, priv);
- if (err)
- goto err_trap_register;
- }
-
- return 0;
-
-err_trap_register:
-err_trap_verify:
- for (i--; i >= 0; i--)
- devlink_trap_unregister(devlink, &traps[i]);
- return err;
-}
-EXPORT_SYMBOL_GPL(devl_traps_register);
-
-/**
- * devlink_traps_register - Register packet traps with devlink.
- * @devlink: devlink.
- * @traps: Packet traps.
- * @traps_count: Count of provided packet traps.
- * @priv: Driver private information.
- *
- * Context: Takes and releases devlink->lock <mutex>.
- *
- * Return: Non-zero value on failure.
- */
-int devlink_traps_register(struct devlink *devlink,
- const struct devlink_trap *traps,
- size_t traps_count, void *priv)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_traps_register(devlink, traps, traps_count, priv);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_traps_register);
-
-/**
- * devl_traps_unregister - Unregister packet traps from devlink.
- * @devlink: devlink.
- * @traps: Packet traps.
- * @traps_count: Count of provided packet traps.
- */
-void devl_traps_unregister(struct devlink *devlink,
- const struct devlink_trap *traps,
- size_t traps_count)
-{
- int i;
-
- devl_assert_locked(devlink);
- /* Make sure we do not have any packets in-flight while unregistering
- * traps by disabling all of them and waiting for a grace period.
- */
- for (i = traps_count - 1; i >= 0; i--)
- devlink_trap_disable(devlink, &traps[i]);
- synchronize_rcu();
- for (i = traps_count - 1; i >= 0; i--)
- devlink_trap_unregister(devlink, &traps[i]);
-}
-EXPORT_SYMBOL_GPL(devl_traps_unregister);
-
-/**
- * devlink_traps_unregister - Unregister packet traps from devlink.
- * @devlink: devlink.
- * @traps: Packet traps.
- * @traps_count: Count of provided packet traps.
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_traps_unregister(struct devlink *devlink,
- const struct devlink_trap *traps,
- size_t traps_count)
-{
- devl_lock(devlink);
- devl_traps_unregister(devlink, traps, traps_count);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_traps_unregister);
-
-static void
-devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
- size_t skb_len)
-{
- struct devlink_stats *stats;
-
- stats = this_cpu_ptr(trap_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_add(&stats->rx_bytes, skb_len);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_update_end(&stats->syncp);
-}
-
-static void
-devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
- const struct devlink_trap_item *trap_item,
- struct devlink_port *in_devlink_port,
- const struct flow_action_cookie *fa_cookie)
-{
- metadata->trap_name = trap_item->trap->name;
- metadata->trap_group_name = trap_item->group_item->group->name;
- metadata->fa_cookie = fa_cookie;
- metadata->trap_type = trap_item->trap->type;
-
- spin_lock(&in_devlink_port->type_lock);
- if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
- metadata->input_dev = in_devlink_port->type_eth.netdev;
- spin_unlock(&in_devlink_port->type_lock);
-}
-
-/**
- * devlink_trap_report - Report trapped packet to drop monitor.
- * @devlink: devlink.
- * @skb: Trapped packet.
- * @trap_ctx: Trap context.
- * @in_devlink_port: Input devlink port.
- * @fa_cookie: Flow action cookie. Could be NULL.
- */
-void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
- void *trap_ctx, struct devlink_port *in_devlink_port,
- const struct flow_action_cookie *fa_cookie)
-{
- struct devlink_trap_item *trap_item = trap_ctx;
-
- devlink_trap_stats_update(trap_item->stats, skb->len);
- devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
-
- if (trace_devlink_trap_report_enabled()) {
- struct devlink_trap_metadata metadata = {};
-
- devlink_trap_report_metadata_set(&metadata, trap_item,
- in_devlink_port, fa_cookie);
- trace_devlink_trap_report(devlink, skb, &metadata);
- }
-}
-EXPORT_SYMBOL_GPL(devlink_trap_report);
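/*
 * Editor's illustrative sketch (not part of this diff): reporting a trapped
 * packet from a hypothetical driver RX path. trap_ctx is the pointer devlink
 * handed to the trap_init() op; fa_cookie is NULL because this trap was not
 * triggered by a flow action.
 */
#include <linux/skbuff.h>
#include <net/devlink.h>

static void foo_rx_dropped(struct devlink *devlink, struct sk_buff *skb,
			   void *trap_ctx, struct devlink_port *in_port)
{
	devlink_trap_report(devlink, skb, trap_ctx, in_port, NULL);
	kfree_skb(skb);		/* drop trap: the packet goes no further */
}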
-
-/**
- * devlink_trap_ctx_priv - Trap context to driver private information.
- * @trap_ctx: Trap context.
- *
- * Return: Driver private information passed during registration.
- */
-void *devlink_trap_ctx_priv(void *trap_ctx)
-{
- struct devlink_trap_item *trap_item = trap_ctx;
-
- return trap_item->priv;
-}
-EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv);
-
-static int
-devlink_trap_group_item_policer_link(struct devlink *devlink,
- struct devlink_trap_group_item *group_item)
-{
- u32 policer_id = group_item->group->init_policer_id;
- struct devlink_trap_policer_item *policer_item;
-
- if (policer_id == 0)
- return 0;
-
- policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
- if (WARN_ON_ONCE(!policer_item))
- return -EINVAL;
-
- group_item->policer_item = policer_item;
-
- return 0;
-}
-
-static int
-devlink_trap_group_register(struct devlink *devlink,
- const struct devlink_trap_group *group)
-{
- struct devlink_trap_group_item *group_item;
- int err;
-
- if (devlink_trap_group_item_lookup(devlink, group->name))
- return -EEXIST;
-
- group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
- if (!group_item)
- return -ENOMEM;
-
- group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
- if (!group_item->stats) {
- err = -ENOMEM;
- goto err_stats_alloc;
- }
-
- group_item->group = group;
-
- err = devlink_trap_group_item_policer_link(devlink, group_item);
- if (err)
- goto err_policer_link;
-
- if (devlink->ops->trap_group_init) {
- err = devlink->ops->trap_group_init(devlink, group);
- if (err)
- goto err_group_init;
- }
-
- list_add_tail(&group_item->list, &devlink->trap_group_list);
- devlink_trap_group_notify(devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_NEW);
-
- return 0;
-
-err_group_init:
-err_policer_link:
- free_percpu(group_item->stats);
-err_stats_alloc:
- kfree(group_item);
- return err;
-}
-
-static void
-devlink_trap_group_unregister(struct devlink *devlink,
- const struct devlink_trap_group *group)
-{
- struct devlink_trap_group_item *group_item;
-
- group_item = devlink_trap_group_item_lookup(devlink, group->name);
- if (WARN_ON_ONCE(!group_item))
- return;
-
- devlink_trap_group_notify(devlink, group_item,
- DEVLINK_CMD_TRAP_GROUP_DEL);
- list_del(&group_item->list);
- free_percpu(group_item->stats);
- kfree(group_item);
-}
-
-/**
- * devl_trap_groups_register - Register packet trap groups with devlink.
- * @devlink: devlink.
- * @groups: Packet trap groups.
- * @groups_count: Count of provided packet trap groups.
- *
- * Return: Non-zero value on failure.
- */
-int devl_trap_groups_register(struct devlink *devlink,
- const struct devlink_trap_group *groups,
- size_t groups_count)
-{
- int i, err;
-
- devl_assert_locked(devlink);
- for (i = 0; i < groups_count; i++) {
- const struct devlink_trap_group *group = &groups[i];
-
- err = devlink_trap_group_verify(group);
- if (err)
- goto err_trap_group_verify;
-
- err = devlink_trap_group_register(devlink, group);
- if (err)
- goto err_trap_group_register;
- }
-
- return 0;
-
-err_trap_group_register:
-err_trap_group_verify:
- for (i--; i >= 0; i--)
- devlink_trap_group_unregister(devlink, &groups[i]);
- return err;
-}
-EXPORT_SYMBOL_GPL(devl_trap_groups_register);
-
-/**
- * devlink_trap_groups_register - Register packet trap groups with devlink.
- * @devlink: devlink.
- * @groups: Packet trap groups.
- * @groups_count: Count of provided packet trap groups.
- *
- * Context: Takes and releases devlink->lock <mutex>.
- *
- * Return: Non-zero value on failure.
- */
-int devlink_trap_groups_register(struct devlink *devlink,
- const struct devlink_trap_group *groups,
- size_t groups_count)
-{
- int err;
-
- devl_lock(devlink);
- err = devl_trap_groups_register(devlink, groups, groups_count);
- devl_unlock(devlink);
- return err;
-}
-EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
-
-/**
- * devl_trap_groups_unregister - Unregister packet trap groups from devlink.
- * @devlink: devlink.
- * @groups: Packet trap groups.
- * @groups_count: Count of provided packet trap groups.
- */
-void devl_trap_groups_unregister(struct devlink *devlink,
- const struct devlink_trap_group *groups,
- size_t groups_count)
-{
- int i;
-
- devl_assert_locked(devlink);
- for (i = groups_count - 1; i >= 0; i--)
- devlink_trap_group_unregister(devlink, &groups[i]);
-}
-EXPORT_SYMBOL_GPL(devl_trap_groups_unregister);
-
-/**
- * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
- * @devlink: devlink.
- * @groups: Packet trap groups.
- * @groups_count: Count of provided packet trap groups.
- *
- * Context: Takes and releases devlink->lock <mutex>.
- */
-void devlink_trap_groups_unregister(struct devlink *devlink,
- const struct devlink_trap_group *groups,
- size_t groups_count)
-{
- devl_lock(devlink);
- devl_trap_groups_unregister(devlink, groups, groups_count);
- devl_unlock(devlink);
-}
-EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
-
-static void
-devlink_trap_policer_notify(struct devlink *devlink,
- const struct devlink_trap_policer_item *policer_item,
- enum devlink_command cmd)
-{
- struct sk_buff *msg;
- int err;
-
- WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
- cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
- if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
- return;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!msg)
- return;
-
- err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, cmd, 0,
- 0, 0);
- if (err) {
- nlmsg_free(msg);
- return;
- }
-
- genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
- msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
-}
-
-static int
-devlink_trap_policer_register(struct devlink *devlink,
- const struct devlink_trap_policer *policer)
-{
- struct devlink_trap_policer_item *policer_item;
- int err;
-
- if (devlink_trap_policer_item_lookup(devlink, policer->id))
- return -EEXIST;
-
- policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
- if (!policer_item)
- return -ENOMEM;
-
- policer_item->policer = policer;
- policer_item->rate = policer->init_rate;
- policer_item->burst = policer->init_burst;
-
- if (devlink->ops->trap_policer_init) {
- err = devlink->ops->trap_policer_init(devlink, policer);
- if (err)
- goto err_policer_init;
- }
-
- list_add_tail(&policer_item->list, &devlink->trap_policer_list);
- devlink_trap_policer_notify(devlink, policer_item,
- DEVLINK_CMD_TRAP_POLICER_NEW);
-
- return 0;
-
-err_policer_init:
- kfree(policer_item);
- return err;
-}
-
-static void
-devlink_trap_policer_unregister(struct devlink *devlink,
- const struct devlink_trap_policer *policer)
-{
- struct devlink_trap_policer_item *policer_item;
-
- policer_item = devlink_trap_policer_item_lookup(devlink, policer->id);
- if (WARN_ON_ONCE(!policer_item))
- return;
-
- devlink_trap_policer_notify(devlink, policer_item,
- DEVLINK_CMD_TRAP_POLICER_DEL);
- list_del(&policer_item->list);
- if (devlink->ops->trap_policer_fini)
- devlink->ops->trap_policer_fini(devlink, policer);
- kfree(policer_item);
-}
-
-/**
- * devl_trap_policers_register - Register packet trap policers with devlink.
- * @devlink: devlink.
- * @policers: Packet trap policers.
- * @policers_count: Count of provided packet trap policers.
- *
- * Return: Non-zero value on failure.
- */
-int
-devl_trap_policers_register(struct devlink *devlink,
- const struct devlink_trap_policer *policers,
- size_t policers_count)
-{
- int i, err;
-
- devl_assert_locked(devlink);
- for (i = 0; i < policers_count; i++) {
- const struct devlink_trap_policer *policer = &policers[i];
-
- if (WARN_ON(policer->id == 0 ||
- policer->max_rate < policer->min_rate ||
- policer->max_burst < policer->min_burst)) {
- err = -EINVAL;
- goto err_trap_policer_verify;
- }
-
- err = devlink_trap_policer_register(devlink, policer);
- if (err)
- goto err_trap_policer_register;
- }
- return 0;
-
-err_trap_policer_register:
-err_trap_policer_verify:
- for (i--; i >= 0; i--)
- devlink_trap_policer_unregister(devlink, &policers[i]);
- return err;
-}
-EXPORT_SYMBOL_GPL(devl_trap_policers_register);
-
-/**
- * devl_trap_policers_unregister - Unregister packet trap policers from devlink.
- * @devlink: devlink.
- * @policers: Packet trap policers.
- * @policers_count: Count of provided packet trap policers.
- */
-void
-devl_trap_policers_unregister(struct devlink *devlink,
- const struct devlink_trap_policer *policers,
- size_t policers_count)
-{
- int i;
-
- devl_assert_locked(devlink);
- for (i = policers_count - 1; i >= 0; i--)
- devlink_trap_policer_unregister(devlink, &policers[i]);
-}
-EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
-
-int devlink_compat_phys_port_name_get(struct net_device *dev,
- char *name, size_t len)
-{
- struct devlink_port *devlink_port;
-
- /* RTNL mutex is held here which ensures that devlink_port
- * instance cannot disappear in the middle. No need to take
- * any devlink lock as only permanent values are accessed.
- */
- ASSERT_RTNL();
-
- devlink_port = dev->devlink_port;
- if (!devlink_port)
- return -EOPNOTSUPP;
-
- return __devlink_port_phys_port_name_get(devlink_port, name, len);
-}
-
-int devlink_compat_switch_id_get(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct devlink_port *devlink_port;
-
- /* Caller must hold RTNL mutex or reference to dev, which ensures that
- * devlink_port instance cannot disappear in the middle. No need to take
- * any devlink lock as only permanent values are accessed.
- */
- devlink_port = dev->devlink_port;
- if (!devlink_port || !devlink_port->switch_port)
- return -EOPNOTSUPP;
-
- memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
-
- return 0;
-}
diff --git a/net/devlink/linecard.c b/net/devlink/linecard.c
new file mode 100644
index 000000000000..85c32c314b0f
--- /dev/null
+++ b/net/devlink/linecard.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+static struct devlink_linecard *
+devlink_linecard_get_by_index(struct devlink *devlink,
+ unsigned int linecard_index)
+{
+ struct devlink_linecard *devlink_linecard;
+
+ list_for_each_entry(devlink_linecard, &devlink->linecard_list, list) {
+ if (devlink_linecard->index == linecard_index)
+ return devlink_linecard;
+ }
+ return NULL;
+}
+
+static bool devlink_linecard_index_exists(struct devlink *devlink,
+ unsigned int linecard_index)
+{
+ return devlink_linecard_get_by_index(devlink, linecard_index);
+}
+
+static struct devlink_linecard *
+devlink_linecard_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
+{
+ if (attrs[DEVLINK_ATTR_LINECARD_INDEX]) {
+ u32 linecard_index = nla_get_u32(attrs[DEVLINK_ATTR_LINECARD_INDEX]);
+ struct devlink_linecard *linecard;
+
+ linecard = devlink_linecard_get_by_index(devlink, linecard_index);
+ if (!linecard)
+ return ERR_PTR(-ENODEV);
+ return linecard;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct devlink_linecard *
+devlink_linecard_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ return devlink_linecard_get_from_attrs(devlink, info->attrs);
+}
+
+static int devlink_nl_put_nested_handle(struct sk_buff *msg, struct devlink *devlink)
+{
+ struct nlattr *nested_attr;
+
+ nested_attr = nla_nest_start(msg, DEVLINK_ATTR_NESTED_DEVLINK);
+ if (!nested_attr)
+ return -EMSGSIZE;
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nested_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, nested_attr);
+ return -EMSGSIZE;
+}
+
+struct devlink_linecard_type {
+ const char *type;
+ const void *priv;
+};
+
+static int devlink_nl_linecard_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_linecard *linecard,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_linecard_type *linecard_type;
+ struct nlattr *attr;
+ void *hdr;
+ int i;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX, linecard->index))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_LINECARD_STATE, linecard->state))
+ goto nla_put_failure;
+ if (linecard->type &&
+ nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE, linecard->type))
+ goto nla_put_failure;
+
+ if (linecard->types_count) {
+ attr = nla_nest_start(msg,
+ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES);
+ if (!attr)
+ goto nla_put_failure;
+ for (i = 0; i < linecard->types_count; i++) {
+ linecard_type = &linecard->types[i];
+ if (nla_put_string(msg, DEVLINK_ATTR_LINECARD_TYPE,
+ linecard_type->type)) {
+ nla_nest_cancel(msg, attr);
+ goto nla_put_failure;
+ }
+ }
+ nla_nest_end(msg, attr);
+ }
+
+ if (linecard->nested_devlink &&
+ devlink_nl_put_nested_handle(msg, linecard->nested_devlink))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_linecard_notify(struct devlink_linecard *linecard,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = linecard->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_LINECARD_NEW &&
+ cmd != DEVLINK_CMD_LINECARD_DEL);
+
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_linecard_fill(msg, devlink, linecard, cmd, 0, 0, 0,
+ NULL);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void devlink_linecards_notify_register(struct devlink *devlink)
+{
+ struct devlink_linecard *linecard;
+
+ list_for_each_entry(linecard, &devlink->linecard_list, list)
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+}
+
+void devlink_linecards_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_linecard *linecard;
+
+ list_for_each_entry_reverse(linecard, &devlink->linecard_list, list)
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
+}
+
+int devlink_nl_linecard_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_linecard *linecard;
+ struct sk_buff *msg;
+ int err;
+
+ linecard = devlink_linecard_get_from_info(devlink, info);
+ if (IS_ERR(linecard))
+ return PTR_ERR(linecard);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_NEW,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_linecard_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_linecard *linecard;
+ int idx = 0;
+ int err = 0;
+
+ list_for_each_entry(linecard, &devlink->linecard_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ mutex_lock(&linecard->state_lock);
+ err = devlink_nl_linecard_fill(msg, devlink, linecard,
+ DEVLINK_CMD_LINECARD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags,
+ cb->extack);
+ mutex_unlock(&linecard->state_lock);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
+ }
+
+ return err;
+}
+
+int devlink_nl_linecard_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_linecard_get_dump_one);
+}
+
+static struct devlink_linecard_type *
+devlink_linecard_type_lookup(struct devlink_linecard *linecard,
+ const char *type)
+{
+ struct devlink_linecard_type *linecard_type;
+ int i;
+
+ for (i = 0; i < linecard->types_count; i++) {
+ linecard_type = &linecard->types[i];
+ if (!strcmp(type, linecard_type->type))
+ return linecard_type;
+ }
+ return NULL;
+}
+
+static int devlink_linecard_type_set(struct devlink_linecard *linecard,
+ const char *type,
+ struct netlink_ext_ack *extack)
+{
+ const struct devlink_linecard_ops *ops = linecard->ops;
+ struct devlink_linecard_type *linecard_type;
+ int err;
+
+ mutex_lock(&linecard->state_lock);
+ if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
+ NL_SET_ERR_MSG(extack, "Line card is currently being provisioned");
+ err = -EBUSY;
+ goto out;
+ }
+ if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
+ NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned");
+ err = -EBUSY;
+ goto out;
+ }
+
+ linecard_type = devlink_linecard_type_lookup(linecard, type);
+ if (!linecard_type) {
+ NL_SET_ERR_MSG(extack, "Unsupported line card type provided");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (linecard->state != DEVLINK_LINECARD_STATE_UNPROVISIONED &&
+ linecard->state != DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
+ NL_SET_ERR_MSG(extack, "Line card already provisioned");
+ err = -EBUSY;
+		/* Check if the line card is already provisioned in the
+		 * same way the user asks. If so, make the operation
+		 * return success.
+		 */
+ if (ops->same_provision &&
+ ops->same_provision(linecard, linecard->priv,
+ linecard_type->type,
+ linecard_type->priv))
+ err = 0;
+ goto out;
+ }
+
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING;
+ linecard->type = linecard_type->type;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ err = ops->provision(linecard, linecard->priv, linecard_type->type,
+ linecard_type->priv, extack);
+ if (err) {
+ /* Provisioning failed. Assume the linecard is unprovisioned
+ * for future operations.
+ */
+ mutex_lock(&linecard->state_lock);
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ }
+ return err;
+
+out:
+ mutex_unlock(&linecard->state_lock);
+ return err;
+}
+
+static int devlink_linecard_type_unset(struct devlink_linecard *linecard,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mutex_lock(&linecard->state_lock);
+ if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING) {
+ NL_SET_ERR_MSG(extack, "Line card is currently being provisioned");
+ err = -EBUSY;
+ goto out;
+ }
+ if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONING) {
+ NL_SET_ERR_MSG(extack, "Line card is currently being unprovisioned");
+ err = -EBUSY;
+ goto out;
+ }
+ if (linecard->state == DEVLINK_LINECARD_STATE_PROVISIONING_FAILED) {
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ err = 0;
+ goto out;
+ }
+
+ if (linecard->state == DEVLINK_LINECARD_STATE_UNPROVISIONED) {
+ NL_SET_ERR_MSG(extack, "Line card is not provisioned");
+ err = 0;
+ goto out;
+ }
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONING;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ err = linecard->ops->unprovision(linecard, linecard->priv,
+ extack);
+ if (err) {
+ /* Unprovisioning failed. Assume the linecard is unprovisioned
+ * for future operations.
+ */
+ mutex_lock(&linecard->state_lock);
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+ }
+ return err;
+
+out:
+ mutex_unlock(&linecard->state_lock);
+ return err;
+}
+
+int devlink_nl_cmd_linecard_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_linecard *linecard;
+ int err;
+
+ linecard = devlink_linecard_get_from_info(devlink, info);
+ if (IS_ERR(linecard))
+ return PTR_ERR(linecard);
+
+ if (info->attrs[DEVLINK_ATTR_LINECARD_TYPE]) {
+ const char *type;
+
+ type = nla_data(info->attrs[DEVLINK_ATTR_LINECARD_TYPE]);
+ if (strcmp(type, "")) {
+ err = devlink_linecard_type_set(linecard, type, extack);
+ if (err)
+ return err;
+ } else {
+ err = devlink_linecard_type_unset(linecard, extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int devlink_linecard_types_init(struct devlink_linecard *linecard)
+{
+ struct devlink_linecard_type *linecard_type;
+ unsigned int count;
+ int i;
+
+ count = linecard->ops->types_count(linecard, linecard->priv);
+ linecard->types = kmalloc_array(count, sizeof(*linecard_type),
+ GFP_KERNEL);
+ if (!linecard->types)
+ return -ENOMEM;
+ linecard->types_count = count;
+
+ for (i = 0; i < count; i++) {
+ linecard_type = &linecard->types[i];
+ linecard->ops->types_get(linecard, linecard->priv, i,
+ &linecard_type->type,
+ &linecard_type->priv);
+ }
+ return 0;
+}
+
+static void devlink_linecard_types_fini(struct devlink_linecard *linecard)
+{
+ kfree(linecard->types);
+}
+
+/**
+ * devl_linecard_create - Create devlink linecard
+ *
+ * @devlink: devlink
+ * @linecard_index: driver-specific numerical identifier of the linecard
+ * @ops: linecard ops
+ * @priv: user priv pointer
+ *
+ * Create devlink linecard instance with provided linecard index.
+ * Caller can use any indexing, even a hw-related one.
+ *
+ * Return: Line card structure or an ERR_PTR() encoded error code.
+ */
+struct devlink_linecard *
+devl_linecard_create(struct devlink *devlink, unsigned int linecard_index,
+ const struct devlink_linecard_ops *ops, void *priv)
+{
+ struct devlink_linecard *linecard;
+ int err;
+
+ if (WARN_ON(!ops || !ops->provision || !ops->unprovision ||
+ !ops->types_count || !ops->types_get))
+ return ERR_PTR(-EINVAL);
+
+ if (devlink_linecard_index_exists(devlink, linecard_index))
+ return ERR_PTR(-EEXIST);
+
+ linecard = kzalloc(sizeof(*linecard), GFP_KERNEL);
+ if (!linecard)
+ return ERR_PTR(-ENOMEM);
+
+ linecard->devlink = devlink;
+ linecard->index = linecard_index;
+ linecard->ops = ops;
+ linecard->priv = priv;
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ mutex_init(&linecard->state_lock);
+
+ err = devlink_linecard_types_init(linecard);
+ if (err) {
+ mutex_destroy(&linecard->state_lock);
+ kfree(linecard);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&linecard->list, &devlink->linecard_list);
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ return linecard;
+}
+EXPORT_SYMBOL_GPL(devl_linecard_create);
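A minimal driver-side sketch of satisfying the ops contract checked above. It is illustrative only and not part of this patch; the my_* names are hypothetical and the callback signatures are inferred from how the ops are invoked in this file.

	static const char * const my_lc_types[] = { "16x100G", "8x200G" };

	static unsigned int my_lc_types_count(struct devlink_linecard *linecard,
					      void *priv)
	{
		return ARRAY_SIZE(my_lc_types);
	}

	static void my_lc_types_get(struct devlink_linecard *linecard, void *priv,
				    unsigned int index, const char **type,
				    const void **type_priv)
	{
		*type = my_lc_types[index];
		*type_priv = NULL;
	}

	static int my_lc_provision(struct devlink_linecard *linecard, void *priv,
				   const char *type, const void *type_priv,
				   struct netlink_ext_ack *extack)
	{
		/* Start (possibly asynchronous) provisioning in the driver. */
		return 0;
	}

	static int my_lc_unprovision(struct devlink_linecard *linecard, void *priv,
				     struct netlink_ext_ack *extack)
	{
		/* Tear down whatever my_lc_provision() set up. */
		return 0;
	}

	static const struct devlink_linecard_ops my_lc_ops = {
		.provision	= my_lc_provision,
		.unprovision	= my_lc_unprovision,
		.types_count	= my_lc_types_count,
		.types_get	= my_lc_types_get,
	};

	/* In driver context, with devlink->lock held; my_priv is hypothetical. */
	linecard = devl_linecard_create(devlink, 0, &my_lc_ops, my_priv);
	if (IS_ERR(linecard))
		return PTR_ERR(linecard);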
+
+/**
+ * devl_linecard_destroy - Destroy devlink linecard
+ *
+ * @linecard: devlink linecard
+ */
+void devl_linecard_destroy(struct devlink_linecard *linecard)
+{
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL);
+ list_del(&linecard->list);
+ devlink_linecard_types_fini(linecard);
+ mutex_destroy(&linecard->state_lock);
+ kfree(linecard);
+}
+EXPORT_SYMBOL_GPL(devl_linecard_destroy);
+
+/**
+ * devlink_linecard_provision_set - Set provisioning on linecard
+ *
+ * @linecard: devlink linecard
+ * @type: linecard type
+ *
+ * This is either called directly from the provision() op call or
+ * as a result of the provision() op call asynchronously.
+ */
+void devlink_linecard_provision_set(struct devlink_linecard *linecard,
+ const char *type)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(linecard->type && strcmp(linecard->type, type));
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
+ linecard->type = type;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_provision_set);
+
+/**
+ * devlink_linecard_provision_clear - Clear provisioning on linecard
+ *
+ * @linecard: devlink linecard
+ *
+ * This is either called directly from the unprovision() op call or
+ * as a result of the unprovision() op call asynchronously.
+ */
+void devlink_linecard_provision_clear(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(linecard->nested_devlink);
+ linecard->state = DEVLINK_LINECARD_STATE_UNPROVISIONED;
+ linecard->type = NULL;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_provision_clear);
+
+/**
+ * devlink_linecard_provision_fail - Fail provisioning on linecard
+ *
+ * @linecard: devlink linecard
+ *
+ * This is either called directly from the provision() op call or
+ * as a result of the provision() op call asynchronously.
+ */
+void devlink_linecard_provision_fail(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(linecard->nested_devlink);
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONING_FAILED;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_provision_fail);
+
+/**
+ * devlink_linecard_activate - Set linecard active
+ *
+ * @linecard: devlink linecard
+ */
+void devlink_linecard_activate(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ WARN_ON(linecard->state != DEVLINK_LINECARD_STATE_PROVISIONED);
+ linecard->state = DEVLINK_LINECARD_STATE_ACTIVE;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_activate);
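The provision_set/provision_fail/activate helpers above are typically driven from the driver's asynchronous completion path. A hedged sketch, with a hypothetical struct my_lc holding the driver's per-card state (not part of this patch):

	struct my_lc {				/* hypothetical driver state */
		struct devlink_linecard *devlink_linecard;
		const char *req_type;		/* type passed to provision() */
	};

	static void my_lc_provision_done(struct my_lc *lc, int status)
	{
		if (status) {
			devlink_linecard_provision_fail(lc->devlink_linecard);
			return;
		}
		devlink_linecard_provision_set(lc->devlink_linecard, lc->req_type);
		/* Later, once the hardware reports the card as up: */
		devlink_linecard_activate(lc->devlink_linecard);
	}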
+
+/**
+ * devlink_linecard_deactivate - Set linecard inactive
+ *
+ * @linecard: devlink linecard
+ */
+void devlink_linecard_deactivate(struct devlink_linecard *linecard)
+{
+ mutex_lock(&linecard->state_lock);
+ switch (linecard->state) {
+ case DEVLINK_LINECARD_STATE_ACTIVE:
+ linecard->state = DEVLINK_LINECARD_STATE_PROVISIONED;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ break;
+ case DEVLINK_LINECARD_STATE_UNPROVISIONING:
+ /* Line card is being deactivated as part
+ * of unprovisioning flow.
+ */
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_deactivate);
+
+/**
+ * devlink_linecard_nested_dl_set - Attach/detach nested devlink
+ * instance to linecard.
+ *
+ * @linecard: devlink linecard
+ * @nested_devlink: devlink instance to attach or NULL to detach
+ */
+void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard,
+ struct devlink *nested_devlink)
+{
+ mutex_lock(&linecard->state_lock);
+ linecard->nested_devlink = nested_devlink;
+ devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_NEW);
+ mutex_unlock(&linecard->state_lock);
+}
+EXPORT_SYMBOL_GPL(devlink_linecard_nested_dl_set);
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
index 72a5005a64cd..fc3e7c029a3b 100644
--- a/net/devlink/netlink.c
+++ b/net/devlink/netlink.c
@@ -82,6 +82,21 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_REGION_DIRECT] = { .type = NLA_FLAG },
};
+int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info)
+{
+ int err;
+
+ if (*msg) {
+ err = genlmsg_reply(*msg, info);
+ if (err)
+ return err;
+ }
+ *msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!*msg)
+ return -ENOMEM;
+ return 0;
+}
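Callers of this helper elsewhere in devlink flush a partially built reply to the requester and keep filling a fresh message. A hedged sketch of the intended pattern; my_fill_one() and item are hypothetical:

	err = my_fill_one(*msg, item);
	if (err == -EMSGSIZE) {
		/* Current message is full: reply with it and start a new one. */
		err = devlink_nl_msg_reply_and_new(msg, info);
		if (err)
			return err;
		err = my_fill_one(*msg, item);
	}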
+
struct devlink *
devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
{
@@ -240,6 +255,257 @@ int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
return devlink_nl_inst_iter_dumpit(msg, cb, flags, dump_one);
}
+static const struct genl_small_ops devlink_nl_small_ops[40] = {
+ {
+ .cmd = DEVLINK_CMD_PORT_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_port_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_SET,
+ .doit = devlink_nl_cmd_rate_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_NEW,
+ .doit = devlink_nl_cmd_rate_new_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_RATE_DEL,
+ .doit = devlink_nl_cmd_rate_del_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_SPLIT,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_port_split_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_UNSPLIT,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_port_unsplit_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_NEW,
+ .doit = devlink_nl_cmd_port_new_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_DEL,
+ .doit = devlink_nl_cmd_port_del_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+
+ {
+ .cmd = DEVLINK_CMD_LINECARD_SET,
+ .doit = devlink_nl_cmd_linecard_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_POOL_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_sb_pool_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_PORT_POOL_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_sb_port_pool_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_ESWITCH_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_eswitch_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_ESWITCH_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_eswitch_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_dpipe_table_get,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_dpipe_entries_get,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_dpipe_headers_get,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_dpipe_table_counters_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_RESOURCE_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_resource_set,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_RESOURCE_DUMP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_resource_dump,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_RELOAD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_reload,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_PARAM_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_param_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_port_param_get_doit,
+ .dumpit = devlink_nl_cmd_port_param_get_dumpit,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ /* can be retrieved by unprivileged users */
+ },
+ {
+ .cmd = DEVLINK_CMD_PORT_PARAM_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_port_param_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_NEW,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_region_new,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_DEL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_region_del,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_REGION_READ,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = devlink_nl_cmd_region_read_dumpit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_RECOVER,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_recover_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
+ .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_test_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT,
+ },
+ {
+ .cmd = DEVLINK_CMD_FLASH_UPDATE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_flash_update,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_SET,
+ .doit = devlink_nl_cmd_trap_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_GROUP_SET,
+ .doit = devlink_nl_cmd_trap_group_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
+ .doit = devlink_nl_cmd_trap_policer_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = DEVLINK_CMD_SELFTESTS_RUN,
+ .doit = devlink_nl_cmd_selftests_run,
+ .flags = GENL_ADMIN_PERM,
+ },
+ /* -- No new ops here! Use split ops going forward! -- */
+};
+
struct genl_family devlink_nl_family __ro_after_init = {
.name = DEVLINK_GENL_NAME,
.version = DEVLINK_GENL_VERSION,
diff --git a/net/devlink/param.c b/net/devlink/param.c
new file mode 100644
index 000000000000..31275f9d4cb7
--- /dev/null
+++ b/net/devlink/param.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+static const struct devlink_param devlink_param_generic[] = {
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME,
+ .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME,
+ .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME,
+ .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
+ .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME,
+ .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME,
+ .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
+ .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
+ .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+ .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
+ },
+};
+
+static int devlink_param_generic_verify(const struct devlink_param *param)
+{
+	/* verify it matches a generic parameter by id and name */
+ if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX)
+ return -EINVAL;
+ if (strcmp(param->name, devlink_param_generic[param->id].name))
+ return -ENOENT;
+
+ WARN_ON(param->type != devlink_param_generic[param->id].type);
+
+ return 0;
+}
+
+static int devlink_param_driver_verify(const struct devlink_param *param)
+{
+ int i;
+
+ if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX)
+ return -EINVAL;
+ /* verify no such name in generic params */
+ for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++)
+ if (!strcmp(param->name, devlink_param_generic[i].name))
+ return -EEXIST;
+
+ return 0;
+}
+
+static struct devlink_param_item *
+devlink_param_find_by_name(struct xarray *params, const char *param_name)
+{
+ struct devlink_param_item *param_item;
+ unsigned long param_id;
+
+ xa_for_each(params, param_id, param_item) {
+ if (!strcmp(param_item->param->name, param_name))
+ return param_item;
+ }
+ return NULL;
+}
+
+static struct devlink_param_item *
+devlink_param_find_by_id(struct xarray *params, u32 param_id)
+{
+ return xa_load(params, param_id);
+}
+
+static bool
+devlink_param_cmode_is_supported(const struct devlink_param *param,
+ enum devlink_param_cmode cmode)
+{
+ return test_bit(cmode, &param->supported_cmodes);
+}
+
+static int devlink_param_get(struct devlink *devlink,
+ const struct devlink_param *param,
+ struct devlink_param_gset_ctx *ctx)
+{
+ if (!param->get)
+ return -EOPNOTSUPP;
+ return param->get(devlink, param->id, ctx);
+}
+
+static int devlink_param_set(struct devlink *devlink,
+ const struct devlink_param *param,
+ struct devlink_param_gset_ctx *ctx)
+{
+ if (!param->set)
+ return -EOPNOTSUPP;
+ return param->set(devlink, param->id, ctx);
+}
+
+static int
+devlink_param_type_to_nla_type(enum devlink_param_type param_type)
+{
+ switch (param_type) {
+ case DEVLINK_PARAM_TYPE_U8:
+ return NLA_U8;
+ case DEVLINK_PARAM_TYPE_U16:
+ return NLA_U16;
+ case DEVLINK_PARAM_TYPE_U32:
+ return NLA_U32;
+ case DEVLINK_PARAM_TYPE_STRING:
+ return NLA_STRING;
+ case DEVLINK_PARAM_TYPE_BOOL:
+ return NLA_FLAG;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+devlink_nl_param_value_fill_one(struct sk_buff *msg,
+ enum devlink_param_type type,
+ enum devlink_param_cmode cmode,
+ union devlink_param_value val)
+{
+ struct nlattr *param_value_attr;
+
+ param_value_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_PARAM_VALUE);
+ if (!param_value_attr)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode))
+ goto value_nest_cancel;
+
+ switch (type) {
+ case DEVLINK_PARAM_TYPE_U8:
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_U16:
+ if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_U32:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_STRING:
+ if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA,
+ val.vstr))
+ goto value_nest_cancel;
+ break;
+ case DEVLINK_PARAM_TYPE_BOOL:
+ if (val.vbool &&
+ nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA))
+ goto value_nest_cancel;
+ break;
+ }
+
+ nla_nest_end(msg, param_value_attr);
+ return 0;
+
+value_nest_cancel:
+ nla_nest_cancel(msg, param_value_attr);
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink,
+ unsigned int port_index,
+ struct devlink_param_item *param_item,
+ enum devlink_command cmd,
+ u32 portid, u32 seq, int flags)
+{
+ union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1];
+ bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {};
+ const struct devlink_param *param = param_item->param;
+ struct devlink_param_gset_ctx ctx;
+ struct nlattr *param_values_list;
+ struct nlattr *param_attr;
+ int nla_type;
+ void *hdr;
+ int err;
+ int i;
+
+	/* Gather the value for each supported cmode: driverinit comes from
+	 * the cached copy, other cmodes from the driver's get() op.
+	 */
+ for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+ if (!devlink_param_cmode_is_supported(param, i))
+ continue;
+ if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+ if (param_item->driverinit_value_new_valid)
+ param_value[i] = param_item->driverinit_value_new;
+ else if (param_item->driverinit_value_valid)
+ param_value[i] = param_item->driverinit_value;
+ else
+ return -EOPNOTSUPP;
+ } else {
+ ctx.cmode = i;
+ err = devlink_param_get(devlink, param, &ctx);
+ if (err)
+ return err;
+ param_value[i] = ctx.val;
+ }
+ param_value_set[i] = true;
+ }
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto genlmsg_cancel;
+
+ if (cmd == DEVLINK_CMD_PORT_PARAM_GET ||
+ cmd == DEVLINK_CMD_PORT_PARAM_NEW ||
+ cmd == DEVLINK_CMD_PORT_PARAM_DEL)
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index))
+ goto genlmsg_cancel;
+
+ param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM);
+ if (!param_attr)
+ goto genlmsg_cancel;
+ if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name))
+ goto param_nest_cancel;
+ if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC))
+ goto param_nest_cancel;
+
+ nla_type = devlink_param_type_to_nla_type(param->type);
+ if (nla_type < 0)
+ goto param_nest_cancel;
+ if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type))
+ goto param_nest_cancel;
+
+ param_values_list = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_PARAM_VALUES_LIST);
+ if (!param_values_list)
+ goto param_nest_cancel;
+
+ for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) {
+ if (!param_value_set[i])
+ continue;
+ err = devlink_nl_param_value_fill_one(msg, param->type,
+ i, param_value[i]);
+ if (err)
+ goto values_list_nest_cancel;
+ }
+
+ nla_nest_end(msg, param_values_list);
+ nla_nest_end(msg, param_attr);
+ genlmsg_end(msg, hdr);
+ return 0;
+
+values_list_nest_cancel:
+ nla_nest_end(msg, param_values_list);
+param_nest_cancel:
+ nla_nest_cancel(msg, param_attr);
+genlmsg_cancel:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_param_notify(struct devlink *devlink,
+ unsigned int port_index,
+ struct devlink_param_item *param_item,
+ enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL &&
+ cmd != DEVLINK_CMD_PORT_PARAM_NEW &&
+ cmd != DEVLINK_CMD_PORT_PARAM_DEL);
+
+ /* devlink_notify_register() / devlink_notify_unregister()
+ * will replay the notifications if the params are added/removed
+ * outside of the lifetime of the instance.
+ */
+ if (!devl_is_registered(devlink))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+ err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd,
+ 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static void devlink_params_notify(struct devlink *devlink,
+ enum devlink_command cmd)
+{
+ struct devlink_param_item *param_item;
+ unsigned long param_id;
+
+ xa_for_each(&devlink->params, param_id, param_item)
+ devlink_param_notify(devlink, 0, param_item, cmd);
+}
+
+void devlink_params_notify_register(struct devlink *devlink)
+{
+ devlink_params_notify(devlink, DEVLINK_CMD_PARAM_NEW);
+}
+
+void devlink_params_notify_unregister(struct devlink *devlink)
+{
+ devlink_params_notify(devlink, DEVLINK_CMD_PARAM_DEL);
+}
+
+static int devlink_nl_param_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_param_item *param_item;
+ unsigned long param_id;
+ int err = 0;
+
+ xa_for_each_start(&devlink->params, param_id, param_item, state->idx) {
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = param_id;
+ break;
+ }
+ }
+
+ return err;
+}
+
+int devlink_nl_param_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_param_get_dump_one);
+}
+
+static int
+devlink_param_type_get_from_info(struct genl_info *info,
+ enum devlink_param_type *param_type)
+{
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE))
+ return -EINVAL;
+
+ switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) {
+ case NLA_U8:
+ *param_type = DEVLINK_PARAM_TYPE_U8;
+ break;
+ case NLA_U16:
+ *param_type = DEVLINK_PARAM_TYPE_U16;
+ break;
+ case NLA_U32:
+ *param_type = DEVLINK_PARAM_TYPE_U32;
+ break;
+ case NLA_STRING:
+ *param_type = DEVLINK_PARAM_TYPE_STRING;
+ break;
+ case NLA_FLAG:
+ *param_type = DEVLINK_PARAM_TYPE_BOOL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+devlink_param_value_get_from_info(const struct devlink_param *param,
+ struct genl_info *info,
+ union devlink_param_value *value)
+{
+ struct nlattr *param_data;
+ int len;
+
+ param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+
+ if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
+ return -EINVAL;
+
+ switch (param->type) {
+ case DEVLINK_PARAM_TYPE_U8:
+ if (nla_len(param_data) != sizeof(u8))
+ return -EINVAL;
+ value->vu8 = nla_get_u8(param_data);
+ break;
+ case DEVLINK_PARAM_TYPE_U16:
+ if (nla_len(param_data) != sizeof(u16))
+ return -EINVAL;
+ value->vu16 = nla_get_u16(param_data);
+ break;
+ case DEVLINK_PARAM_TYPE_U32:
+ if (nla_len(param_data) != sizeof(u32))
+ return -EINVAL;
+ value->vu32 = nla_get_u32(param_data);
+ break;
+ case DEVLINK_PARAM_TYPE_STRING:
+ len = strnlen(nla_data(param_data), nla_len(param_data));
+ if (len == nla_len(param_data) ||
+ len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
+ return -EINVAL;
+ strcpy(value->vstr, nla_data(param_data));
+ break;
+ case DEVLINK_PARAM_TYPE_BOOL:
+ if (param_data && nla_len(param_data))
+ return -EINVAL;
+ value->vbool = nla_get_flag(param_data);
+ break;
+ }
+ return 0;
+}
+
+static struct devlink_param_item *
+devlink_param_get_from_info(struct xarray *params, struct genl_info *info)
+{
+ char *param_name;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME))
+ return NULL;
+
+ param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]);
+ return devlink_param_find_by_name(params, param_name);
+}
+
+int devlink_nl_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_param_item *param_item;
+ struct sk_buff *msg;
+ int err;
+
+ param_item = devlink_param_get_from_info(&devlink->params, info);
+ if (!param_item)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_param_fill(msg, devlink, 0, param_item,
+ DEVLINK_CMD_PARAM_GET,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink,
+ unsigned int port_index,
+ struct xarray *params,
+ struct genl_info *info,
+ enum devlink_command cmd)
+{
+ enum devlink_param_type param_type;
+ struct devlink_param_gset_ctx ctx;
+ enum devlink_param_cmode cmode;
+ struct devlink_param_item *param_item;
+ const struct devlink_param *param;
+ union devlink_param_value value;
+ int err = 0;
+
+ param_item = devlink_param_get_from_info(params, info);
+ if (!param_item)
+ return -EINVAL;
+ param = param_item->param;
+ err = devlink_param_type_get_from_info(info, &param_type);
+ if (err)
+ return err;
+ if (param_type != param->type)
+ return -EINVAL;
+ err = devlink_param_value_get_from_info(param, info, &value);
+ if (err)
+ return err;
+ if (param->validate) {
+ err = param->validate(devlink, param->id, value, info->extack);
+ if (err)
+ return err;
+ }
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE))
+ return -EINVAL;
+ cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]);
+ if (!devlink_param_cmode_is_supported(param, cmode))
+ return -EOPNOTSUPP;
+
+ if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
+ param_item->driverinit_value_new = value;
+ param_item->driverinit_value_new_valid = true;
+ } else {
+ if (!param->set)
+ return -EOPNOTSUPP;
+ ctx.val = value;
+ ctx.cmode = cmode;
+ err = devlink_param_set(devlink, param, &ctx);
+ if (err)
+ return err;
+ }
+
+ devlink_param_notify(devlink, port_index, param_item, cmd);
+ return 0;
+}
+
+int devlink_nl_cmd_param_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+
+ return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->params,
+ info, DEVLINK_CMD_PARAM_NEW);
+}
+
+int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ NL_SET_ERR_MSG(cb->extack, "Port params are not supported");
+ return msg->len;
+}
+
+int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ NL_SET_ERR_MSG(info->extack, "Port params are not supported");
+ return -EINVAL;
+}
+
+int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ NL_SET_ERR_MSG(info->extack, "Port params are not supported");
+ return -EINVAL;
+}
+
+static int devlink_param_verify(const struct devlink_param *param)
+{
+ if (!param || !param->name || !param->supported_cmodes)
+ return -EINVAL;
+ if (param->generic)
+ return devlink_param_generic_verify(param);
+ else
+ return devlink_param_driver_verify(param);
+}
+
+static int devlink_param_register(struct devlink *devlink,
+ const struct devlink_param *param)
+{
+ struct devlink_param_item *param_item;
+ int err;
+
+ WARN_ON(devlink_param_verify(param));
+ WARN_ON(devlink_param_find_by_name(&devlink->params, param->name));
+
+ if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT))
+ WARN_ON(param->get || param->set);
+ else
+ WARN_ON(!param->get || !param->set);
+
+ param_item = kzalloc(sizeof(*param_item), GFP_KERNEL);
+ if (!param_item)
+ return -ENOMEM;
+
+ param_item->param = param;
+
+ err = xa_insert(&devlink->params, param->id, param_item, GFP_KERNEL);
+ if (err)
+ goto err_xa_insert;
+
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
+ return 0;
+
+err_xa_insert:
+ kfree(param_item);
+ return err;
+}
+
+static void devlink_param_unregister(struct devlink *devlink,
+ const struct devlink_param *param)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink->params, param->id);
+ if (WARN_ON(!param_item))
+ return;
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_DEL);
+ xa_erase(&devlink->params, param->id);
+ kfree(param_item);
+}
+
+/**
+ * devl_params_register - register configuration parameters
+ *
+ * @devlink: devlink
+ * @params: configuration parameters array
+ * @params_count: number of parameters provided
+ *
+ * Register the configuration parameters supported by the driver.
+ */
+int devl_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ const struct devlink_param *param = params;
+ int i, err;
+
+ lockdep_assert_held(&devlink->lock);
+
+ for (i = 0; i < params_count; i++, param++) {
+ err = devlink_param_register(devlink, param);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ if (!i)
+ return err;
+
+ for (param--; i > 0; i--, param--)
+ devlink_param_unregister(devlink, param);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devl_params_register);
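A hedged sketch of a driver registering a driverinit-only parameter. The MY_* identifier and the "foo_enable" name are hypothetical; the fields mirror the checks in devlink_param_register() above, and the call is assumed to run on the driver's init path with devlink->lock held:

	enum {
		MY_DEVLINK_PARAM_ID_FOO_ENABLE = DEVLINK_PARAM_GENERIC_ID_MAX + 1,
	};

	static const struct devlink_param my_params[] = {
		{
			.id = MY_DEVLINK_PARAM_ID_FOO_ENABLE,
			.name = "foo_enable",
			.type = DEVLINK_PARAM_TYPE_BOOL,
			.supported_cmodes = BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			/* driverinit-only, so no .get/.set, as enforced above */
		},
	};

	err = devl_params_register(devlink, my_params, ARRAY_SIZE(my_params));
	if (err)
		return err;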
+
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_params_register(devlink, params, params_count);
+ devl_unlock(devlink);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_params_register);
+
+/**
+ * devl_params_unregister - unregister configuration parameters
+ * @devlink: devlink
+ * @params: configuration parameters to unregister
+ * @params_count: number of parameters provided
+ */
+void devl_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ const struct devlink_param *param = params;
+ int i;
+
+ lockdep_assert_held(&devlink->lock);
+
+ for (i = 0; i < params_count; i++, param++)
+ devlink_param_unregister(devlink, param);
+}
+EXPORT_SYMBOL_GPL(devl_params_unregister);
+
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ devl_lock(devlink);
+ devl_params_unregister(devlink, params, params_count);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_params_unregister);
+
+/**
+ * devl_param_driverinit_value_get - get configuration parameter
+ *					  value for driver initialization
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @val: pointer to store the value of parameter in driverinit
+ * configuration mode
+ *
+ * This function should be used by the driver to get driverinit
+ * configuration for initialization after reload command.
+ *
+ * Note that a lockless call of this function relies on the
+ * driver to maintain the following basic sane behavior:
+ * 1) Driver ensures a call to this function cannot race with
+ * registering/unregistering the parameter with the same parameter ID.
+ * 2) Driver ensures a call to this function cannot race with
+ * devl_param_driverinit_value_set() call with the same parameter ID.
+ * 3) Driver ensures a call to this function cannot race with
+ * reload operation.
+ * If the driver is not able to comply, it has to take the devlink->lock
+ * while calling this.
+ */
+int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *val)
+{
+ struct devlink_param_item *param_item;
+
+ if (WARN_ON(!devlink_reload_supported(devlink->ops)))
+ return -EOPNOTSUPP;
+
+ param_item = devlink_param_find_by_id(&devlink->params, param_id);
+ if (!param_item)
+ return -EINVAL;
+
+ if (!param_item->driverinit_value_valid)
+ return -EOPNOTSUPP;
+
+ if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT)))
+ return -EOPNOTSUPP;
+
+ *val = param_item->driverinit_value;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_param_driverinit_value_get);
+
+/**
+ * devl_param_driverinit_value_set - set value of configuration
+ * parameter for driverinit
+ * configuration mode
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ * @init_val: value of parameter to set for driverinit configuration mode
+ *
+ * This function should be used by the driver to set driverinit
+ * configuration mode default value.
+ */
+void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ struct devlink_param_item *param_item;
+
+ devl_assert_locked(devlink);
+
+ param_item = devlink_param_find_by_id(&devlink->params, param_id);
+ if (WARN_ON(!param_item))
+ return;
+
+ if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT)))
+ return;
+
+ param_item->driverinit_value = init_val;
+ param_item->driverinit_value_valid = true;
+
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devl_param_driverinit_value_set);
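A hedged sketch of the set/get pair around reload, continuing the hypothetical foo_enable parameter from the earlier sketch (my_priv is likewise hypothetical):

	union devlink_param_value val;

	/* At first init, publish the driver default for driverinit cmode: */
	val.vbool = true;
	devl_param_driverinit_value_set(devlink, MY_DEVLINK_PARAM_ID_FOO_ENABLE, val);

	/* After reload, pick up whatever the user configured: */
	if (!devl_param_driverinit_value_get(devlink, MY_DEVLINK_PARAM_ID_FOO_ENABLE,
					     &val))
		my_priv->foo_enabled = val.vbool;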
+
+void devlink_params_driverinit_load_new(struct devlink *devlink)
+{
+ struct devlink_param_item *param_item;
+ unsigned long param_id;
+
+ xa_for_each(&devlink->params, param_id, param_item) {
+ if (!devlink_param_cmode_is_supported(param_item->param,
+ DEVLINK_PARAM_CMODE_DRIVERINIT) ||
+ !param_item->driverinit_value_new_valid)
+ continue;
+ param_item->driverinit_value = param_item->driverinit_value_new;
+ param_item->driverinit_value_valid = true;
+ param_item->driverinit_value_new_valid = false;
+ }
+}
+
+/**
+ * devl_param_value_changed - notify devlink on a parameter's value
+ * change. Should be called by the driver
+ * right after the change.
+ *
+ * @devlink: devlink
+ * @param_id: parameter ID
+ *
+ * This function should be used by the driver to notify devlink on value
+ * change, excluding driverinit configuration mode.
+ * For driverinit configuration mode driver should use the function
+ * For driverinit configuration mode, the driver should use
+ * devl_param_driverinit_value_set() instead.
+void devl_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+ struct devlink_param_item *param_item;
+
+ param_item = devlink_param_find_by_id(&devlink->params, param_id);
+ WARN_ON(!param_item);
+
+ devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
+}
+EXPORT_SYMBOL_GPL(devl_param_value_changed);
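For runtime or permanent cmodes the driver calls this right after the value changes on its side, for example when firmware updates it out of band. A hedged one-liner with a hypothetical runtime-capable parameter ID:

	devl_param_value_changed(devlink, MY_DEVLINK_PARAM_ID_BAR_RATE);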
diff --git a/net/devlink/port.c b/net/devlink/port.c
new file mode 100644
index 000000000000..4763b42885fb
--- /dev/null
+++ b/net/devlink/port.c
@@ -0,0 +1,1515 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+#define DEVLINK_PORT_FN_CAPS_VALID_MASK \
+ (_BITUL(__DEVLINK_PORT_FN_ATTR_CAPS_MAX) - 1)
+
+static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
+ [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
+ [DEVLINK_PORT_FN_ATTR_STATE] =
+ NLA_POLICY_RANGE(NLA_U8, DEVLINK_PORT_FN_STATE_INACTIVE,
+ DEVLINK_PORT_FN_STATE_ACTIVE),
+ [DEVLINK_PORT_FN_ATTR_CAPS] =
+ NLA_POLICY_BITFIELD32(DEVLINK_PORT_FN_CAPS_VALID_MASK),
+};
+
+#define ASSERT_DEVLINK_PORT_REGISTERED(devlink_port) \
+ WARN_ON_ONCE(!(devlink_port)->registered)
+#define ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port) \
+ WARN_ON_ONCE((devlink_port)->registered)
+
+struct devlink_port *devlink_port_get_by_index(struct devlink *devlink,
+ unsigned int port_index)
+{
+ return xa_load(&devlink->ports, port_index);
+}
+
+struct devlink_port *devlink_port_get_from_attrs(struct devlink *devlink,
+ struct nlattr **attrs)
+{
+ if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ u32 port_index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
+ struct devlink_port *devlink_port;
+
+ devlink_port = devlink_port_get_by_index(devlink, port_index);
+ if (!devlink_port)
+ return ERR_PTR(-ENODEV);
+ return devlink_port;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+struct devlink_port *devlink_port_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ return devlink_port_get_from_attrs(devlink, info->attrs);
+}
+
+static void devlink_port_fn_cap_fill(struct nla_bitfield32 *caps,
+ u32 cap, bool is_enable)
+{
+ caps->selector |= cap;
+ if (is_enable)
+ caps->value |= cap;
+}
+
+static int devlink_port_fn_roce_fill(struct devlink_port *devlink_port,
+ struct nla_bitfield32 *caps,
+ struct netlink_ext_ack *extack)
+{
+ bool is_enable;
+ int err;
+
+ if (!devlink_port->ops->port_fn_roce_get)
+ return 0;
+
+ err = devlink_port->ops->port_fn_roce_get(devlink_port, &is_enable,
+ extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ devlink_port_fn_cap_fill(caps, DEVLINK_PORT_FN_CAP_ROCE, is_enable);
+ return 0;
+}
+
+static int devlink_port_fn_migratable_fill(struct devlink_port *devlink_port,
+ struct nla_bitfield32 *caps,
+ struct netlink_ext_ack *extack)
+{
+ bool is_enable;
+ int err;
+
+ if (!devlink_port->ops->port_fn_migratable_get ||
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF)
+ return 0;
+
+ err = devlink_port->ops->port_fn_migratable_get(devlink_port,
+ &is_enable, extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ devlink_port_fn_cap_fill(caps, DEVLINK_PORT_FN_CAP_MIGRATABLE, is_enable);
+ return 0;
+}
+
+static int devlink_port_fn_ipsec_crypto_fill(struct devlink_port *devlink_port,
+ struct nla_bitfield32 *caps,
+ struct netlink_ext_ack *extack)
+{
+ bool is_enable;
+ int err;
+
+ if (!devlink_port->ops->port_fn_ipsec_crypto_get ||
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF)
+ return 0;
+
+ err = devlink_port->ops->port_fn_ipsec_crypto_get(devlink_port, &is_enable, extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ devlink_port_fn_cap_fill(caps, DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO, is_enable);
+ return 0;
+}
+
+static int devlink_port_fn_ipsec_packet_fill(struct devlink_port *devlink_port,
+ struct nla_bitfield32 *caps,
+ struct netlink_ext_ack *extack)
+{
+ bool is_enable;
+ int err;
+
+ if (!devlink_port->ops->port_fn_ipsec_packet_get ||
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF)
+ return 0;
+
+ err = devlink_port->ops->port_fn_ipsec_packet_get(devlink_port, &is_enable, extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ devlink_port_fn_cap_fill(caps, DEVLINK_PORT_FN_CAP_IPSEC_PACKET, is_enable);
+ return 0;
+}
+
+static int devlink_port_fn_caps_fill(struct devlink_port *devlink_port,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ bool *msg_updated)
+{
+ struct nla_bitfield32 caps = {};
+ int err;
+
+ err = devlink_port_fn_roce_fill(devlink_port, &caps, extack);
+ if (err)
+ return err;
+
+ err = devlink_port_fn_migratable_fill(devlink_port, &caps, extack);
+ if (err)
+ return err;
+
+ err = devlink_port_fn_ipsec_crypto_fill(devlink_port, &caps, extack);
+ if (err)
+ return err;
+
+ err = devlink_port_fn_ipsec_packet_fill(devlink_port, &caps, extack);
+ if (err)
+ return err;
+
+ if (!caps.selector)
+ return 0;
+ err = nla_put_bitfield32(msg, DEVLINK_PORT_FN_ATTR_CAPS, caps.value,
+ caps.selector);
+ if (err)
+ return err;
+
+ *msg_updated = true;
+ return 0;
+}
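A note on the convention used by the fill helpers above: a capability getter that returns -EOPNOTSUPP is treated as "attribute not reported" rather than as a hard error. A minimal driver-side sketch of a getter following that convention (struct foo_vport and its fields are hypothetical; only the devlink-facing signature is taken from this file):

static int foo_port_fn_roce_get(struct devlink_port *port, bool *is_enable,
                                struct netlink_ext_ack *extack)
{
        /* Hypothetical driver state embedding the devlink_port. */
        struct foo_vport *vport = container_of(port, struct foo_vport, dl_port);

        if (!vport->caps_valid)
                return -EOPNOTSUPP;     /* silently omitted from the reply */

        *is_enable = vport->roce_enabled;
        return 0;
}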
+
+int devlink_nl_port_handle_fill(struct sk_buff *msg, struct devlink_port *devlink_port)
+{
+ if (devlink_nl_put_handle(msg, devlink_port->devlink))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ return -EMSGSIZE;
+ return 0;
+}
+
+size_t devlink_nl_port_handle_size(struct devlink_port *devlink_port)
+{
+ struct devlink *devlink = devlink_port->devlink;
+
+ return nla_total_size(strlen(devlink->dev->bus->name) + 1) /* DEVLINK_ATTR_BUS_NAME */
+ + nla_total_size(strlen(dev_name(devlink->dev)) + 1) /* DEVLINK_ATTR_DEV_NAME */
+ + nla_total_size(4); /* DEVLINK_ATTR_PORT_INDEX */
+}
+
+static int devlink_nl_port_attrs_put(struct sk_buff *msg,
+ struct devlink_port *devlink_port)
+{
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+
+ if (!devlink_port->attrs_set)
+ return 0;
+ if (attrs->lanes) {
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes))
+ return -EMSGSIZE;
+ }
+ if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable))
+ return -EMSGSIZE;
+ if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour))
+ return -EMSGSIZE;
+ switch (devlink_port->attrs.flavour) {
+ case DEVLINK_PORT_FLAVOUR_PCI_PF:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+ attrs->pci_pf.controller) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_pf.external))
+ return -EMSGSIZE;
+ break;
+ case DEVLINK_PORT_FLAVOUR_PCI_VF:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+ attrs->pci_vf.controller) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_vf.pf) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, attrs->pci_vf.vf))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_vf.external))
+ return -EMSGSIZE;
+ break;
+ case DEVLINK_PORT_FLAVOUR_PCI_SF:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+ attrs->pci_sf.controller) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
+ attrs->pci_sf.pf) ||
+ nla_put_u32(msg, DEVLINK_ATTR_PORT_PCI_SF_NUMBER,
+ attrs->pci_sf.sf))
+ return -EMSGSIZE;
+ break;
+ case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+ case DEVLINK_PORT_FLAVOUR_CPU:
+ case DEVLINK_PORT_FLAVOUR_DSA:
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
+ attrs->phys.port_number))
+ return -EMSGSIZE;
+ if (!attrs->split)
+ return 0;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_GROUP,
+ attrs->phys.port_number))
+ return -EMSGSIZE;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER,
+ attrs->phys.split_subport_number))
+ return -EMSGSIZE;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int devlink_port_fn_hw_addr_fill(struct devlink_port *port,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ bool *msg_updated)
+{
+ u8 hw_addr[MAX_ADDR_LEN];
+ int hw_addr_len;
+ int err;
+
+ if (!port->ops->port_fn_hw_addr_get)
+ return 0;
+
+ err = port->ops->port_fn_hw_addr_get(port, hw_addr, &hw_addr_len,
+ extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+ err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr);
+ if (err)
+ return err;
+ *msg_updated = true;
+ return 0;
+}
+
+static bool
+devlink_port_fn_state_valid(enum devlink_port_fn_state state)
+{
+ return state == DEVLINK_PORT_FN_STATE_INACTIVE ||
+ state == DEVLINK_PORT_FN_STATE_ACTIVE;
+}
+
+static bool
+devlink_port_fn_opstate_valid(enum devlink_port_fn_opstate opstate)
+{
+ return opstate == DEVLINK_PORT_FN_OPSTATE_DETACHED ||
+ opstate == DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+}
+
+static int devlink_port_fn_state_fill(struct devlink_port *port,
+ struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ bool *msg_updated)
+{
+ enum devlink_port_fn_opstate opstate;
+ enum devlink_port_fn_state state;
+ int err;
+
+ if (!port->ops->port_fn_state_get)
+ return 0;
+
+ err = port->ops->port_fn_state_get(port, &state, &opstate, extack);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+ if (!devlink_port_fn_state_valid(state)) {
+ WARN_ON_ONCE(1);
+ NL_SET_ERR_MSG(extack, "Invalid state read from driver");
+ return -EINVAL;
+ }
+ if (!devlink_port_fn_opstate_valid(opstate)) {
+ WARN_ON_ONCE(1);
+ NL_SET_ERR_MSG(extack, "Invalid operational state read from driver");
+ return -EINVAL;
+ }
+ if (nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_STATE, state) ||
+ nla_put_u8(msg, DEVLINK_PORT_FN_ATTR_OPSTATE, opstate))
+ return -EMSGSIZE;
+ *msg_updated = true;
+ return 0;
+}
+
+static int
+devlink_port_fn_mig_set(struct devlink_port *devlink_port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_port->ops->port_fn_migratable_set(devlink_port, enable,
+ extack);
+}
+
+static int
+devlink_port_fn_roce_set(struct devlink_port *devlink_port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_port->ops->port_fn_roce_set(devlink_port, enable,
+ extack);
+}
+
+static int
+devlink_port_fn_ipsec_crypto_set(struct devlink_port *devlink_port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_port->ops->port_fn_ipsec_crypto_set(devlink_port, enable, extack);
+}
+
+static int
+devlink_port_fn_ipsec_packet_set(struct devlink_port *devlink_port, bool enable,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_port->ops->port_fn_ipsec_packet_set(devlink_port, enable, extack);
+}
+
+static int devlink_port_fn_caps_set(struct devlink_port *devlink_port,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nla_bitfield32 caps;
+ u32 caps_value;
+ int err;
+
+ caps = nla_get_bitfield32(attr);
+ caps_value = caps.value & caps.selector;
+ if (caps.selector & DEVLINK_PORT_FN_CAP_ROCE) {
+ err = devlink_port_fn_roce_set(devlink_port,
+ caps_value & DEVLINK_PORT_FN_CAP_ROCE,
+ extack);
+ if (err)
+ return err;
+ }
+ if (caps.selector & DEVLINK_PORT_FN_CAP_MIGRATABLE) {
+ err = devlink_port_fn_mig_set(devlink_port, caps_value &
+ DEVLINK_PORT_FN_CAP_MIGRATABLE,
+ extack);
+ if (err)
+ return err;
+ }
+ if (caps.selector & DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO) {
+ err = devlink_port_fn_ipsec_crypto_set(devlink_port, caps_value &
+ DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO,
+ extack);
+ if (err)
+ return err;
+ }
+ if (caps.selector & DEVLINK_PORT_FN_CAP_IPSEC_PACKET) {
+ err = devlink_port_fn_ipsec_packet_set(devlink_port, caps_value &
+ DEVLINK_PORT_FN_CAP_IPSEC_PACKET,
+ extack);
+ if (err)
+ return err;
+ }
+ return 0;
+}
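The selector/value split of the bitfield32 deserves a worked example: the selector names the capabilities the request touches, the value gives their desired on/off state, and untouched bits never reach the driver. Derived directly from the code above:

/* Worked example (attribute contents are illustrative):
 *   caps.selector = DEVLINK_PORT_FN_CAP_ROCE | DEVLINK_PORT_FN_CAP_MIGRATABLE;
 *   caps.value    = DEVLINK_PORT_FN_CAP_ROCE;
 * devlink_port_fn_caps_set() then calls ->port_fn_roce_set(port, true, ...)
 * and ->port_fn_migratable_set(port, false, ...); the two IPsec capabilities
 * are left untouched because their selector bits are clear.
 */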
+
+static int
+devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *function_attr;
+ bool msg_updated = false;
+ int err;
+
+ function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION);
+ if (!function_attr)
+ return -EMSGSIZE;
+
+ err = devlink_port_fn_hw_addr_fill(port, msg, extack, &msg_updated);
+ if (err)
+ goto out;
+ err = devlink_port_fn_caps_fill(port, msg, extack, &msg_updated);
+ if (err)
+ goto out;
+ err = devlink_port_fn_state_fill(port, msg, extack, &msg_updated);
+out:
+ if (err || !msg_updated)
+ nla_nest_cancel(msg, function_attr);
+ else
+ nla_nest_end(msg, function_attr);
+ return err;
+}
+
+static int devlink_nl_port_fill(struct sk_buff *msg,
+ struct devlink_port *devlink_port,
+ enum devlink_command cmd, u32 portid, u32 seq,
+ int flags, struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = devlink_port->devlink;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ goto nla_put_failure;
+
+ spin_lock_bh(&devlink_port->type_lock);
+ if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
+ goto nla_put_failure_type_locked;
+ if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET &&
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_DESIRED_TYPE,
+ devlink_port->desired_type))
+ goto nla_put_failure_type_locked;
+ if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
+ if (devlink_port->type_eth.netdev &&
+ (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
+ devlink_port->type_eth.ifindex) ||
+ nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
+ devlink_port->type_eth.ifname)))
+ goto nla_put_failure_type_locked;
+ }
+ if (devlink_port->type == DEVLINK_PORT_TYPE_IB) {
+ struct ib_device *ibdev = devlink_port->type_ib.ibdev;
+
+ if (ibdev &&
+ nla_put_string(msg, DEVLINK_ATTR_PORT_IBDEV_NAME,
+ ibdev->name))
+ goto nla_put_failure_type_locked;
+ }
+ spin_unlock_bh(&devlink_port->type_lock);
+ if (devlink_nl_port_attrs_put(msg, devlink_port))
+ goto nla_put_failure;
+ if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
+ goto nla_put_failure;
+ if (devlink_port->linecard &&
+ nla_put_u32(msg, DEVLINK_ATTR_LINECARD_INDEX,
+ devlink_port->linecard->index))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure_type_locked:
+ spin_unlock_bh(&devlink_port->type_lock);
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_port_notify(struct devlink_port *devlink_port,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = devlink_port->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_PORT_NEW && cmd != DEVLINK_CMD_PORT_DEL);
+
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_port_fill(msg, devlink_port, cmd, 0, 0, 0, NULL);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+static void devlink_ports_notify(struct devlink *devlink,
+ enum devlink_command cmd)
+{
+ struct devlink_port *devlink_port;
+ unsigned long port_index;
+
+ xa_for_each(&devlink->ports, port_index, devlink_port)
+ devlink_port_notify(devlink_port, cmd);
+}
+
+void devlink_ports_notify_register(struct devlink *devlink)
+{
+ devlink_ports_notify(devlink, DEVLINK_CMD_PORT_NEW);
+}
+
+void devlink_ports_notify_unregister(struct devlink *devlink)
+{
+ devlink_ports_notify(devlink, DEVLINK_CMD_PORT_DEL);
+}
+
+int devlink_nl_port_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_port_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_port *devlink_port;
+ unsigned long port_index;
+ int err = 0;
+
+ xa_for_each_start(&devlink->ports, port_index, devlink_port, state->idx) {
+ err = devlink_nl_port_fill(msg, devlink_port,
+ DEVLINK_CMD_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags,
+ cb->extack);
+ if (err) {
+ state->idx = port_index;
+ break;
+ }
+ }
+
+ return err;
+}
+
+int devlink_nl_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_port_get_dump_one);
+}
+
+static int devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+
+{
+ int err;
+
+ if (!devlink_port->ops->port_type_set)
+ return -EOPNOTSUPP;
+
+ if (port_type == devlink_port->type)
+ return 0;
+
+ err = devlink_port->ops->port_type_set(devlink_port, port_type);
+ if (err)
+ return err;
+
+ devlink_port->desired_type = port_type;
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+ return 0;
+}
+
+static int devlink_port_function_hw_addr_set(struct devlink_port *port,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ const u8 *hw_addr;
+ int hw_addr_len;
+
+ hw_addr = nla_data(attr);
+ hw_addr_len = nla_len(attr);
+ if (hw_addr_len > MAX_ADDR_LEN) {
+ NL_SET_ERR_MSG(extack, "Port function hardware address too long");
+ return -EINVAL;
+ }
+ if (port->type == DEVLINK_PORT_TYPE_ETH) {
+ if (hw_addr_len != ETH_ALEN) {
+ NL_SET_ERR_MSG(extack, "Address must be 6 bytes for Ethernet device");
+ return -EINVAL;
+ }
+ if (!is_unicast_ether_addr(hw_addr)) {
+ NL_SET_ERR_MSG(extack, "Non-unicast hardware address unsupported");
+ return -EINVAL;
+ }
+ }
+
+ return port->ops->port_fn_hw_addr_set(port, hw_addr, hw_addr_len,
+ extack);
+}
+
+static int devlink_port_fn_state_set(struct devlink_port *port,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ enum devlink_port_fn_state state;
+
+ state = nla_get_u8(attr);
+ return port->ops->port_fn_state_set(port, state, extack);
+}
+
+static int devlink_port_function_validate(struct devlink_port *devlink_port,
+ struct nlattr **tb,
+ struct netlink_ext_ack *extack)
+{
+ const struct devlink_port_ops *ops = devlink_port->ops;
+ struct nlattr *attr;
+
+ if (tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] &&
+ !ops->port_fn_hw_addr_set) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR],
+ "Port doesn't support function attributes");
+ return -EOPNOTSUPP;
+ }
+ if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FN_ATTR_STATE],
+ "Function does not support state setting");
+ return -EOPNOTSUPP;
+ }
+ attr = tb[DEVLINK_PORT_FN_ATTR_CAPS];
+ if (attr) {
+ struct nla_bitfield32 caps;
+
+ caps = nla_get_bitfield32(attr);
+ if (caps.selector & DEVLINK_PORT_FN_CAP_ROCE &&
+ !ops->port_fn_roce_set) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Port doesn't support RoCE function attribute");
+ return -EOPNOTSUPP;
+ }
+ if (caps.selector & DEVLINK_PORT_FN_CAP_MIGRATABLE) {
+ if (!ops->port_fn_migratable_set) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Port doesn't support migratable function attribute");
+ return -EOPNOTSUPP;
+ }
+ if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "migratable function attribute supported for VFs only");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (caps.selector & DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO) {
+ if (!ops->port_fn_ipsec_crypto_set) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Port doesn't support ipsec_crypto function attribute");
+ return -EOPNOTSUPP;
+ }
+ if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "ipsec_crypto function attribute supported for VFs only");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (caps.selector & DEVLINK_PORT_FN_CAP_IPSEC_PACKET) {
+ if (!ops->port_fn_ipsec_packet_set) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "Port doesn't support ipsec_packet function attribute");
+ return -EOPNOTSUPP;
+ }
+ if (devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF) {
+ NL_SET_ERR_MSG_ATTR(extack, attr,
+ "ipsec_packet function attribute supported for VFs only");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+ return 0;
+}
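devlink_port_function_validate() rejects any function attribute for which the port ops provide no setter, so a driver opts in simply by wiring the callbacks. A sketch of such an ops table for a PCI VF port (the foo_* callbacks are hypothetical and assumed to be defined elsewhere; the field names come from struct devlink_port_ops as used in this file):

static const struct devlink_port_ops foo_vf_port_ops = {
        .port_fn_hw_addr_get    = foo_port_fn_hw_addr_get,
        .port_fn_hw_addr_set    = foo_port_fn_hw_addr_set,
        .port_fn_state_get      = foo_port_fn_state_get,
        .port_fn_state_set      = foo_port_fn_state_set,
        .port_fn_roce_get       = foo_port_fn_roce_get,
        .port_fn_roce_set       = foo_port_fn_roce_set,
};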
+
+static int devlink_port_function_set(struct devlink_port *port,
+ const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
+ devlink_function_nl_policy, extack);
+ if (err < 0) {
+ NL_SET_ERR_MSG(extack, "Fail to parse port function attributes");
+ return err;
+ }
+
+ err = devlink_port_function_validate(port, tb, extack);
+ if (err)
+ return err;
+
+ attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR];
+ if (attr) {
+ err = devlink_port_function_hw_addr_set(port, attr, extack);
+ if (err)
+ return err;
+ }
+
+ attr = tb[DEVLINK_PORT_FN_ATTR_CAPS];
+ if (attr) {
+ err = devlink_port_fn_caps_set(port, attr, extack);
+ if (err)
+ return err;
+ }
+
+ /* Keep this as the last function attribute set, so that when
+ * multiple port function attributes are set along with state,
+ * those are applied first, before the state is activated.
+ */
+ attr = tb[DEVLINK_PORT_FN_ATTR_STATE];
+ if (attr)
+ err = devlink_port_fn_state_set(port, attr, extack);
+
+ if (!err)
+ devlink_port_notify(port, DEVLINK_CMD_PORT_NEW);
+ return err;
+}
+
+int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ int err;
+
+ if (info->attrs[DEVLINK_ATTR_PORT_TYPE]) {
+ enum devlink_port_type port_type;
+
+ port_type = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_TYPE]);
+ err = devlink_port_type_set(devlink_port, port_type);
+ if (err)
+ return err;
+ }
+
+ if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) {
+ struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION];
+ struct netlink_ext_ack *extack = info->extack;
+
+ err = devlink_port_function_set(devlink_port, attr, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink *devlink = info->user_ptr[0];
+ u32 count;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT))
+ return -EINVAL;
+ if (!devlink_port->ops->port_split)
+ return -EOPNOTSUPP;
+
+ count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
+
+ if (!devlink_port->attrs.splittable) {
+ /* Split ports cannot be split. */
+ if (devlink_port->attrs.split)
+ NL_SET_ERR_MSG(info->extack, "Port cannot be split further");
+ else
+ NL_SET_ERR_MSG(info->extack, "Port cannot be split");
+ return -EINVAL;
+ }
+
+ if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
+ NL_SET_ERR_MSG(info->extack, "Invalid split count");
+ return -EINVAL;
+ }
+
+ return devlink_port->ops->port_split(devlink, devlink_port, count,
+ info->extack);
+}
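To make the split checks concrete, here is a worked example derived directly from the conditions above:

/* A physical port registered with attrs->lanes = 4 and attrs->splittable = true
 * accepts DEVLINK_ATTR_PORT_SPLIT_COUNT of 2 or 4. A count of 3 fails the
 * is_power_of_2() check and a count of 8 exceeds the lane count; both return
 * -EINVAL before ->port_split() is ever called.
 */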
+
+int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (!devlink_port->ops->port_unsplit)
+ return -EOPNOTSUPP;
+ return devlink_port->ops->port_unsplit(devlink, devlink_port, info->extack);
+}
+
+int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink_port_new_attrs new_attrs = {};
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_port *devlink_port;
+ struct sk_buff *msg;
+ int err;
+
+ if (!devlink->ops->port_new)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] ||
+ !info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]) {
+ NL_SET_ERR_MSG(extack, "Port flavour or PCI PF are not specified");
+ return -EINVAL;
+ }
+ new_attrs.flavour = nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_FLAVOUR]);
+ new_attrs.pfnum =
+ nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_PCI_PF_NUMBER]);
+
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ /* Port index of the new port being created by driver. */
+ new_attrs.port_index =
+ nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+ new_attrs.port_index_valid = true;
+ }
+ if (info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]) {
+ new_attrs.controller =
+ nla_get_u16(info->attrs[DEVLINK_ATTR_PORT_CONTROLLER_NUMBER]);
+ new_attrs.controller_valid = true;
+ }
+ if (new_attrs.flavour == DEVLINK_PORT_FLAVOUR_PCI_SF &&
+ info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]) {
+ new_attrs.sfnum = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_PCI_SF_NUMBER]);
+ new_attrs.sfnum_valid = true;
+ }
+
+ err = devlink->ops->port_new(devlink, &new_attrs,
+ extack, &devlink_port);
+ if (err)
+ return err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto err_out_port_del;
+ }
+ err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
+ info->snd_portid, info->snd_seq, 0, NULL);
+ if (WARN_ON_ONCE(err))
+ goto err_out_msg_free;
+ err = genlmsg_reply(msg, info);
+ if (err)
+ goto err_out_port_del;
+ return 0;
+
+err_out_msg_free:
+ nlmsg_free(msg);
+err_out_port_del:
+ devlink_port->ops->port_del(devlink, devlink_port, NULL);
+ return err;
+}
+
+int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (!devlink_port->ops->port_del)
+ return -EOPNOTSUPP;
+
+ return devlink_port->ops->port_del(devlink, devlink_port, extack);
+}
+
+static void devlink_port_type_warn(struct work_struct *work)
+{
+ struct devlink_port *port = container_of(to_delayed_work(work),
+ struct devlink_port,
+ type_warn_dw);
+ dev_warn(port->devlink->dev, "Type was not set for devlink port.");
+}
+
+static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
+{
+ /* Ignore CPU and DSA flavours. */
+ return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA &&
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_UNUSED;
+}
+
+#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
+
+static void devlink_port_type_warn_schedule(struct devlink_port *devlink_port)
+{
+ if (!devlink_port_type_should_warn(devlink_port))
+ return;
+ /* Schedule delayed work to warn in case the driver does not
+ * set the port type within the timeout.
+ */
+ schedule_delayed_work(&devlink_port->type_warn_dw,
+ DEVLINK_PORT_TYPE_WARN_TIMEOUT);
+}
+
+static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port)
+{
+ if (!devlink_port_type_should_warn(devlink_port))
+ return;
+ cancel_delayed_work_sync(&devlink_port->type_warn_dw);
+}
+
+/**
+ * devlink_port_init() - Init devlink port
+ *
+ * @devlink: devlink
+ * @devlink_port: devlink port
+ *
+ * Initialize the essential fields needed by functions that may be
+ * called before devlink port registration. Calling this function is
+ * optional; it is not needed if the driver does not use such functions.
+ */
+void devlink_port_init(struct devlink *devlink,
+ struct devlink_port *devlink_port)
+{
+ if (devlink_port->initialized)
+ return;
+ devlink_port->devlink = devlink;
+ INIT_LIST_HEAD(&devlink_port->region_list);
+ devlink_port->initialized = true;
+}
+EXPORT_SYMBOL_GPL(devlink_port_init);
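A minimal sketch of the intended usage, assuming the devlink_port is embedded in a hypothetical driver private structure and initialized early in probe, before the port is registered:

struct foo_priv {
        struct devlink_port dl_port;
        /* ... driver-specific members ... */
};

static void foo_port_pre_setup(struct devlink *devlink, struct foo_priv *priv)
{
        memset(&priv->dl_port, 0, sizeof(priv->dl_port));
        devlink_port_init(devlink, &priv->dl_port);
        /* port-scoped setup that is legal before registration goes here */
}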
+
+/**
+ * devlink_port_fini() - Deinitialize devlink port
+ *
+ * @devlink_port: devlink port
+ *
+ * Deinitialize the fields used by functions that may be called
+ * after devlink port unregistration. Calling this function is
+ * optional; it is not needed if the driver does not use such functions.
+ */
+void devlink_port_fini(struct devlink_port *devlink_port)
+{
+ WARN_ON(!list_empty(&devlink_port->region_list));
+}
+EXPORT_SYMBOL_GPL(devlink_port_fini);
+
+static const struct devlink_port_ops devlink_port_dummy_ops = {};
+
+/**
+ * devl_port_register_with_ops() - Register devlink port
+ *
+ * @devlink: devlink
+ * @devlink_port: devlink port
+ * @port_index: driver-specific numerical identifier of the port
+ * @ops: port ops
+ *
+ * Register a devlink port with the provided port index. The caller may
+ * use any indexing scheme, including a hardware-related one. The
+ * devlink_port structure is conveniently embedded inside the driver's
+ * private structure. Note that the caller is responsible for zeroing
+ * the devlink_port structure.
+ */
+int devl_port_register_with_ops(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index,
+ const struct devlink_port_ops *ops)
+{
+ int err;
+
+ devl_assert_locked(devlink);
+
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ devlink_port_init(devlink, devlink_port);
+ devlink_port->registered = true;
+ devlink_port->index = port_index;
+ devlink_port->ops = ops ? ops : &devlink_port_dummy_ops;
+ spin_lock_init(&devlink_port->type_lock);
+ INIT_LIST_HEAD(&devlink_port->reporter_list);
+ err = xa_insert(&devlink->ports, port_index, devlink_port, GFP_KERNEL);
+ if (err) {
+ devlink_port->registered = false;
+ return err;
+ }
+
+ INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
+ devlink_port_type_warn_schedule(devlink_port);
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_port_register_with_ops);
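Putting attribute setup and registration together, a driver that already holds the instance lock might register a physical port roughly as follows. This is a sketch; struct foo_priv and foo_port_ops are the hypothetical names used in the earlier examples:

static int foo_register_port(struct devlink *devlink, struct foo_priv *priv,
                             unsigned int index)
{
        struct devlink_port_attrs attrs = {
                .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
                .phys.port_number = index,
        };

        devlink_port_attrs_set(&priv->dl_port, &attrs);
        return devl_port_register_with_ops(devlink, &priv->dl_port, index,
                                           &foo_port_ops);
}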
+
+/**
+ * devlink_port_register_with_ops - Register devlink port
+ *
+ * @devlink: devlink
+ * @devlink_port: devlink port
+ * @port_index: driver-specific numerical identifier of the port
+ * @ops: port ops
+ *
+ * Register a devlink port with the provided port index. The caller may
+ * use any indexing scheme, including a hardware-related one. The
+ * devlink_port structure is conveniently embedded inside the driver's
+ * private structure. Note that the caller is responsible for zeroing
+ * the devlink_port structure.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+int devlink_port_register_with_ops(struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ unsigned int port_index,
+ const struct devlink_port_ops *ops)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_port_register_with_ops(devlink, devlink_port,
+ port_index, ops);
+ devl_unlock(devlink);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_port_register_with_ops);
+
+/**
+ * devl_port_unregister() - Unregister devlink port
+ *
+ * @devlink_port: devlink port
+ */
+void devl_port_unregister(struct devlink_port *devlink_port)
+{
+ lockdep_assert_held(&devlink_port->devlink->lock);
+ WARN_ON(devlink_port->type != DEVLINK_PORT_TYPE_NOTSET);
+
+ devlink_port_type_warn_cancel(devlink_port);
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
+ xa_erase(&devlink_port->devlink->ports, devlink_port->index);
+ WARN_ON(!list_empty(&devlink_port->reporter_list));
+ devlink_port->registered = false;
+}
+EXPORT_SYMBOL_GPL(devl_port_unregister);
+
+/**
+ * devlink_port_unregister - Unregister devlink port
+ *
+ * @devlink_port: devlink port
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_port_unregister(struct devlink_port *devlink_port)
+{
+ struct devlink *devlink = devlink_port->devlink;
+
+ devl_lock(devlink);
+ devl_port_unregister(devlink_port);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_port_unregister);
+
+static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
+ struct net_device *netdev)
+{
+ const struct net_device_ops *ops = netdev->netdev_ops;
+
+ /* If a driver registers a devlink port, it should set the devlink
+ * port attributes accordingly so that the compat functions are
+ * called and the original ops are not used.
+ */
+ if (ops->ndo_get_phys_port_name) {
+ /* Some drivers use the same set of ndos for netdevs
+ * that have devlink_port registered and also for
+ * those who don't. Make sure that ndo_get_phys_port_name
+ * returns -EOPNOTSUPP here in case it is defined.
+ * Warn if not.
+ */
+ char name[IFNAMSIZ];
+ int err;
+
+ err = ops->ndo_get_phys_port_name(netdev, name, sizeof(name));
+ WARN_ON(err != -EOPNOTSUPP);
+ }
+ if (ops->ndo_get_port_parent_id) {
+ /* Some drivers use the same set of ndos for netdevs
+ * that have devlink_port registered and also for
+ * those who don't. Make sure that ndo_get_port_parent_id
+ * returns -EOPNOTSUPP here in case it is defined.
+ * Warn if not.
+ */
+ struct netdev_phys_item_id ppid;
+ int err;
+
+ err = ops->ndo_get_port_parent_id(netdev, &ppid);
+ WARN_ON(err != -EOPNOTSUPP);
+ }
+}
+
+static void __devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type type,
+ void *type_dev)
+{
+ struct net_device *netdev = type_dev;
+
+ ASSERT_DEVLINK_PORT_REGISTERED(devlink_port);
+
+ if (type == DEVLINK_PORT_TYPE_NOTSET) {
+ devlink_port_type_warn_schedule(devlink_port);
+ } else {
+ devlink_port_type_warn_cancel(devlink_port);
+ if (type == DEVLINK_PORT_TYPE_ETH && netdev)
+ devlink_port_type_netdev_checks(devlink_port, netdev);
+ }
+
+ spin_lock_bh(&devlink_port->type_lock);
+ devlink_port->type = type;
+ switch (type) {
+ case DEVLINK_PORT_TYPE_ETH:
+ devlink_port->type_eth.netdev = netdev;
+ if (netdev) {
+ ASSERT_RTNL();
+ devlink_port->type_eth.ifindex = netdev->ifindex;
+ BUILD_BUG_ON(sizeof(devlink_port->type_eth.ifname) !=
+ sizeof(netdev->name));
+ strcpy(devlink_port->type_eth.ifname, netdev->name);
+ }
+ break;
+ case DEVLINK_PORT_TYPE_IB:
+ devlink_port->type_ib.ibdev = type_dev;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_bh(&devlink_port->type_lock);
+ devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
+}
+
+/**
+ * devlink_port_type_eth_set - Set port type to Ethernet
+ *
+ * @devlink_port: devlink port
+ *
+ * If a driver calls this, it is most likely doing something wrong.
+ */
+void devlink_port_type_eth_set(struct devlink_port *devlink_port)
+{
+ dev_warn(devlink_port->devlink->dev,
+ "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
+ devlink_port->index);
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, NULL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
+
+/**
+ * devlink_port_type_ib_set - Set port type to InfiniBand
+ *
+ * @devlink_port: devlink port
+ * @ibdev: related IB device
+ */
+void devlink_port_type_ib_set(struct devlink_port *devlink_port,
+ struct ib_device *ibdev)
+{
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_IB, ibdev);
+}
+EXPORT_SYMBOL_GPL(devlink_port_type_ib_set);
+
+/**
+ * devlink_port_type_clear - Clear port type
+ *
+ * @devlink_port: devlink port
+ *
+ * If a driver calls this to clear the Ethernet type, it is most
+ * likely doing something wrong.
+ */
+void devlink_port_type_clear(struct devlink_port *devlink_port)
+{
+ if (devlink_port->type == DEVLINK_PORT_TYPE_ETH)
+ dev_warn(devlink_port->devlink->dev,
+ "devlink port type for port %d cleared without a software interface reference, device type not supported by the kernel?\n",
+ devlink_port->index);
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET, NULL);
+}
+EXPORT_SYMBOL_GPL(devlink_port_type_clear);
+
+int devlink_port_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct devlink_port *devlink_port = netdev->devlink_port;
+ struct devlink *devlink;
+
+ if (!devlink_port)
+ return NOTIFY_OK;
+ devlink = devlink_port->devlink;
+
+ switch (event) {
+ case NETDEV_POST_INIT:
+ /* Set the type but not the netdev pointer; that will be set
+ * later by the NETDEV_REGISTER event. This happens once, during
+ * netdevice registration.
+ */
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH,
+ NULL);
+ break;
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGENAME:
+ if (devlink_net(devlink) != dev_net(netdev))
+ return NOTIFY_OK;
+ /* Set the netdev on top of the previously set type. Note that
+ * this event also happens during a net namespace change, so here
+ * we account for the netdev pointer appearing in this namespace.
+ */
+ __devlink_port_type_set(devlink_port, devlink_port->type,
+ netdev);
+ break;
+ case NETDEV_UNREGISTER:
+ if (devlink_net(devlink) != dev_net(netdev))
+ return NOTIFY_OK;
+ /* Clear the netdev pointer, but not the type. This event also
+ * happens during a net namespace change, so we need to clear the
+ * pointer to the netdev that is moving to another net namespace.
+ */
+ __devlink_port_type_set(devlink_port, devlink_port->type,
+ NULL);
+ break;
+ case NETDEV_PRE_UNINIT:
+ /* Clear the type and the netdev pointer. This happens once,
+ * during netdevice unregistration.
+ */
+ __devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_NOTSET,
+ NULL);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
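To summarize the notifier above in one place:

/* NETDEV_POST_INIT    -> type = ETH, netdev pointer left NULL
 * NETDEV_REGISTER /
 * NETDEV_CHANGENAME   -> type kept, netdev recorded (ifindex/ifname snapshot)
 * NETDEV_UNREGISTER   -> type kept, netdev pointer cleared
 * NETDEV_PRE_UNINIT   -> type = NOTSET, netdev pointer cleared
 * The REGISTER/CHANGENAME/UNREGISTER cases are additionally filtered by net
 * namespace, so a netns move only updates the pointer in the namespace the
 * devlink instance lives in.
 */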
+
+static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
+ enum devlink_port_flavour flavour)
+{
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+
+ devlink_port->attrs_set = true;
+ attrs->flavour = flavour;
+ if (attrs->switch_id.id_len) {
+ devlink_port->switch_port = true;
+ if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN))
+ attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN;
+ } else {
+ devlink_port->switch_port = false;
+ }
+ return 0;
+}
+
+/**
+ * devlink_port_attrs_set - Set port attributes
+ *
+ * @devlink_port: devlink port
+ * @attrs: devlink port attrs
+ */
+void devlink_port_attrs_set(struct devlink_port *devlink_port,
+ struct devlink_port_attrs *attrs)
+{
+ int ret;
+
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ devlink_port->attrs = *attrs;
+ ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
+ if (ret)
+ return;
+ WARN_ON(attrs->splittable && attrs->split);
+}
+EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
+
+/**
+ * devlink_port_attrs_pci_pf_set - Set PCI PF port attributes
+ *
+ * @devlink_port: devlink port
+ * @controller: associated controller number for the devlink port instance
+ * @pf: associated PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
+ */
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external)
+{
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ int ret;
+
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ ret = __devlink_port_attrs_set(devlink_port,
+ DEVLINK_PORT_FLAVOUR_PCI_PF);
+ if (ret)
+ return;
+ attrs->pci_pf.controller = controller;
+ attrs->pci_pf.pf = pf;
+ attrs->pci_pf.external = external;
+}
+EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
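A sketch of how a driver might use this for a local (non-external) PF port on controller 0, reusing the hypothetical foo_ names from the earlier examples:

static int foo_register_pf_port(struct devlink *devlink, struct foo_priv *priv,
                                unsigned int port_index, u16 pfnum)
{
        devlink_port_attrs_pci_pf_set(&priv->dl_port, 0, pfnum, false);
        return devl_port_register_with_ops(devlink, &priv->dl_port, port_index,
                                           &foo_port_ops);
}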
+
+/**
+ * devlink_port_attrs_pci_vf_set - Set PCI VF port attributes
+ *
+ * @devlink_port: devlink port
+ * @controller: associated controller number for the devlink port instance
+ * @pf: associated PF for the devlink port instance
+ * @vf: associated VF of a PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
+ */
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external)
+{
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ int ret;
+
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ ret = __devlink_port_attrs_set(devlink_port,
+ DEVLINK_PORT_FLAVOUR_PCI_VF);
+ if (ret)
+ return;
+ attrs->pci_vf.controller = controller;
+ attrs->pci_vf.pf = pf;
+ attrs->pci_vf.vf = vf;
+ attrs->pci_vf.external = external;
+}
+EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
+
+/**
+ * devlink_port_attrs_pci_sf_set - Set PCI SF port attributes
+ *
+ * @devlink_port: devlink port
+ * @controller: associated controller number for the devlink port instance
+ * @pf: associated PF for the devlink port instance
+ * @sf: associated SF of a PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
+ */
+void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u32 sf, bool external)
+{
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ int ret;
+
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ ret = __devlink_port_attrs_set(devlink_port,
+ DEVLINK_PORT_FLAVOUR_PCI_SF);
+ if (ret)
+ return;
+ attrs->pci_sf.controller = controller;
+ attrs->pci_sf.pf = pf;
+ attrs->pci_sf.sf = sf;
+ attrs->pci_sf.external = external;
+}
+EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_sf_set);
+
+/**
+ * devlink_port_linecard_set - Link port with a linecard
+ *
+ * @devlink_port: devlink port
+ * @linecard: devlink linecard
+ */
+void devlink_port_linecard_set(struct devlink_port *devlink_port,
+ struct devlink_linecard *linecard)
+{
+ ASSERT_DEVLINK_PORT_NOT_REGISTERED(devlink_port);
+
+ devlink_port->linecard = linecard;
+}
+EXPORT_SYMBOL_GPL(devlink_port_linecard_set);
+
+static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+ char *name, size_t len)
+{
+ struct devlink_port_attrs *attrs = &devlink_port->attrs;
+ int n = 0;
+
+ if (!devlink_port->attrs_set)
+ return -EOPNOTSUPP;
+
+ switch (attrs->flavour) {
+ case DEVLINK_PORT_FLAVOUR_PHYSICAL:
+ if (devlink_port->linecard)
+ n = snprintf(name, len, "l%u",
+ devlink_port->linecard->index);
+ if (n < len)
+ n += snprintf(name + n, len - n, "p%u",
+ attrs->phys.port_number);
+ if (n < len && attrs->split)
+ n += snprintf(name + n, len - n, "s%u",
+ attrs->phys.split_subport_number);
+ break;
+ case DEVLINK_PORT_FLAVOUR_CPU:
+ case DEVLINK_PORT_FLAVOUR_DSA:
+ case DEVLINK_PORT_FLAVOUR_UNUSED:
+ /* As CPU and DSA ports do not have an associated netdevice,
+ * this case should never happen.
+ */
+ WARN_ON(1);
+ return -EINVAL;
+ case DEVLINK_PORT_FLAVOUR_PCI_PF:
+ if (attrs->pci_pf.external) {
+ n = snprintf(name, len, "c%u", attrs->pci_pf.controller);
+ if (n >= len)
+ return -EINVAL;
+ len -= n;
+ name += n;
+ }
+ n = snprintf(name, len, "pf%u", attrs->pci_pf.pf);
+ break;
+ case DEVLINK_PORT_FLAVOUR_PCI_VF:
+ if (attrs->pci_vf.external) {
+ n = snprintf(name, len, "c%u", attrs->pci_vf.controller);
+ if (n >= len)
+ return -EINVAL;
+ len -= n;
+ name += n;
+ }
+ n = snprintf(name, len, "pf%uvf%u",
+ attrs->pci_vf.pf, attrs->pci_vf.vf);
+ break;
+ case DEVLINK_PORT_FLAVOUR_PCI_SF:
+ if (attrs->pci_sf.external) {
+ n = snprintf(name, len, "c%u", attrs->pci_sf.controller);
+ if (n >= len)
+ return -EINVAL;
+ len -= n;
+ name += n;
+ }
+ n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
+ attrs->pci_sf.sf);
+ break;
+ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+ return -EOPNOTSUPP;
+ }
+
+ if (n >= len)
+ return -EINVAL;
+
+ return 0;
+}
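For reference, the format strings above produce names such as:

/* "p1"        - physical port 1
 * "l2p1"      - physical port 1 on linecard 2
 * "p1s0"      - split subport 0 of physical port 1
 * "pf0"       - PCI PF 0 on the local controller
 * "c1pf0"     - PCI PF 0 on external controller 1
 * "pf0vf3"    - VF 3 of PF 0
 * "c1pf0sf42" - SF 42 of PF 0 on external controller 1
 */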
+
+int devlink_compat_phys_port_name_get(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct devlink_port *devlink_port;
+
+ /* RTNL mutex is held here which ensures that devlink_port
+ * instance cannot disappear in the middle. No need to take
+ * any devlink lock as only permanent values are accessed.
+ */
+ ASSERT_RTNL();
+
+ devlink_port = dev->devlink_port;
+ if (!devlink_port)
+ return -EOPNOTSUPP;
+
+ return __devlink_port_phys_port_name_get(devlink_port, name, len);
+}
+
+int devlink_compat_switch_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct devlink_port *devlink_port;
+
+ /* Caller must hold RTNL mutex or reference to dev, which ensures that
+ * devlink_port instance cannot disappear in the middle. No need to take
+ * any devlink lock as only permanent values are accessed.
+ */
+ devlink_port = dev->devlink_port;
+ if (!devlink_port || !devlink_port->switch_port)
+ return -EOPNOTSUPP;
+
+ memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
+
+ return 0;
+}
diff --git a/net/devlink/rate.c b/net/devlink/rate.c
new file mode 100644
index 000000000000..dff1593b8406
--- /dev/null
+++ b/net/devlink/rate.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+static inline bool
+devlink_rate_is_leaf(struct devlink_rate *devlink_rate)
+{
+ return devlink_rate->type == DEVLINK_RATE_TYPE_LEAF;
+}
+
+static inline bool
+devlink_rate_is_node(struct devlink_rate *devlink_rate)
+{
+ return devlink_rate->type == DEVLINK_RATE_TYPE_NODE;
+}
+
+static struct devlink_rate *
+devlink_rate_leaf_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ struct devlink_rate *devlink_rate;
+ struct devlink_port *devlink_port;
+
+ devlink_port = devlink_port_get_from_attrs(devlink, info->attrs);
+ if (IS_ERR(devlink_port))
+ return ERR_CAST(devlink_port);
+ devlink_rate = devlink_port->devlink_rate;
+ return devlink_rate ?: ERR_PTR(-ENODEV);
+}
+
+static struct devlink_rate *
+devlink_rate_node_get_by_name(struct devlink *devlink, const char *node_name)
+{
+ struct devlink_rate *devlink_rate;
+
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ if (devlink_rate_is_node(devlink_rate) &&
+ !strcmp(node_name, devlink_rate->name))
+ return devlink_rate;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+static struct devlink_rate *
+devlink_rate_node_get_from_attrs(struct devlink *devlink, struct nlattr **attrs)
+{
+ const char *rate_node_name;
+ size_t len;
+
+ if (!attrs[DEVLINK_ATTR_RATE_NODE_NAME])
+ return ERR_PTR(-EINVAL);
+ rate_node_name = nla_data(attrs[DEVLINK_ATTR_RATE_NODE_NAME]);
+ len = strlen(rate_node_name);
+ /* Name cannot be empty or a pure decimal number */
+ if (!len || strspn(rate_node_name, "0123456789") == len)
+ return ERR_PTR(-EINVAL);
+
+ return devlink_rate_node_get_by_name(devlink, rate_node_name);
+}
+
+static struct devlink_rate *
+devlink_rate_node_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ return devlink_rate_node_get_from_attrs(devlink, info->attrs);
+}
+
+static struct devlink_rate *
+devlink_rate_get_from_info(struct devlink *devlink, struct genl_info *info)
+{
+ struct nlattr **attrs = info->attrs;
+
+ if (attrs[DEVLINK_ATTR_PORT_INDEX])
+ return devlink_rate_leaf_get_from_info(devlink, info);
+ else if (attrs[DEVLINK_ATTR_RATE_NODE_NAME])
+ return devlink_rate_node_get_from_info(devlink, info);
+ else
+ return ERR_PTR(-EINVAL);
+}
+
+static int devlink_nl_rate_fill(struct sk_buff *msg,
+ struct devlink_rate *devlink_rate,
+ enum devlink_command cmd, u32 portid, u32 seq,
+ int flags, struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = devlink_rate->devlink;
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_u16(msg, DEVLINK_ATTR_RATE_TYPE, devlink_rate->type))
+ goto nla_put_failure;
+
+ if (devlink_rate_is_leaf(devlink_rate)) {
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ devlink_rate->devlink_port->index))
+ goto nla_put_failure;
+ } else if (devlink_rate_is_node(devlink_rate)) {
+ if (nla_put_string(msg, DEVLINK_ATTR_RATE_NODE_NAME,
+ devlink_rate->name))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_SHARE,
+ devlink_rate->tx_share, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_RATE_TX_MAX,
+ devlink_rate->tx_max, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, DEVLINK_ATTR_RATE_TX_PRIORITY,
+ devlink_rate->tx_priority))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, DEVLINK_ATTR_RATE_TX_WEIGHT,
+ devlink_rate->tx_weight))
+ goto nla_put_failure;
+
+ if (devlink_rate->parent)
+ if (nla_put_string(msg, DEVLINK_ATTR_RATE_PARENT_NODE_NAME,
+ devlink_rate->parent->name))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static void devlink_rate_notify(struct devlink_rate *devlink_rate,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = devlink_rate->devlink;
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON(cmd != DEVLINK_CMD_RATE_NEW && cmd != DEVLINK_CMD_RATE_DEL);
+
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_rate_fill(msg, devlink_rate, cmd, 0, 0, 0, NULL);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void devlink_rates_notify_register(struct devlink *devlink)
+{
+ struct devlink_rate *rate_node;
+
+ list_for_each_entry(rate_node, &devlink->rate_list, list)
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+}
+
+void devlink_rates_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_rate *rate_node;
+
+ list_for_each_entry_reverse(rate_node, &devlink->rate_list, list)
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
+}
+
+static int
+devlink_nl_rate_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_rate *devlink_rate;
+ int idx = 0;
+ int err = 0;
+
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ enum devlink_command cmd = DEVLINK_CMD_RATE_NEW;
+ u32 id = NETLINK_CB(cb->skb).portid;
+
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_rate_fill(msg, devlink_rate, cmd, id,
+ cb->nlh->nlmsg_seq, flags, NULL);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
+ }
+
+ return err;
+}
+
+int devlink_nl_rate_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_rate_get_dump_one);
+}
+
+int devlink_nl_rate_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *devlink_rate;
+ struct sk_buff *msg;
+ int err;
+
+ devlink_rate = devlink_rate_get_from_info(devlink, info);
+ if (IS_ERR(devlink_rate))
+ return PTR_ERR(devlink_rate);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_rate_fill(msg, devlink_rate, DEVLINK_CMD_RATE_NEW,
+ info->snd_portid, info->snd_seq, 0,
+ info->extack);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static bool
+devlink_rate_is_parent_node(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent)
+{
+ while (parent) {
+ if (parent == devlink_rate)
+ return true;
+ parent = parent->parent;
+ }
+ return false;
+}
+
+static int
+devlink_nl_rate_parent_node_set(struct devlink_rate *devlink_rate,
+ struct genl_info *info,
+ struct nlattr *nla_parent)
+{
+ struct devlink *devlink = devlink_rate->devlink;
+ const char *parent_name = nla_data(nla_parent);
+ const struct devlink_ops *ops = devlink->ops;
+ size_t len = strlen(parent_name);
+ struct devlink_rate *parent;
+ int err = -EOPNOTSUPP;
+
+ parent = devlink_rate->parent;
+
+ if (parent && !len) {
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_parent_set(devlink_rate, NULL,
+ devlink_rate->priv, NULL,
+ info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_parent_set(devlink_rate, NULL,
+ devlink_rate->priv, NULL,
+ info->extack);
+ if (err)
+ return err;
+
+ refcount_dec(&parent->refcnt);
+ devlink_rate->parent = NULL;
+ } else if (len) {
+ parent = devlink_rate_node_get_by_name(devlink, parent_name);
+ if (IS_ERR(parent))
+ return -ENODEV;
+
+ if (parent == devlink_rate) {
+ NL_SET_ERR_MSG(info->extack, "Parent to self is not allowed");
+ return -EINVAL;
+ }
+
+ if (devlink_rate_is_node(devlink_rate) &&
+ devlink_rate_is_parent_node(devlink_rate, parent->parent)) {
+ NL_SET_ERR_MSG(info->extack, "Node is already a parent of parent node.");
+ return -EEXIST;
+ }
+
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_parent_set(devlink_rate, parent,
+ devlink_rate->priv, parent->priv,
+ info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_parent_set(devlink_rate, parent,
+ devlink_rate->priv, parent->priv,
+ info->extack);
+ if (err)
+ return err;
+
+ if (devlink_rate->parent)
+ /* we're reassigning to another parent in this case */
+ refcount_dec(&devlink_rate->parent->refcnt);
+
+ refcount_inc(&parent->refcnt);
+ devlink_rate->parent = parent;
+ }
+
+ return 0;
+}
+
+static int devlink_nl_rate_set(struct devlink_rate *devlink_rate,
+ const struct devlink_ops *ops,
+ struct genl_info *info)
+{
+ struct nlattr *nla_parent, **attrs = info->attrs;
+ int err = -EOPNOTSUPP;
+ u32 priority;
+ u32 weight;
+ u64 rate;
+
+ if (attrs[DEVLINK_ATTR_RATE_TX_SHARE]) {
+ rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_SHARE]);
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tx_share_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tx_share_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ if (err)
+ return err;
+ devlink_rate->tx_share = rate;
+ }
+
+ if (attrs[DEVLINK_ATTR_RATE_TX_MAX]) {
+ rate = nla_get_u64(attrs[DEVLINK_ATTR_RATE_TX_MAX]);
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tx_max_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tx_max_set(devlink_rate, devlink_rate->priv,
+ rate, info->extack);
+ if (err)
+ return err;
+ devlink_rate->tx_max = rate;
+ }
+
+ if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY]) {
+ priority = nla_get_u32(attrs[DEVLINK_ATTR_RATE_TX_PRIORITY]);
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tx_priority_set(devlink_rate, devlink_rate->priv,
+ priority, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tx_priority_set(devlink_rate, devlink_rate->priv,
+ priority, info->extack);
+
+ if (err)
+ return err;
+ devlink_rate->tx_priority = priority;
+ }
+
+ if (attrs[DEVLINK_ATTR_RATE_TX_WEIGHT]) {
+ weight = nla_get_u32(attrs[DEVLINK_ATTR_RATE_TX_WEIGHT]);
+ if (devlink_rate_is_leaf(devlink_rate))
+ err = ops->rate_leaf_tx_weight_set(devlink_rate, devlink_rate->priv,
+ weight, info->extack);
+ else if (devlink_rate_is_node(devlink_rate))
+ err = ops->rate_node_tx_weight_set(devlink_rate, devlink_rate->priv,
+ weight, info->extack);
+
+ if (err)
+ return err;
+ devlink_rate->tx_weight = weight;
+ }
+
+ nla_parent = attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME];
+ if (nla_parent) {
+ err = devlink_nl_rate_parent_node_set(devlink_rate, info,
+ nla_parent);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool devlink_rate_set_ops_supported(const struct devlink_ops *ops,
+ struct genl_info *info,
+ enum devlink_rate_type type)
+{
+ struct nlattr **attrs = info->attrs;
+
+ if (type == DEVLINK_RATE_TYPE_LEAF) {
+ if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_leaf_tx_share_set) {
+ NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the leafs");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_leaf_tx_max_set) {
+ NL_SET_ERR_MSG(info->extack, "TX max set isn't supported for the leafs");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
+ !ops->rate_leaf_parent_set) {
+ NL_SET_ERR_MSG(info->extack, "Parent set isn't supported for the leafs");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY] && !ops->rate_leaf_tx_priority_set) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[DEVLINK_ATTR_RATE_TX_PRIORITY],
+ "TX priority set isn't supported for the leafs");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_WEIGHT] && !ops->rate_leaf_tx_weight_set) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[DEVLINK_ATTR_RATE_TX_WEIGHT],
+ "TX weight set isn't supported for the leafs");
+ return false;
+ }
+ } else if (type == DEVLINK_RATE_TYPE_NODE) {
+ if (attrs[DEVLINK_ATTR_RATE_TX_SHARE] && !ops->rate_node_tx_share_set) {
+ NL_SET_ERR_MSG(info->extack, "TX share set isn't supported for the nodes");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_MAX] && !ops->rate_node_tx_max_set) {
+ NL_SET_ERR_MSG(info->extack, "TX max set isn't supported for the nodes");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_PARENT_NODE_NAME] &&
+ !ops->rate_node_parent_set) {
+ NL_SET_ERR_MSG(info->extack, "Parent set isn't supported for the nodes");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_PRIORITY] && !ops->rate_node_tx_priority_set) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[DEVLINK_ATTR_RATE_TX_PRIORITY],
+ "TX priority set isn't supported for the nodes");
+ return false;
+ }
+ if (attrs[DEVLINK_ATTR_RATE_TX_WEIGHT] && !ops->rate_node_tx_weight_set) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ attrs[DEVLINK_ATTR_RATE_TX_WEIGHT],
+ "TX weight set isn't supported for the nodes");
+ return false;
+ }
+ } else {
+ WARN(1, "Unknown type of rate object");
+ return false;
+ }
+
+ return true;
+}
+
+int devlink_nl_cmd_rate_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *devlink_rate;
+ const struct devlink_ops *ops;
+ int err;
+
+ devlink_rate = devlink_rate_get_from_info(devlink, info);
+ if (IS_ERR(devlink_rate))
+ return PTR_ERR(devlink_rate);
+
+ ops = devlink->ops;
+ if (!ops || !devlink_rate_set_ops_supported(ops, info, devlink_rate->type))
+ return -EOPNOTSUPP;
+
+ err = devlink_nl_rate_set(devlink_rate, ops, info);
+
+ if (!err)
+ devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
+ return err;
+}
+
+int devlink_nl_cmd_rate_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *rate_node;
+ const struct devlink_ops *ops;
+ int err;
+
+ ops = devlink->ops;
+ if (!ops || !ops->rate_node_new || !ops->rate_node_del) {
+ NL_SET_ERR_MSG(info->extack, "Rate nodes aren't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!devlink_rate_set_ops_supported(ops, info, DEVLINK_RATE_TYPE_NODE))
+ return -EOPNOTSUPP;
+
+ rate_node = devlink_rate_node_get_from_attrs(devlink, info->attrs);
+ if (!IS_ERR(rate_node))
+ return -EEXIST;
+ else if (rate_node == ERR_PTR(-EINVAL))
+ return -EINVAL;
+
+ rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
+ if (!rate_node)
+ return -ENOMEM;
+
+ rate_node->devlink = devlink;
+ rate_node->type = DEVLINK_RATE_TYPE_NODE;
+ rate_node->name = nla_strdup(info->attrs[DEVLINK_ATTR_RATE_NODE_NAME], GFP_KERNEL);
+ if (!rate_node->name) {
+ err = -ENOMEM;
+ goto err_strdup;
+ }
+
+ err = ops->rate_node_new(rate_node, &rate_node->priv, info->extack);
+ if (err)
+ goto err_node_new;
+
+ err = devlink_nl_rate_set(rate_node, ops, info);
+ if (err)
+ goto err_rate_set;
+
+ refcount_set(&rate_node->refcnt, 1);
+ list_add(&rate_node->list, &devlink->rate_list);
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+ return 0;
+
+err_rate_set:
+ ops->rate_node_del(rate_node, rate_node->priv, info->extack);
+err_node_new:
+ kfree(rate_node->name);
+err_strdup:
+ kfree(rate_node);
+ return err;
+}
+
+int devlink_nl_cmd_rate_del_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_rate *rate_node;
+ int err;
+
+ rate_node = devlink_rate_node_get_from_info(devlink, info);
+ if (IS_ERR(rate_node))
+ return PTR_ERR(rate_node);
+
+ if (refcount_read(&rate_node->refcnt) > 1) {
+ NL_SET_ERR_MSG(info->extack, "Node has children. Cannot delete node.");
+ return -EBUSY;
+ }
+
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_DEL);
+ err = devlink->ops->rate_node_del(rate_node, rate_node->priv,
+ info->extack);
+ if (rate_node->parent)
+ refcount_dec(&rate_node->parent->refcnt);
+ list_del(&rate_node->list);
+ kfree(rate_node->name);
+ kfree(rate_node);
+ return err;
+}
+
+int devlink_rate_nodes_check(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_rate *devlink_rate;
+
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list)
+ if (devlink_rate_is_node(devlink_rate)) {
+ NL_SET_ERR_MSG(extack, "Rate node(s) exists.");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * devl_rate_node_create - create devlink rate node
+ * @devlink: devlink instance
+ * @priv: driver private data
+ * @node_name: name of the resulting node
+ * @parent: parent devlink_rate struct
+ *
+ * Create devlink rate object of type node
+ */
+struct devlink_rate *
+devl_rate_node_create(struct devlink *devlink, void *priv, char *node_name,
+ struct devlink_rate *parent)
+{
+ struct devlink_rate *rate_node;
+
+ rate_node = devlink_rate_node_get_by_name(devlink, node_name);
+ if (!IS_ERR(rate_node))
+ return ERR_PTR(-EEXIST);
+
+ rate_node = kzalloc(sizeof(*rate_node), GFP_KERNEL);
+ if (!rate_node)
+ return ERR_PTR(-ENOMEM);
+
+ if (parent) {
+ rate_node->parent = parent;
+ refcount_inc(&rate_node->parent->refcnt);
+ }
+
+ rate_node->type = DEVLINK_RATE_TYPE_NODE;
+ rate_node->devlink = devlink;
+ rate_node->priv = priv;
+
+ rate_node->name = kstrdup(node_name, GFP_KERNEL);
+ if (!rate_node->name) {
+ kfree(rate_node);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ refcount_set(&rate_node->refcnt, 1);
+ list_add(&rate_node->list, &devlink->rate_list);
+ devlink_rate_notify(rate_node, DEVLINK_CMD_RATE_NEW);
+ return rate_node;
+}
+EXPORT_SYMBOL_GPL(devl_rate_node_create);
+
+/**
+ * devl_rate_leaf_create - create devlink rate leaf
+ * @devlink_port: devlink port object to create rate object on
+ * @priv: driver private data
+ * @parent: parent devlink_rate struct
+ *
+ * Create devlink rate object of type leaf on provided @devlink_port.
+ */
+int devl_rate_leaf_create(struct devlink_port *devlink_port, void *priv,
+ struct devlink_rate *parent)
+{
+ struct devlink *devlink = devlink_port->devlink;
+ struct devlink_rate *devlink_rate;
+
+ devl_assert_locked(devlink_port->devlink);
+
+ if (WARN_ON(devlink_port->devlink_rate))
+ return -EBUSY;
+
+ devlink_rate = kzalloc(sizeof(*devlink_rate), GFP_KERNEL);
+ if (!devlink_rate)
+ return -ENOMEM;
+
+ if (parent) {
+ devlink_rate->parent = parent;
+ refcount_inc(&devlink_rate->parent->refcnt);
+ }
+
+ devlink_rate->type = DEVLINK_RATE_TYPE_LEAF;
+ devlink_rate->devlink = devlink;
+ devlink_rate->devlink_port = devlink_port;
+ devlink_rate->priv = priv;
+ list_add_tail(&devlink_rate->list, &devlink->rate_list);
+ devlink_port->devlink_rate = devlink_rate;
+ devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_NEW);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_rate_leaf_create);
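Taken together, the two constructors above are what a driver calls to build its rate hierarchy. A minimal sketch, assuming a hypothetical struct foo_port that embeds its devlink_port and an opaque driver group object (none of the foo_* names are from this patch); the devl_* calls expect the devlink instance lock:

	static int foo_rate_hierarchy_init(struct devlink *devlink,
					   void *group_priv,
					   struct foo_port *fport)
	{
		struct devlink_rate *node;
		int err;

		devl_lock(devlink);

		/* Named scheduling node; the core stores group_priv in
		 * node->priv and reports the node over netlink.
		 */
		node = devl_rate_node_create(devlink, group_priv, "group0",
					     NULL);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		/* Leaf rate object for the port, parented to the node. */
		err = devl_rate_leaf_create(&fport->devlink_port, fport, node);
	out:
		devl_unlock(devlink);
		return err;
	}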
+
+/**
+ * devl_rate_leaf_destroy - destroy devlink rate leaf
+ *
+ * @devlink_port: devlink port linked to the rate object
+ *
+ * Destroy the devlink rate object of type leaf on provided @devlink_port.
+ */
+void devl_rate_leaf_destroy(struct devlink_port *devlink_port)
+{
+ struct devlink_rate *devlink_rate = devlink_port->devlink_rate;
+
+ devl_assert_locked(devlink_port->devlink);
+ if (!devlink_rate)
+ return;
+
+ devlink_rate_notify(devlink_rate, DEVLINK_CMD_RATE_DEL);
+ if (devlink_rate->parent)
+ refcount_dec(&devlink_rate->parent->refcnt);
+ list_del(&devlink_rate->list);
+ devlink_port->devlink_rate = NULL;
+ kfree(devlink_rate);
+}
+EXPORT_SYMBOL_GPL(devl_rate_leaf_destroy);
+
+/**
+ * devl_rate_nodes_destroy - destroy all devlink rate nodes on device
+ * @devlink: devlink instance
+ *
+ * Unset parent for all rate objects and destroy all rate nodes
+ * on specified device.
+ */
+void devl_rate_nodes_destroy(struct devlink *devlink)
+{
+	struct devlink_rate *devlink_rate, *tmp;
+ const struct devlink_ops *ops = devlink->ops;
+
+ devl_assert_locked(devlink);
+
+ list_for_each_entry(devlink_rate, &devlink->rate_list, list) {
+ if (!devlink_rate->parent)
+ continue;
+
+ refcount_dec(&devlink_rate->parent->refcnt);
+ if (devlink_rate_is_leaf(devlink_rate))
+ ops->rate_leaf_parent_set(devlink_rate, NULL, devlink_rate->priv,
+ NULL, NULL);
+ else if (devlink_rate_is_node(devlink_rate))
+ ops->rate_node_parent_set(devlink_rate, NULL, devlink_rate->priv,
+ NULL, NULL);
+ }
+ list_for_each_entry_safe(devlink_rate, tmp, &devlink->rate_list, list) {
+ if (devlink_rate_is_node(devlink_rate)) {
+ ops->rate_node_del(devlink_rate, devlink_rate->priv, NULL);
+ list_del(&devlink_rate->list);
+ kfree(devlink_rate->name);
+ kfree(devlink_rate);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(devl_rate_nodes_destroy);
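The symmetric teardown, under the same assumptions as the sketch above, destroys the port leaf first and then lets devl_rate_nodes_destroy() drop whatever nodes remain:

	static void foo_rate_hierarchy_fini(struct devlink *devlink,
					    struct foo_port *fport)
	{
		devl_lock(devlink);
		devl_rate_leaf_destroy(&fport->devlink_port);
		devl_rate_nodes_destroy(devlink);
		devl_unlock(devlink);
	}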
diff --git a/net/devlink/region.c b/net/devlink/region.c
new file mode 100644
index 000000000000..d197cdb662db
--- /dev/null
+++ b/net/devlink/region.c
@@ -0,0 +1,1260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+struct devlink_region {
+ struct devlink *devlink;
+ struct devlink_port *port;
+ struct list_head list;
+ union {
+ const struct devlink_region_ops *ops;
+ const struct devlink_port_region_ops *port_ops;
+ };
+ struct mutex snapshot_lock; /* protects snapshot_list,
+ * max_snapshots and cur_snapshots
+ * consistency.
+ */
+ struct list_head snapshot_list;
+ u32 max_snapshots;
+ u32 cur_snapshots;
+ u64 size;
+};
+
+struct devlink_snapshot {
+ struct list_head list;
+ struct devlink_region *region;
+ u8 *data;
+ u32 id;
+};
+
+static struct devlink_region *
+devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
+{
+ struct devlink_region *region;
+
+ list_for_each_entry(region, &devlink->region_list, list)
+ if (!strcmp(region->ops->name, region_name))
+ return region;
+
+ return NULL;
+}
+
+static struct devlink_region *
+devlink_port_region_get_by_name(struct devlink_port *port,
+ const char *region_name)
+{
+ struct devlink_region *region;
+
+ list_for_each_entry(region, &port->region_list, list)
+ if (!strcmp(region->ops->name, region_name))
+ return region;
+
+ return NULL;
+}
+
+static struct devlink_snapshot *
+devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
+{
+ struct devlink_snapshot *snapshot;
+
+ list_for_each_entry(snapshot, &region->snapshot_list, list)
+ if (snapshot->id == id)
+ return snapshot;
+
+ return NULL;
+}
+
+static int devlink_nl_region_snapshot_id_put(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_snapshot *snapshot)
+{
+ struct nlattr *snap_attr;
+ int err;
+
+ snap_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_SNAPSHOT);
+ if (!snap_attr)
+ return -EINVAL;
+
+ err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID, snapshot->id);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(msg, snap_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, snap_attr);
+ return err;
+}
+
+static int devlink_nl_region_snapshots_id_put(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_region *region)
+{
+ struct devlink_snapshot *snapshot;
+ struct nlattr *snapshots_attr;
+ int err;
+
+ snapshots_attr = nla_nest_start_noflag(msg,
+ DEVLINK_ATTR_REGION_SNAPSHOTS);
+ if (!snapshots_attr)
+ return -EINVAL;
+
+ list_for_each_entry(snapshot, &region->snapshot_list, list) {
+ err = devlink_nl_region_snapshot_id_put(msg, devlink, snapshot);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ nla_nest_end(msg, snapshots_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, snapshots_attr);
+ return err;
+}
+
+static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags,
+ struct devlink_region *region)
+{
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ err = devlink_nl_put_handle(msg, devlink);
+ if (err)
+ goto nla_put_failure;
+
+ if (region->port) {
+ err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+ region->size,
+ DEVLINK_ATTR_PAD);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_u32(msg, DEVLINK_ATTR_REGION_MAX_SNAPSHOTS,
+ region->max_snapshots);
+ if (err)
+ goto nla_put_failure;
+
+ err = devlink_nl_region_snapshots_id_put(msg, devlink, region);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static struct sk_buff *
+devlink_nl_region_notify_build(struct devlink_region *region,
+ struct devlink_snapshot *snapshot,
+ enum devlink_command cmd, u32 portid, u32 seq)
+{
+ struct devlink *devlink = region->devlink;
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, 0, cmd);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto out_free_msg;
+ }
+
+ err = devlink_nl_put_handle(msg, devlink);
+ if (err)
+ goto out_cancel_msg;
+
+ if (region->port) {
+ err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index);
+ if (err)
+ goto out_cancel_msg;
+ }
+
+ err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
+ region->ops->name);
+ if (err)
+ goto out_cancel_msg;
+
+ if (snapshot) {
+ err = nla_put_u32(msg, DEVLINK_ATTR_REGION_SNAPSHOT_ID,
+ snapshot->id);
+ if (err)
+ goto out_cancel_msg;
+ } else {
+ err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_SIZE,
+ region->size, DEVLINK_ATTR_PAD);
+ if (err)
+ goto out_cancel_msg;
+ }
+ genlmsg_end(msg, hdr);
+
+ return msg;
+
+out_cancel_msg:
+ genlmsg_cancel(msg, hdr);
+out_free_msg:
+ nlmsg_free(msg);
+ return ERR_PTR(err);
+}
+
+static void devlink_nl_region_notify(struct devlink_region *region,
+ struct devlink_snapshot *snapshot,
+ enum devlink_command cmd)
+{
+ struct devlink *devlink = region->devlink;
+ struct sk_buff *msg;
+
+ WARN_ON(cmd != DEVLINK_CMD_REGION_NEW && cmd != DEVLINK_CMD_REGION_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = devlink_nl_region_notify_build(region, snapshot, cmd, 0, 0);
+ if (IS_ERR(msg))
+ return;
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), msg,
+ 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void devlink_regions_notify_register(struct devlink *devlink)
+{
+ struct devlink_region *region;
+
+ list_for_each_entry(region, &devlink->region_list, list)
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+}
+
+void devlink_regions_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_region *region;
+
+ list_for_each_entry_reverse(region, &devlink->region_list, list)
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+}
+
+/**
+ * __devlink_snapshot_id_increment - Increment number of snapshots using an id
+ * @devlink: devlink instance
+ * @id: the snapshot id
+ *
+ * Track when a new snapshot begins using an id. Load the count for the
+ * given id from the snapshot xarray, increment it, and store it back.
+ *
+ * Called when a new snapshot is created with the given id.
+ *
+ * The id *must* have been previously allocated by
+ * devlink_region_snapshot_id_get().
+ *
+ * Returns 0 on success, or an error on failure.
+ */
+static int __devlink_snapshot_id_increment(struct devlink *devlink, u32 id)
+{
+ unsigned long count;
+ void *p;
+ int err;
+
+ xa_lock(&devlink->snapshot_ids);
+ p = xa_load(&devlink->snapshot_ids, id);
+ if (WARN_ON(!p)) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ if (WARN_ON(!xa_is_value(p))) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ count = xa_to_value(p);
+ count++;
+
+ err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+ GFP_ATOMIC));
+unlock:
+ xa_unlock(&devlink->snapshot_ids);
+ return err;
+}
+
+/**
+ * __devlink_snapshot_id_decrement - Decrease number of snapshots using an id
+ * @devlink: devlink instance
+ * @id: the snapshot id
+ *
+ * Track when a snapshot is deleted and stops using an id. Load the count
+ * for the given id from the snapshot xarray, decrement it, and store it
+ * back.
+ *
+ * If the count reaches zero, erase this id from the xarray, freeing it
+ * up for future re-use by devlink_region_snapshot_id_get().
+ *
+ * Called when a snapshot using the given id is deleted, and when the
+ * initial allocator of the id is finished using it.
+ */
+static void __devlink_snapshot_id_decrement(struct devlink *devlink, u32 id)
+{
+ unsigned long count;
+ void *p;
+
+ xa_lock(&devlink->snapshot_ids);
+ p = xa_load(&devlink->snapshot_ids, id);
+ if (WARN_ON(!p))
+ goto unlock;
+
+ if (WARN_ON(!xa_is_value(p)))
+ goto unlock;
+
+ count = xa_to_value(p);
+
+ if (count > 1) {
+ count--;
+ __xa_store(&devlink->snapshot_ids, id, xa_mk_value(count),
+ GFP_ATOMIC);
+ } else {
+ /* If this was the last user, we can erase this id */
+ __xa_erase(&devlink->snapshot_ids, id);
+ }
+unlock:
+ xa_unlock(&devlink->snapshot_ids);
+}
+
+/**
+ * __devlink_snapshot_id_insert - Insert a specific snapshot ID
+ * @devlink: devlink instance
+ * @id: the snapshot id
+ *
+ * Mark the given snapshot id as used by inserting a zero value into the
+ * snapshot xarray.
+ *
+ * This must be called while holding the devlink instance lock. Unlike
+ * devlink_region_snapshot_id_get(), the initial reference count is zero,
+ * not one.
+ * It is expected that the id will immediately be used before
+ * releasing the devlink instance lock.
+ *
+ * Returns zero on success, or an error code if the snapshot id could not
+ * be inserted.
+ */
+static int __devlink_snapshot_id_insert(struct devlink *devlink, u32 id)
+{
+ int err;
+
+ xa_lock(&devlink->snapshot_ids);
+ if (xa_load(&devlink->snapshot_ids, id)) {
+ xa_unlock(&devlink->snapshot_ids);
+ return -EEXIST;
+ }
+ err = xa_err(__xa_store(&devlink->snapshot_ids, id, xa_mk_value(0),
+ GFP_ATOMIC));
+ xa_unlock(&devlink->snapshot_ids);
+ return err;
+}
+
+/**
+ * __devlink_region_snapshot_id_get - get snapshot ID
+ * @devlink: devlink instance
+ * @id: storage to return snapshot id
+ *
+ * Allocates a new snapshot id. Returns zero on success, or a negative
+ * error on failure. Must be called while holding the devlink instance
+ * lock.
+ *
+ * Snapshot IDs are tracked using an xarray which stores the number of
+ * users of the snapshot id.
+ *
+ * Note that the caller of this function counts as a 'user', in order to
+ * avoid race conditions. The caller must release its hold on the
+ * snapshot by using devlink_region_snapshot_id_put.
+ */
+static int __devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
+{
+ return xa_alloc(&devlink->snapshot_ids, id, xa_mk_value(1),
+ xa_limit_32b, GFP_KERNEL);
+}
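All of the id helpers above share one idiom: the snapshot id is the xarray index and the stored entry is just a reference count wrapped with xa_mk_value(). An isolated sketch of that idiom, purely illustrative and not part of this patch (devlink itself holds xa_lock around the load/store pair):

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC(example_ids);

	/* allocate a free id with an initial count of one */
	static int example_id_get(u32 *id)
	{
		return xa_alloc(&example_ids, id, xa_mk_value(1),
				xa_limit_32b, GFP_KERNEL);
	}

	/* drop one reference; erase the index once nobody uses the id */
	static void example_id_put(u32 id)
	{
		void *p = xa_load(&example_ids, id);
		unsigned long count;

		if (WARN_ON(!xa_is_value(p)))
			return;

		count = xa_to_value(p);
		if (count > 1)
			xa_store(&example_ids, id, xa_mk_value(count - 1),
				 GFP_KERNEL);
		else
			xa_erase(&example_ids, id);
	}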
+
+/**
+ * __devlink_region_snapshot_create - create a new snapshot
+ * This will add a new snapshot of a region. The snapshot
+ * will be stored on the region struct and can be accessed
+ * from devlink. This is useful for future analyses of snapshots.
+ * Multiple snapshots can be created on a region.
+ * The @snapshot_id should be obtained using the getter function.
+ *
+ * Must be called only while holding the region snapshot lock.
+ *
+ * @region: devlink region of the snapshot
+ * @data: snapshot data
+ * @snapshot_id: snapshot id to be created
+ */
+static int
+__devlink_region_snapshot_create(struct devlink_region *region,
+ u8 *data, u32 snapshot_id)
+{
+ struct devlink *devlink = region->devlink;
+ struct devlink_snapshot *snapshot;
+ int err;
+
+ lockdep_assert_held(&region->snapshot_lock);
+
+ /* check if region can hold one more snapshot */
+ if (region->cur_snapshots == region->max_snapshots)
+ return -ENOSPC;
+
+ if (devlink_region_snapshot_get_by_id(region, snapshot_id))
+ return -EEXIST;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot)
+ return -ENOMEM;
+
+ err = __devlink_snapshot_id_increment(devlink, snapshot_id);
+ if (err)
+ goto err_snapshot_id_increment;
+
+ snapshot->id = snapshot_id;
+ snapshot->region = region;
+ snapshot->data = data;
+
+ list_add_tail(&snapshot->list, &region->snapshot_list);
+
+ region->cur_snapshots++;
+
+ devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_NEW);
+ return 0;
+
+err_snapshot_id_increment:
+ kfree(snapshot);
+ return err;
+}
+
+static void devlink_region_snapshot_del(struct devlink_region *region,
+ struct devlink_snapshot *snapshot)
+{
+ struct devlink *devlink = region->devlink;
+
+ lockdep_assert_held(&region->snapshot_lock);
+
+ devlink_nl_region_notify(region, snapshot, DEVLINK_CMD_REGION_DEL);
+ region->cur_snapshots--;
+ list_del(&snapshot->list);
+ region->ops->destructor(snapshot->data);
+ __devlink_snapshot_id_decrement(devlink, snapshot->id);
+ kfree(snapshot);
+}
+
+int devlink_nl_region_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_port *port = NULL;
+ struct devlink_region *region;
+ const char *region_name;
+ struct sk_buff *msg;
+ unsigned int index;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME))
+ return -EINVAL;
+
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
+ region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
+ if (!region)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_region_fill(msg, devlink, DEVLINK_CMD_REGION_GET,
+ info->snd_portid, info->snd_seq, 0,
+ region);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb,
+ struct devlink_port *port,
+ int *idx, int start, int flags)
+{
+ struct devlink_region *region;
+ int err = 0;
+
+ list_for_each_entry(region, &port->region_list, list) {
+ if (*idx < start) {
+ (*idx)++;
+ continue;
+ }
+ err = devlink_nl_region_fill(msg, port->devlink,
+ DEVLINK_CMD_REGION_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ flags, region);
+ if (err)
+ goto out;
+ (*idx)++;
+ }
+
+out:
+ return err;
+}
+
+static int devlink_nl_region_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_region *region;
+ struct devlink_port *port;
+ unsigned long port_index;
+ int idx = 0;
+ int err;
+
+ list_for_each_entry(region, &devlink->region_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_region_fill(msg, devlink,
+ DEVLINK_CMD_REGION_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags,
+ region);
+ if (err) {
+ state->idx = idx;
+ return err;
+ }
+ idx++;
+ }
+
+ xa_for_each(&devlink->ports, port_index, port) {
+ err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, &idx,
+ state->idx, flags);
+ if (err) {
+ state->idx = idx;
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int devlink_nl_region_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_region_get_dump_one);
+}
+
+int devlink_nl_cmd_region_del(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_snapshot *snapshot;
+ struct devlink_port *port = NULL;
+ struct devlink_region *region;
+ const char *region_name;
+ unsigned int index;
+ u32 snapshot_id;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME) ||
+ GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_SNAPSHOT_ID))
+ return -EINVAL;
+
+ region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+ snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
+
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
+ if (!region)
+ return -EINVAL;
+
+ mutex_lock(&region->snapshot_lock);
+ snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+ if (!snapshot) {
+ mutex_unlock(&region->snapshot_lock);
+ return -EINVAL;
+ }
+
+ devlink_region_snapshot_del(region, snapshot);
+ mutex_unlock(&region->snapshot_lock);
+ return 0;
+}
+
+int devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_snapshot *snapshot;
+ struct devlink_port *port = NULL;
+ struct nlattr *snapshot_id_attr;
+ struct devlink_region *region;
+ const char *region_name;
+ unsigned int index;
+ u32 snapshot_id;
+ u8 *data;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_REGION_NAME)) {
+ NL_SET_ERR_MSG(info->extack, "No region name provided");
+ return -EINVAL;
+ }
+
+ region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
+
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
+ if (!region) {
+ NL_SET_ERR_MSG(info->extack, "The requested region does not exist");
+ return -EINVAL;
+ }
+
+ if (!region->ops->snapshot) {
+ NL_SET_ERR_MSG(info->extack, "The requested region does not support taking an immediate snapshot");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&region->snapshot_lock);
+
+ if (region->cur_snapshots == region->max_snapshots) {
+ NL_SET_ERR_MSG(info->extack, "The region has reached the maximum number of stored snapshots");
+ err = -ENOSPC;
+ goto unlock;
+ }
+
+ snapshot_id_attr = info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
+ if (snapshot_id_attr) {
+ snapshot_id = nla_get_u32(snapshot_id_attr);
+
+ if (devlink_region_snapshot_get_by_id(region, snapshot_id)) {
+ NL_SET_ERR_MSG(info->extack, "The requested snapshot id is already in use");
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ err = __devlink_snapshot_id_insert(devlink, snapshot_id);
+ if (err)
+ goto unlock;
+ } else {
+ err = __devlink_region_snapshot_id_get(devlink, &snapshot_id);
+ if (err) {
+ NL_SET_ERR_MSG(info->extack, "Failed to allocate a new snapshot id");
+ goto unlock;
+ }
+ }
+
+ if (port)
+ err = region->port_ops->snapshot(port, region->port_ops,
+ info->extack, &data);
+ else
+ err = region->ops->snapshot(devlink, region->ops,
+ info->extack, &data);
+ if (err)
+ goto err_snapshot_capture;
+
+ err = __devlink_region_snapshot_create(region, data, snapshot_id);
+ if (err)
+ goto err_snapshot_create;
+
+ if (!snapshot_id_attr) {
+ struct sk_buff *msg;
+
+ snapshot = devlink_region_snapshot_get_by_id(region,
+ snapshot_id);
+ if (WARN_ON(!snapshot)) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ msg = devlink_nl_region_notify_build(region, snapshot,
+ DEVLINK_CMD_REGION_NEW,
+ info->snd_portid,
+ info->snd_seq);
+ err = PTR_ERR_OR_ZERO(msg);
+ if (err)
+ goto err_notify;
+
+ err = genlmsg_reply(msg, info);
+ if (err)
+ goto err_notify;
+ }
+
+ mutex_unlock(&region->snapshot_lock);
+ return 0;
+
+err_snapshot_create:
+ region->ops->destructor(data);
+err_snapshot_capture:
+ __devlink_snapshot_id_decrement(devlink, snapshot_id);
+ mutex_unlock(&region->snapshot_lock);
+ return err;
+
+err_notify:
+ devlink_region_snapshot_del(region, snapshot);
+unlock:
+ mutex_unlock(&region->snapshot_lock);
+ return err;
+}
+
+static int devlink_nl_cmd_region_read_chunk_fill(struct sk_buff *msg,
+ u8 *chunk, u32 chunk_size,
+ u64 addr)
+{
+ struct nlattr *chunk_attr;
+ int err;
+
+ chunk_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_REGION_CHUNK);
+ if (!chunk_attr)
+ return -EINVAL;
+
+ err = nla_put(msg, DEVLINK_ATTR_REGION_CHUNK_DATA, chunk_size, chunk);
+ if (err)
+ goto nla_put_failure;
+
+ err = nla_put_u64_64bit(msg, DEVLINK_ATTR_REGION_CHUNK_ADDR, addr,
+ DEVLINK_ATTR_PAD);
+ if (err)
+ goto nla_put_failure;
+
+ nla_nest_end(msg, chunk_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, chunk_attr);
+ return err;
+}
+
+#define DEVLINK_REGION_READ_CHUNK_SIZE 256
+
+typedef int devlink_chunk_fill_t(void *cb_priv, u8 *chunk, u32 chunk_size,
+ u64 curr_offset,
+ struct netlink_ext_ack *extack);
+
+static int
+devlink_nl_region_read_fill(struct sk_buff *skb, devlink_chunk_fill_t *cb,
+ void *cb_priv, u64 start_offset, u64 end_offset,
+ u64 *new_offset, struct netlink_ext_ack *extack)
+{
+ u64 curr_offset = start_offset;
+ int err = 0;
+ u8 *data;
+
+ /* Allocate and re-use a single buffer */
+ data = kmalloc(DEVLINK_REGION_READ_CHUNK_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ *new_offset = start_offset;
+
+ while (curr_offset < end_offset) {
+ u32 data_size;
+
+ data_size = min_t(u32, end_offset - curr_offset,
+ DEVLINK_REGION_READ_CHUNK_SIZE);
+
+ err = cb(cb_priv, data, data_size, curr_offset, extack);
+ if (err)
+ break;
+
+ err = devlink_nl_cmd_region_read_chunk_fill(skb, data, data_size, curr_offset);
+ if (err)
+ break;
+
+ curr_offset += data_size;
+ }
+ *new_offset = curr_offset;
+
+ kfree(data);
+
+ return err;
+}
+
+static int
+devlink_region_snapshot_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
+ u64 curr_offset,
+ struct netlink_ext_ack __always_unused *extack)
+{
+ struct devlink_snapshot *snapshot = cb_priv;
+
+ memcpy(chunk, &snapshot->data[curr_offset], chunk_size);
+
+ return 0;
+}
+
+static int
+devlink_region_port_direct_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
+ u64 curr_offset, struct netlink_ext_ack *extack)
+{
+ struct devlink_region *region = cb_priv;
+
+ return region->port_ops->read(region->port, region->port_ops, extack,
+ curr_offset, chunk_size, chunk);
+}
+
+static int
+devlink_region_direct_fill(void *cb_priv, u8 *chunk, u32 chunk_size,
+ u64 curr_offset, struct netlink_ext_ack *extack)
+{
+ struct devlink_region *region = cb_priv;
+
+ return region->ops->read(region->devlink, region->ops, extack,
+ curr_offset, chunk_size, chunk);
+}
+
+int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct nlattr *chunks_attr, *region_attr, *snapshot_attr;
+ u64 ret_offset, start_offset, end_offset = U64_MAX;
+ struct nlattr **attrs = info->info.attrs;
+ struct devlink_port *port = NULL;
+ devlink_chunk_fill_t *region_cb;
+ struct devlink_region *region;
+ const char *region_name;
+ struct devlink *devlink;
+ unsigned int index;
+ void *region_cb_priv;
+ void *hdr;
+ int err;
+
+ start_offset = state->start_offset;
+
+ devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
+ if (IS_ERR(devlink))
+ return PTR_ERR(devlink);
+
+ if (!attrs[DEVLINK_ATTR_REGION_NAME]) {
+ NL_SET_ERR_MSG(cb->extack, "No region name provided");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
+ region_attr = attrs[DEVLINK_ATTR_REGION_NAME];
+ region_name = nla_data(region_attr);
+
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
+ if (!region) {
+ NL_SET_ERR_MSG_ATTR(cb->extack, region_attr, "Requested region does not exist");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ snapshot_attr = attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID];
+ if (!snapshot_attr) {
+ if (!nla_get_flag(attrs[DEVLINK_ATTR_REGION_DIRECT])) {
+ NL_SET_ERR_MSG(cb->extack, "No snapshot id provided");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!region->ops->read) {
+ NL_SET_ERR_MSG(cb->extack, "Requested region does not support direct read");
+ err = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
+ if (port)
+ region_cb = &devlink_region_port_direct_fill;
+ else
+ region_cb = &devlink_region_direct_fill;
+ region_cb_priv = region;
+ } else {
+ struct devlink_snapshot *snapshot;
+ u32 snapshot_id;
+
+ if (nla_get_flag(attrs[DEVLINK_ATTR_REGION_DIRECT])) {
+ NL_SET_ERR_MSG_ATTR(cb->extack, snapshot_attr, "Direct region read does not use snapshot");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ snapshot_id = nla_get_u32(snapshot_attr);
+ snapshot = devlink_region_snapshot_get_by_id(region, snapshot_id);
+ if (!snapshot) {
+ NL_SET_ERR_MSG_ATTR(cb->extack, snapshot_attr, "Requested snapshot does not exist");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ region_cb = &devlink_region_snapshot_fill;
+ region_cb_priv = snapshot;
+ }
+
+ if (attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR] &&
+ attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]) {
+ if (!start_offset)
+ start_offset =
+ nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+
+ end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
+ end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
+ }
+
+ if (end_offset > region->size)
+ end_offset = region->size;
+
+ /* return 0 if there is no further data to read */
+ if (start_offset == end_offset) {
+ err = 0;
+ goto out_unlock;
+ }
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
+ DEVLINK_CMD_REGION_READ);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto out_unlock;
+ }
+
+ err = devlink_nl_put_handle(skb, devlink);
+ if (err)
+ goto nla_put_failure;
+
+ if (region->port) {
+ err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index);
+ if (err)
+ goto nla_put_failure;
+ }
+
+ err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
+ if (err)
+ goto nla_put_failure;
+
+ chunks_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_REGION_CHUNKS);
+ if (!chunks_attr) {
+ err = -EMSGSIZE;
+ goto nla_put_failure;
+ }
+
+ err = devlink_nl_region_read_fill(skb, region_cb, region_cb_priv,
+ start_offset, end_offset, &ret_offset,
+ cb->extack);
+
+ if (err && err != -EMSGSIZE)
+ goto nla_put_failure;
+
+ /* Check if there was any progress done to prevent infinite loop */
+ if (ret_offset == start_offset) {
+ err = -EINVAL;
+ goto nla_put_failure;
+ }
+
+ state->start_offset = ret_offset;
+
+ nla_nest_end(skb, chunks_attr);
+ genlmsg_end(skb, hdr);
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ return skb->len;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+out_unlock:
+ devl_unlock(devlink);
+ devlink_put(devlink);
+ return err;
+}
+
+/**
+ * devl_region_create - create a new address region
+ *
+ * @devlink: devlink
+ * @ops: region operations and name
+ * @region_max_snapshots: Maximum supported number of snapshots for region
+ * @region_size: size of region
+ */
+struct devlink_region *devl_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots,
+ u64 region_size)
+{
+ struct devlink_region *region;
+
+ devl_assert_locked(devlink);
+
+ if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
+ return ERR_PTR(-EINVAL);
+
+ if (devlink_region_get_by_name(devlink, ops->name))
+ return ERR_PTR(-EEXIST);
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return ERR_PTR(-ENOMEM);
+
+ region->devlink = devlink;
+ region->max_snapshots = region_max_snapshots;
+ region->ops = ops;
+ region->size = region_size;
+ INIT_LIST_HEAD(&region->snapshot_list);
+ mutex_init(&region->snapshot_lock);
+ list_add_tail(&region->list, &devlink->region_list);
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(devl_region_create);
+
+/**
+ * devlink_region_create - create a new address region
+ *
+ * @devlink: devlink
+ * @ops: region operations and name
+ * @region_max_snapshots: Maximum supported number of snapshots for region
+ * @region_size: size of region
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ struct devlink_region *region;
+
+ devl_lock(devlink);
+ region = devl_region_create(devlink, ops, region_max_snapshots,
+ region_size);
+ devl_unlock(devlink);
+ return region;
+}
+EXPORT_SYMBOL_GPL(devlink_region_create);
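On the driver side a region is usually just a named ops struct with a snapshot callback and a destructor, created once at probe time. A sketch with hypothetical foo_* names and sizes (kfree is a valid destructor here only because the snapshot buffer comes from kzalloc):

	#define FOO_CRDUMP_SIZE			SZ_64K
	#define FOO_CRDUMP_MAX_SNAPSHOTS	8

	static int foo_crdump_snapshot(struct devlink *devlink,
				       const struct devlink_region_ops *ops,
				       struct netlink_ext_ack *extack,
				       u8 **data)
	{
		u8 *buf;

		buf = kzalloc(FOO_CRDUMP_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* fill buf with device state here; on success ownership
		 * moves to the devlink core, which frees it via
		 * ops->destructor when the snapshot is deleted
		 */
		*data = buf;
		return 0;
	}

	static const struct devlink_region_ops foo_crdump_ops = {
		.name = "crdump",
		.snapshot = foo_crdump_snapshot,
		.destructor = kfree,
	};

	/* at probe time: */
	region = devlink_region_create(devlink, &foo_crdump_ops,
				       FOO_CRDUMP_MAX_SNAPSHOTS,
				       FOO_CRDUMP_SIZE);
	if (IS_ERR(region))
		return PTR_ERR(region);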
+
+/**
+ * devlink_port_region_create - create a new address region for a port
+ *
+ * @port: devlink port
+ * @ops: region operations and name
+ * @region_max_snapshots: Maximum supported number of snapshots for region
+ * @region_size: size of region
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ struct devlink *devlink = port->devlink;
+ struct devlink_region *region;
+ int err = 0;
+
+ ASSERT_DEVLINK_PORT_INITIALIZED(port);
+
+ if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
+ return ERR_PTR(-EINVAL);
+
+ devl_lock(devlink);
+
+ if (devlink_port_region_get_by_name(port, ops->name)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ region->devlink = devlink;
+ region->port = port;
+ region->max_snapshots = region_max_snapshots;
+ region->port_ops = ops;
+ region->size = region_size;
+ INIT_LIST_HEAD(&region->snapshot_list);
+ mutex_init(&region->snapshot_lock);
+ list_add_tail(&region->list, &port->region_list);
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+
+ devl_unlock(devlink);
+ return region;
+
+unlock:
+ devl_unlock(devlink);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devlink_port_region_create);
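Port regions follow the same pattern; if the ops additionally implement .read, user space can bypass snapshots with the DEVLINK_ATTR_REGION_DIRECT flag handled by the read dumpit below. A hypothetical sketch (the foo_* names and foo_hw_read() are assumptions, not part of this patch):

	static int foo_port_regs_read(struct devlink_port *port,
				      const struct devlink_port_region_ops *ops,
				      struct netlink_ext_ack *extack,
				      u64 offset, u32 size, u8 *data)
	{
		struct foo_port *fport = container_of(port, struct foo_port,
						      devlink_port);

		/* copy `size` bytes of live register space at `offset` */
		return foo_hw_read(fport, offset, size, data);
	}

	static const struct devlink_port_region_ops foo_port_regs_ops = {
		.name = "regs",
		.read = foo_port_regs_read,
		.destructor = kfree,	/* required even for read-only use */
	};

	region = devlink_port_region_create(&fport->devlink_port,
					    &foo_port_regs_ops, 4,
					    FOO_PORT_REGS_SIZE);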
+
+/**
+ * devl_region_destroy - destroy address region
+ *
+ * @region: devlink region to destroy
+ */
+void devl_region_destroy(struct devlink_region *region)
+{
+ struct devlink *devlink = region->devlink;
+ struct devlink_snapshot *snapshot, *ts;
+
+ devl_assert_locked(devlink);
+
+ /* Free all snapshots of region */
+ mutex_lock(&region->snapshot_lock);
+ list_for_each_entry_safe(snapshot, ts, &region->snapshot_list, list)
+ devlink_region_snapshot_del(region, snapshot);
+ mutex_unlock(&region->snapshot_lock);
+
+ list_del(&region->list);
+ mutex_destroy(&region->snapshot_lock);
+
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_DEL);
+ kfree(region);
+}
+EXPORT_SYMBOL_GPL(devl_region_destroy);
+
+/**
+ * devlink_region_destroy - destroy address region
+ *
+ * @region: devlink region to destroy
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_region_destroy(struct devlink_region *region)
+{
+ struct devlink *devlink = region->devlink;
+
+ devl_lock(devlink);
+ devl_region_destroy(region);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_region_destroy);
+
+/**
+ * devlink_region_snapshot_id_get - get snapshot ID
+ *
+ * This function should be called when adding a new snapshot. The
+ * driver should use the same id for multiple snapshots taken on
+ * multiple regions at the same time or by the same trigger.
+ *
+ * The caller of this function must use devlink_region_snapshot_id_put
+ * when finished creating regions using this id.
+ *
+ * Returns zero on success, or a negative error code on failure.
+ *
+ * @devlink: devlink
+ * @id: storage to return id
+ */
+int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id)
+{
+ return __devlink_region_snapshot_id_get(devlink, id);
+}
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_get);
+
+/**
+ * devlink_region_snapshot_id_put - put snapshot ID reference
+ *
+ * This should be called by a driver after finishing creating snapshots
+ * with an id. Doing so ensures that the ID can later be released in the
+ * event that all snapshots using it have been destroyed.
+ *
+ * @devlink: devlink
+ * @id: id to release reference on
+ */
+void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id)
+{
+ __devlink_snapshot_id_decrement(devlink, id);
+}
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_id_put);
+
+/**
+ * devlink_region_snapshot_create - create a new snapshot
+ * This will add a new snapshot of a region. The snapshot
+ * will be stored on the region struct and can be accessed
+ * from devlink. This is useful for future analyses of snapshots.
+ * Multiple snapshots can be created on a region.
+ * The @snapshot_id should be obtained using the getter function.
+ *
+ * @region: devlink region of the snapshot
+ * @data: snapshot data
+ * @snapshot_id: snapshot id to be created
+ */
+int devlink_region_snapshot_create(struct devlink_region *region,
+ u8 *data, u32 snapshot_id)
+{
+ int err;
+
+ mutex_lock(&region->snapshot_lock);
+ err = __devlink_region_snapshot_create(region, data, snapshot_id);
+ mutex_unlock(&region->snapshot_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_region_snapshot_create);
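Putting the three exported snapshot helpers together, a driver-triggered snapshot (say, from a health reporter) looks roughly like this sketch; foo_capture() is an assumed helper that returns a buffer compatible with the region's destructor:

	static int foo_take_snapshot(struct devlink *devlink,
				     struct devlink_region *region)
	{
		u32 snapshot_id;
		u8 *data;
		int err;

		err = devlink_region_snapshot_id_get(devlink, &snapshot_id);
		if (err)
			return err;

		data = foo_capture(GFP_KERNEL);
		if (!data) {
			err = -ENOMEM;
			goto out;
		}

		/* on success the region owns `data` and frees it through
		 * ops->destructor when the snapshot is deleted
		 */
		err = devlink_region_snapshot_create(region, data, snapshot_id);
		if (err)
			kfree(data);
	out:
		/* drop the allocator's reference; the id stays live for as
		 * long as any snapshot still uses it
		 */
		devlink_region_snapshot_id_put(devlink, snapshot_id);
		return err;
	}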
diff --git a/net/devlink/resource.c b/net/devlink/resource.c
new file mode 100644
index 000000000000..c8b615e4c385
--- /dev/null
+++ b/net/devlink/resource.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+/**
+ * struct devlink_resource - devlink resource
+ * @name: name of the resource
+ * @id: id, per devlink instance
+ * @size: size of the resource
+ * @size_new: updated size of the resource; a devlink reload is needed
+ *            for it to take effect
+ * @size_valid: true when the total size of the resource, including its
+ *              children, is valid
+ * @parent: parent resource
+ * @size_params: size parameters
+ * @list: parent list
+ * @resource_list: list of child resources
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ */
+struct devlink_resource {
+ const char *name;
+ u64 id;
+ u64 size;
+ u64 size_new;
+ bool size_valid;
+ struct devlink_resource *parent;
+ struct devlink_resource_size_params size_params;
+ struct list_head list;
+ struct list_head resource_list;
+ devlink_resource_occ_get_t *occ_get;
+ void *occ_get_priv;
+};
+
+static struct devlink_resource *
+devlink_resource_find(struct devlink *devlink,
+ struct devlink_resource *resource, u64 resource_id)
+{
+ struct list_head *resource_list;
+
+ if (resource)
+ resource_list = &resource->resource_list;
+ else
+ resource_list = &devlink->resource_list;
+
+ list_for_each_entry(resource, resource_list, list) {
+ struct devlink_resource *child_resource;
+
+ if (resource->id == resource_id)
+ return resource;
+
+ child_resource = devlink_resource_find(devlink, resource,
+ resource_id);
+ if (child_resource)
+ return child_resource;
+ }
+ return NULL;
+}
+
+static void
+devlink_resource_validate_children(struct devlink_resource *resource)
+{
+ struct devlink_resource *child_resource;
+ bool size_valid = true;
+ u64 parts_size = 0;
+
+ if (list_empty(&resource->resource_list))
+ goto out;
+
+ list_for_each_entry(child_resource, &resource->resource_list, list)
+ parts_size += child_resource->size_new;
+
+ if (parts_size > resource->size_new)
+ size_valid = false;
+out:
+ resource->size_valid = size_valid;
+}
+
+static int
+devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
+ struct netlink_ext_ack *extack)
+{
+	u64 remainder;
+ int err = 0;
+
+ if (size > resource->size_params.size_max) {
+ NL_SET_ERR_MSG(extack, "Size larger than maximum");
+ err = -EINVAL;
+ }
+
+ if (size < resource->size_params.size_min) {
+ NL_SET_ERR_MSG(extack, "Size smaller than minimum");
+ err = -EINVAL;
+ }
+
+	div64_u64_rem(size, resource->size_params.size_granularity, &remainder);
+	if (remainder) {
+ NL_SET_ERR_MSG(extack, "Wrong granularity");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+int devlink_nl_cmd_resource_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_resource *resource;
+ u64 resource_id;
+ u64 size;
+ int err;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_ID) ||
+ GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_RESOURCE_SIZE))
+ return -EINVAL;
+ resource_id = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_ID]);
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (!resource)
+ return -EINVAL;
+
+ size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
+ err = devlink_resource_validate_size(resource, size, info->extack);
+ if (err)
+ return err;
+
+ resource->size_new = size;
+ devlink_resource_validate_children(resource);
+ if (resource->parent)
+ devlink_resource_validate_children(resource->parent);
+ return 0;
+}
+
+static int
+devlink_resource_size_params_put(struct devlink_resource *resource,
+ struct sk_buff *skb)
+{
+ struct devlink_resource_size_params *size_params;
+
+ size_params = &resource->size_params;
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+ size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+ size_params->size_max, DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+ size_params->size_min, DEVLINK_ATTR_PAD) ||
+ nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static int devlink_resource_occ_put(struct devlink_resource *resource,
+ struct sk_buff *skb)
+{
+ if (!resource->occ_get)
+ return 0;
+ return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+ resource->occ_get(resource->occ_get_priv),
+ DEVLINK_ATTR_PAD);
+}
+
+static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
+ struct devlink_resource *resource)
+{
+ struct devlink_resource *child_resource;
+ struct nlattr *child_resource_attr;
+ struct nlattr *resource_attr;
+
+ resource_attr = nla_nest_start_noflag(skb, DEVLINK_ATTR_RESOURCE);
+ if (!resource_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, DEVLINK_ATTR_RESOURCE_NAME, resource->name) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE, resource->size,
+ DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_ID, resource->id,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (resource->size != resource->size_new &&
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
+ resource->size_new, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (devlink_resource_occ_put(resource, skb))
+ goto nla_put_failure;
+ if (devlink_resource_size_params_put(resource, skb))
+ goto nla_put_failure;
+ if (list_empty(&resource->resource_list))
+ goto out;
+
+ if (nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_SIZE_VALID,
+ resource->size_valid))
+ goto nla_put_failure;
+
+ child_resource_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_RESOURCE_LIST);
+ if (!child_resource_attr)
+ goto nla_put_failure;
+
+ list_for_each_entry(child_resource, &resource->resource_list, list) {
+ if (devlink_resource_put(devlink, skb, child_resource))
+ goto resource_put_failure;
+ }
+
+ nla_nest_end(skb, child_resource_attr);
+out:
+ nla_nest_end(skb, resource_attr);
+ return 0;
+
+resource_put_failure:
+ nla_nest_cancel(skb, child_resource_attr);
+nla_put_failure:
+ nla_nest_cancel(skb, resource_attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_resource_fill(struct genl_info *info,
+ enum devlink_command cmd, int flags)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_resource *resource;
+ struct nlattr *resources_attr;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ bool incomplete;
+ void *hdr;
+ int i;
+ int err;
+
+ resource = list_first_entry(&devlink->resource_list,
+ struct devlink_resource, list);
+start_again:
+ err = devlink_nl_msg_reply_and_new(&skb, info);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
+ &devlink_nl_family, NLM_F_MULTI, cmd);
+ if (!hdr) {
+ nlmsg_free(skb);
+ return -EMSGSIZE;
+ }
+
+ if (devlink_nl_put_handle(skb, devlink))
+ goto nla_put_failure;
+
+ resources_attr = nla_nest_start_noflag(skb,
+ DEVLINK_ATTR_RESOURCE_LIST);
+ if (!resources_attr)
+ goto nla_put_failure;
+
+ incomplete = false;
+ i = 0;
+ list_for_each_entry_from(resource, &devlink->resource_list, list) {
+ err = devlink_resource_put(devlink, skb, resource);
+ if (err) {
+ if (!i)
+ goto err_resource_put;
+ incomplete = true;
+ break;
+ }
+ i++;
+ }
+ nla_nest_end(skb, resources_attr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+send_done:
+ nlh = nlmsg_put(skb, info->snd_portid, info->snd_seq,
+ NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = devlink_nl_msg_reply_and_new(&skb, info);
+ if (err)
+ return err;
+ goto send_done;
+ }
+ return genlmsg_reply(skb, info);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+err_resource_put:
+ nlmsg_free(skb);
+ return err;
+}
+
+int devlink_nl_cmd_resource_dump(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (list_empty(&devlink->resource_list))
+ return -EOPNOTSUPP;
+
+ return devlink_resource_fill(info, DEVLINK_CMD_RESOURCE_DUMP, 0);
+}
+
+int devlink_resources_validate(struct devlink *devlink,
+ struct devlink_resource *resource,
+ struct genl_info *info)
+{
+ struct list_head *resource_list;
+ int err = 0;
+
+ if (resource)
+ resource_list = &resource->resource_list;
+ else
+ resource_list = &devlink->resource_list;
+
+ list_for_each_entry(resource, resource_list, list) {
+ if (!resource->size_valid)
+ return -EINVAL;
+ err = devlink_resources_validate(devlink, resource, info);
+ if (err)
+ return err;
+ }
+ return err;
+}
+
+/**
+ * devl_resource_register - devlink resource register
+ *
+ * @devlink: devlink
+ * @resource_name: resource's name
+ * @resource_size: resource's size
+ * @resource_id: resource's id
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
+ *
+ * Generic resources should reuse the same names across drivers.
+ * Please see the generic resources list at:
+ * Documentation/networking/devlink/devlink-resource.rst
+ */
+int devl_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ struct devlink_resource *resource;
+ struct list_head *resource_list;
+ bool top_hierarchy;
+
+ lockdep_assert_held(&devlink->lock);
+
+ top_hierarchy = parent_resource_id == DEVLINK_RESOURCE_ID_PARENT_TOP;
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (resource)
+ return -EINVAL;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return -ENOMEM;
+
+ if (top_hierarchy) {
+ resource_list = &devlink->resource_list;
+ } else {
+ struct devlink_resource *parent_resource;
+
+ parent_resource = devlink_resource_find(devlink, NULL,
+ parent_resource_id);
+ if (parent_resource) {
+ resource_list = &parent_resource->resource_list;
+ resource->parent = parent_resource;
+ } else {
+ kfree(resource);
+ return -EINVAL;
+ }
+ }
+
+ resource->name = resource_name;
+ resource->size = resource_size;
+ resource->size_new = resource_size;
+ resource->id = resource_id;
+ resource->size_valid = true;
+ memcpy(&resource->size_params, size_params,
+ sizeof(resource->size_params));
+ INIT_LIST_HEAD(&resource->resource_list);
+ list_add_tail(&resource->list, resource_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_resource_register);
+
+/**
+ * devlink_resource_register - devlink resource register
+ *
+ * @devlink: devlink
+ * @resource_name: resource's name
+ * @resource_size: resource's size
+ * @resource_id: resource's id
+ * @parent_resource_id: resource's parent id
+ * @size_params: size parameters
+ *
+ * Generic resources should reuse the same names across drivers.
+ * Please see the generic resources list at:
+ * Documentation/networking/devlink/devlink-resource.rst
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+int devlink_resource_register(struct devlink *devlink,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_resource_register(devlink, resource_name, resource_size,
+ resource_id, parent_resource_id, size_params);
+ devl_unlock(devlink);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_resource_register);
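A registration sketch, assuming a hypothetical driver that exposes a parent "kvd" table with one "kvd_linear" child (the ids, names and sizes are made up); devlink_resource_size_params_init() is the existing helper from include/net/devlink.h, and the devl_ variant expects the caller to hold the instance lock:

	enum foo_resource_id {
		FOO_RES_ID_KVD = 1,
		FOO_RES_ID_KVD_LINEAR,
	};

	/* called with the devlink instance lock held */
	static int foo_resources_register(struct devlink *devlink)
	{
		struct devlink_resource_size_params params;
		int err;

		devlink_resource_size_params_init(&params, 0, SZ_1M, SZ_4K,
						  DEVLINK_RESOURCE_UNIT_ENTRY);
		err = devl_resource_register(devlink, "kvd", SZ_1M,
					     FOO_RES_ID_KVD,
					     DEVLINK_RESOURCE_ID_PARENT_TOP,
					     &params);
		if (err)
			return err;

		devlink_resource_size_params_init(&params, 0, SZ_256K, SZ_4K,
						  DEVLINK_RESOURCE_UNIT_ENTRY);
		return devl_resource_register(devlink, "kvd_linear", SZ_256K,
					      FOO_RES_ID_KVD_LINEAR,
					      FOO_RES_ID_KVD, &params);
	}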
+
+static void devlink_resource_unregister(struct devlink *devlink,
+ struct devlink_resource *resource)
+{
+ struct devlink_resource *tmp, *child_resource;
+
+ list_for_each_entry_safe(child_resource, tmp, &resource->resource_list,
+ list) {
+ devlink_resource_unregister(devlink, child_resource);
+ list_del(&child_resource->list);
+ kfree(child_resource);
+ }
+}
+
+/**
+ * devl_resources_unregister - free all resources
+ *
+ * @devlink: devlink
+ */
+void devl_resources_unregister(struct devlink *devlink)
+{
+ struct devlink_resource *tmp, *child_resource;
+
+ lockdep_assert_held(&devlink->lock);
+
+ list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
+ list) {
+ devlink_resource_unregister(devlink, child_resource);
+ list_del(&child_resource->list);
+ kfree(child_resource);
+ }
+}
+EXPORT_SYMBOL_GPL(devl_resources_unregister);
+
+/**
+ * devlink_resources_unregister - free all resources
+ *
+ * @devlink: devlink
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_resources_unregister(struct devlink *devlink)
+{
+ devl_lock(devlink);
+ devl_resources_unregister(devlink);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_resources_unregister);
+
+/**
+ * devl_resource_size_get - get and update size
+ *
+ * @devlink: devlink
+ * @resource_id: the requested resource id
+ * @p_resource_size: ptr to update
+ */
+int devl_resource_size_get(struct devlink *devlink,
+ u64 resource_id,
+ u64 *p_resource_size)
+{
+ struct devlink_resource *resource;
+
+ lockdep_assert_held(&devlink->lock);
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (!resource)
+ return -EINVAL;
+ *p_resource_size = resource->size_new;
+ resource->size = resource->size_new;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_resource_size_get);
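A driver typically calls devl_resource_size_get() from its reload path, with the instance lock held, to pick up any size the user configured; the call also commits size_new into size. A short sketch reusing the hypothetical ids from the registration example above (foo_apply_kvd_linear_size() is assumed):

	u64 kvd_linear_size;
	int err;

	err = devl_resource_size_get(devlink, FOO_RES_ID_KVD_LINEAR,
				     &kvd_linear_size);
	if (!err)
		foo_apply_kvd_linear_size(devlink_priv(devlink),
					  kvd_linear_size);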
+
+/**
+ * devl_resource_occ_get_register - register occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ */
+void devl_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ struct devlink_resource *resource;
+
+ lockdep_assert_held(&devlink->lock);
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (WARN_ON(!resource))
+ return;
+ WARN_ON(resource->occ_get);
+
+ resource->occ_get = occ_get;
+ resource->occ_get_priv = occ_get_priv;
+}
+EXPORT_SYMBOL_GPL(devl_resource_occ_get_register);
+
+/**
+ * devlink_resource_occ_get_register - register occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_resource_occ_get_register(struct devlink *devlink,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv)
+{
+ devl_lock(devlink);
+ devl_resource_occ_get_register(devlink, resource_id,
+ occ_get, occ_get_priv);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
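Registering an occupancy getter is a one-liner on the driver side; the callback runs on every resource dump, so it should be cheap. Sketch with the hypothetical foo_* names used above:

	static u64 foo_kvd_linear_occ_get(void *priv)
	{
		struct foo *foo = priv;

		return atomic64_read(&foo->kvd_linear_in_use);
	}

	/* with the devlink instance lock held */
	devl_resource_occ_get_register(devlink, FOO_RES_ID_KVD_LINEAR,
				       foo_kvd_linear_occ_get, foo);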
+
+/**
+ * devl_resource_occ_get_unregister - unregister occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ */
+void devl_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id)
+{
+ struct devlink_resource *resource;
+
+ lockdep_assert_held(&devlink->lock);
+
+ resource = devlink_resource_find(devlink, NULL, resource_id);
+ if (WARN_ON(!resource))
+ return;
+ WARN_ON(!resource->occ_get);
+
+ resource->occ_get = NULL;
+ resource->occ_get_priv = NULL;
+}
+EXPORT_SYMBOL_GPL(devl_resource_occ_get_unregister);
+
+/**
+ * devlink_resource_occ_get_unregister - unregister occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+ u64 resource_id)
+{
+ devl_lock(devlink);
+ devl_resource_occ_get_unregister(devlink, resource_id);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
diff --git a/net/devlink/sb.c b/net/devlink/sb.c
new file mode 100644
index 000000000000..bd677fff5ec8
--- /dev/null
+++ b/net/devlink/sb.c
@@ -0,0 +1,996 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include "devl_internal.h"
+
+struct devlink_sb {
+ struct list_head list;
+ unsigned int index;
+ u32 size;
+ u16 ingress_pools_count;
+ u16 egress_pools_count;
+ u16 ingress_tc_count;
+ u16 egress_tc_count;
+};
+
+static u16 devlink_sb_pool_count(struct devlink_sb *devlink_sb)
+{
+ return devlink_sb->ingress_pools_count + devlink_sb->egress_pools_count;
+}
+
+static struct devlink_sb *devlink_sb_get_by_index(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct devlink_sb *devlink_sb;
+
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ if (devlink_sb->index == sb_index)
+ return devlink_sb;
+ }
+ return NULL;
+}
+
+static bool devlink_sb_index_exists(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ return devlink_sb_get_by_index(devlink, sb_index);
+}
+
+static struct devlink_sb *devlink_sb_get_from_attrs(struct devlink *devlink,
+ struct nlattr **attrs)
+{
+ if (attrs[DEVLINK_ATTR_SB_INDEX]) {
+ u32 sb_index = nla_get_u32(attrs[DEVLINK_ATTR_SB_INDEX]);
+ struct devlink_sb *devlink_sb;
+
+ devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+ if (!devlink_sb)
+ return ERR_PTR(-ENODEV);
+ return devlink_sb;
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static struct devlink_sb *devlink_sb_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ return devlink_sb_get_from_attrs(devlink, info->attrs);
+}
+
+static int devlink_sb_pool_index_get_from_attrs(struct devlink_sb *devlink_sb,
+ struct nlattr **attrs,
+ u16 *p_pool_index)
+{
+ u16 val;
+
+ if (!attrs[DEVLINK_ATTR_SB_POOL_INDEX])
+ return -EINVAL;
+
+ val = nla_get_u16(attrs[DEVLINK_ATTR_SB_POOL_INDEX]);
+ if (val >= devlink_sb_pool_count(devlink_sb))
+ return -EINVAL;
+ *p_pool_index = val;
+ return 0;
+}
+
+static int devlink_sb_pool_index_get_from_info(struct devlink_sb *devlink_sb,
+ struct genl_info *info,
+ u16 *p_pool_index)
+{
+ return devlink_sb_pool_index_get_from_attrs(devlink_sb, info->attrs,
+ p_pool_index);
+}
+
+static int
+devlink_sb_pool_type_get_from_attrs(struct nlattr **attrs,
+ enum devlink_sb_pool_type *p_pool_type)
+{
+ u8 val;
+
+ if (!attrs[DEVLINK_ATTR_SB_POOL_TYPE])
+ return -EINVAL;
+
+ val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_TYPE]);
+ if (val != DEVLINK_SB_POOL_TYPE_INGRESS &&
+ val != DEVLINK_SB_POOL_TYPE_EGRESS)
+ return -EINVAL;
+ *p_pool_type = val;
+ return 0;
+}
+
+static int
+devlink_sb_pool_type_get_from_info(struct genl_info *info,
+ enum devlink_sb_pool_type *p_pool_type)
+{
+ return devlink_sb_pool_type_get_from_attrs(info->attrs, p_pool_type);
+}
+
+static int
+devlink_sb_th_type_get_from_attrs(struct nlattr **attrs,
+ enum devlink_sb_threshold_type *p_th_type)
+{
+ u8 val;
+
+ if (!attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE])
+ return -EINVAL;
+
+ val = nla_get_u8(attrs[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE]);
+ if (val != DEVLINK_SB_THRESHOLD_TYPE_STATIC &&
+ val != DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC)
+ return -EINVAL;
+ *p_th_type = val;
+ return 0;
+}
+
+static int
+devlink_sb_th_type_get_from_info(struct genl_info *info,
+ enum devlink_sb_threshold_type *p_th_type)
+{
+ return devlink_sb_th_type_get_from_attrs(info->attrs, p_th_type);
+}
+
+static int
+devlink_sb_tc_index_get_from_attrs(struct devlink_sb *devlink_sb,
+ struct nlattr **attrs,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_tc_index)
+{
+ u16 val;
+
+ if (!attrs[DEVLINK_ATTR_SB_TC_INDEX])
+ return -EINVAL;
+
+ val = nla_get_u16(attrs[DEVLINK_ATTR_SB_TC_INDEX]);
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS &&
+ val >= devlink_sb->ingress_tc_count)
+ return -EINVAL;
+ if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS &&
+ val >= devlink_sb->egress_tc_count)
+ return -EINVAL;
+ *p_tc_index = val;
+ return 0;
+}
+
+static int
+devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
+ struct genl_info *info,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_tc_index)
+{
+ return devlink_sb_tc_index_get_from_attrs(devlink_sb, info->attrs,
+ pool_type, p_tc_index);
+}
+
+static int devlink_nl_sb_fill(struct sk_buff *msg, struct devlink *devlink,
+ struct devlink_sb *devlink_sb,
+ enum devlink_command cmd, u32 portid,
+ u32 seq, int flags)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_SIZE, devlink_sb->size))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_POOL_COUNT,
+ devlink_sb->ingress_pools_count))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_POOL_COUNT,
+ devlink_sb->egress_pools_count))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_INGRESS_TC_COUNT,
+ devlink_sb->ingress_tc_count))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_EGRESS_TC_COUNT,
+ devlink_sb->egress_tc_count))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+int devlink_nl_sb_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_sb *devlink_sb;
+ struct sk_buff *msg;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+ DEVLINK_CMD_SB_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int
+devlink_nl_sb_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_sb *devlink_sb;
+ int idx = 0;
+ int err = 0;
+
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_sb_fill(msg, devlink, devlink_sb,
+ DEVLINK_CMD_SB_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
+ }
+
+ return err;
+}
+
+int devlink_nl_sb_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_sb_get_dump_one);
+}
+
+static int devlink_nl_sb_pool_fill(struct sk_buff *msg, struct devlink *devlink,
+ struct devlink_sb *devlink_sb,
+ u16 pool_index, enum devlink_command cmd,
+ u32 portid, u32 seq, int flags)
+{
+ struct devlink_sb_pool_info pool_info;
+ void *hdr;
+ int err;
+
+ err = devlink->ops->sb_pool_get(devlink, devlink_sb->index,
+ pool_index, &pool_info);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_info.pool_type))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_SIZE, pool_info.size))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE,
+ pool_info.threshold_type))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_POOL_CELL_SIZE,
+ pool_info.cell_size))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+int devlink_nl_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_sb *devlink_sb;
+ struct sk_buff *msg;
+ u16 pool_index;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+ &pool_index);
+ if (err)
+ return err;
+
+ if (!devlink->ops->sb_pool_get)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_sb_pool_fill(msg, devlink, devlink_sb, pool_index,
+ DEVLINK_CMD_SB_POOL_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int __sb_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+ struct devlink *devlink,
+ struct devlink_sb *devlink_sb,
+ u32 portid, u32 seq, int flags)
+{
+ u16 pool_count = devlink_sb_pool_count(devlink_sb);
+ u16 pool_index;
+ int err;
+
+ for (pool_index = 0; pool_index < pool_count; pool_index++) {
+ if (*p_idx < start) {
+ (*p_idx)++;
+ continue;
+ }
+ err = devlink_nl_sb_pool_fill(msg, devlink,
+ devlink_sb,
+ pool_index,
+ DEVLINK_CMD_SB_POOL_NEW,
+ portid, seq, flags);
+ if (err)
+ return err;
+ (*p_idx)++;
+ }
+ return 0;
+}
+
+static int
+devlink_nl_sb_pool_get_dump_one(struct sk_buff *msg, struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_sb *devlink_sb;
+ int err = 0;
+ int idx = 0;
+
+ if (!devlink->ops->sb_pool_get)
+ return 0;
+
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ err = __sb_pool_get_dumpit(msg, state->idx, &idx,
+ devlink, devlink_sb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = idx;
+ break;
+ }
+ }
+
+ return err;
+}
+
+int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_sb_pool_get_dump_one);
+}
+
+static int devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ const struct devlink_ops *ops = devlink->ops;
+
+ if (ops->sb_pool_set)
+ return ops->sb_pool_set(devlink, sb_index, pool_index,
+ size, threshold_type, extack);
+ return -EOPNOTSUPP;
+}
+
+int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ enum devlink_sb_threshold_type threshold_type;
+ struct devlink_sb *devlink_sb;
+ u16 pool_index;
+ u32 size;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+ &pool_index);
+ if (err)
+ return err;
+
+ err = devlink_sb_th_type_get_from_info(info, &threshold_type);
+ if (err)
+ return err;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_POOL_SIZE))
+ return -EINVAL;
+
+ size = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_POOL_SIZE]);
+ return devlink_sb_pool_set(devlink, devlink_sb->index,
+ pool_index, size, threshold_type,
+ info->extack);
+}
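
As an illustration, the two handlers above only parse and validate netlink attributes; the pool state itself is owned by the driver behind the sb_pool_get/sb_pool_set ops invoked here. A minimal driver-side sketch might look like the following (the foo_* identifiers, FOO_* constants and foo_hw_* helper are hypothetical; only the devlink types, op signatures and enum values used in this file are real):

static int foo_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
			   u16 pool_index,
			   struct devlink_sb_pool_info *pool_info)
{
	struct foo *foo = devlink_priv(devlink);
	struct foo_sb_pool *pool;

	if (sb_index != FOO_SB_INDEX || pool_index >= FOO_SB_POOL_COUNT)
		return -EINVAL;

	pool = &foo->sb_pools[pool_index];
	pool_info->pool_type = pool->ingress ? DEVLINK_SB_POOL_TYPE_INGRESS :
					       DEVLINK_SB_POOL_TYPE_EGRESS;
	pool_info->size = pool->size;
	pool_info->threshold_type = pool->threshold_type;
	pool_info->cell_size = FOO_SB_CELL_SIZE;
	return 0;
}

static int foo_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
			   u16 pool_index, u32 size,
			   enum devlink_sb_threshold_type threshold_type,
			   struct netlink_ext_ack *extack)
{
	struct foo *foo = devlink_priv(devlink);

	if (sb_index != FOO_SB_INDEX || pool_index >= FOO_SB_POOL_COUNT)
		return -EINVAL;
	if (size > foo->sb_size) {
		NL_SET_ERR_MSG(extack, "Pool size exceeds shared buffer size");
		return -EINVAL;
	}
	/* Program the hardware, then cache the values for foo_sb_pool_get(). */
	return foo_hw_sb_pool_configure(foo, pool_index, size, threshold_type);
}
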
+
+static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ struct devlink_sb *devlink_sb,
+ u16 pool_index,
+ enum devlink_command cmd,
+ u32 portid, u32 seq, int flags)
+{
+ const struct devlink_ops *ops = devlink->ops;
+ u32 threshold;
+ void *hdr;
+ int err;
+
+ err = ops->sb_port_pool_get(devlink_port, devlink_sb->index,
+ pool_index, &threshold);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+ goto nla_put_failure;
+
+ if (ops->sb_occ_port_pool_get) {
+ u32 cur;
+ u32 max;
+
+ err = ops->sb_occ_port_pool_get(devlink_port, devlink_sb->index,
+ pool_index, &cur, &max);
+ if (err && err != -EOPNOTSUPP)
+ goto sb_occ_get_failure;
+ if (!err) {
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+ goto nla_put_failure;
+ }
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+sb_occ_get_failure:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+int devlink_nl_sb_port_pool_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink *devlink = devlink_port->devlink;
+ struct devlink_sb *devlink_sb;
+ struct sk_buff *msg;
+ u16 pool_index;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+ &pool_index);
+ if (err)
+ return err;
+
+ if (!devlink->ops->sb_port_pool_get)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_sb_port_pool_fill(msg, devlink, devlink_port,
+ devlink_sb, pool_index,
+ DEVLINK_CMD_SB_PORT_POOL_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int __sb_port_pool_get_dumpit(struct sk_buff *msg, int start, int *p_idx,
+ struct devlink *devlink,
+ struct devlink_sb *devlink_sb,
+ u32 portid, u32 seq, int flags)
+{
+ struct devlink_port *devlink_port;
+ u16 pool_count = devlink_sb_pool_count(devlink_sb);
+ unsigned long port_index;
+ u16 pool_index;
+ int err;
+
+ xa_for_each(&devlink->ports, port_index, devlink_port) {
+ for (pool_index = 0; pool_index < pool_count; pool_index++) {
+ if (*p_idx < start) {
+ (*p_idx)++;
+ continue;
+ }
+ err = devlink_nl_sb_port_pool_fill(msg, devlink,
+ devlink_port,
+ devlink_sb,
+ pool_index,
+ DEVLINK_CMD_SB_PORT_POOL_NEW,
+ portid, seq, flags);
+ if (err)
+ return err;
+ (*p_idx)++;
+ }
+ }
+ return 0;
+}
+
+static int
+devlink_nl_sb_port_pool_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_sb *devlink_sb;
+ int idx = 0;
+ int err = 0;
+
+ if (!devlink->ops->sb_port_pool_get)
+ return 0;
+
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ err = __sb_port_pool_get_dumpit(msg, state->idx, &idx,
+ devlink, devlink_sb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = idx;
+ break;
+ }
+ }
+
+ return err;
+}
+
+int devlink_nl_sb_port_pool_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_sb_port_pool_get_dump_one);
+}
+
+static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ const struct devlink_ops *ops = devlink_port->devlink->ops;
+
+ if (ops->sb_port_pool_set)
+ return ops->sb_port_pool_set(devlink_port, sb_index,
+ pool_index, threshold, extack);
+ return -EOPNOTSUPP;
+}
+
+int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_sb *devlink_sb;
+ u16 pool_index;
+ u32 threshold;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+ &pool_index);
+ if (err)
+ return err;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
+ return -EINVAL;
+
+ threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+ return devlink_sb_port_pool_set(devlink_port, devlink_sb->index,
+ pool_index, threshold, info->extack);
+}
+
+static int
+devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink,
+ struct devlink_port *devlink_port,
+ struct devlink_sb *devlink_sb, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ enum devlink_command cmd,
+ u32 portid, u32 seq, int flags)
+{
+ const struct devlink_ops *ops = devlink->ops;
+ u16 pool_index;
+ u32 threshold;
+ void *hdr;
+ int err;
+
+ err = ops->sb_tc_pool_bind_get(devlink_port, devlink_sb->index,
+ tc_index, pool_type,
+ &pool_index, &threshold);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_INDEX, devlink_sb->index))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_TC_INDEX, tc_index))
+ goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_SB_POOL_TYPE, pool_type))
+ goto nla_put_failure;
+ if (nla_put_u16(msg, DEVLINK_ATTR_SB_POOL_INDEX, pool_index))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_THRESHOLD, threshold))
+ goto nla_put_failure;
+
+ if (ops->sb_occ_tc_port_bind_get) {
+ u32 cur;
+ u32 max;
+
+ err = ops->sb_occ_tc_port_bind_get(devlink_port,
+ devlink_sb->index,
+ tc_index, pool_type,
+ &cur, &max);
+ if (err && err != -EOPNOTSUPP)
+ goto sb_occ_get_failure;
+ if (!err) {
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_CUR, cur))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, DEVLINK_ATTR_SB_OCC_MAX, max))
+ goto nla_put_failure;
+ }
+ }
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+sb_occ_get_failure:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+int devlink_nl_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink *devlink = devlink_port->devlink;
+ struct devlink_sb *devlink_sb;
+ struct sk_buff *msg;
+ enum devlink_sb_pool_type pool_type;
+ u16 tc_index;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+ if (err)
+ return err;
+
+ err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+ pool_type, &tc_index);
+ if (err)
+ return err;
+
+ if (!devlink->ops->sb_tc_pool_bind_get)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink, devlink_port,
+ devlink_sb, tc_index, pool_type,
+ DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+ info->snd_portid,
+ info->snd_seq, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ return genlmsg_reply(msg, info);
+}
+
+static int __sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
+ int start, int *p_idx,
+ struct devlink *devlink,
+ struct devlink_sb *devlink_sb,
+ u32 portid, u32 seq, int flags)
+{
+ struct devlink_port *devlink_port;
+ unsigned long port_index;
+ u16 tc_index;
+ int err;
+
+ xa_for_each(&devlink->ports, port_index, devlink_port) {
+ for (tc_index = 0;
+ tc_index < devlink_sb->ingress_tc_count; tc_index++) {
+ if (*p_idx < start) {
+ (*p_idx)++;
+ continue;
+ }
+ err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+ devlink_port,
+ devlink_sb,
+ tc_index,
+ DEVLINK_SB_POOL_TYPE_INGRESS,
+ DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+ portid, seq,
+ flags);
+ if (err)
+ return err;
+ (*p_idx)++;
+ }
+ for (tc_index = 0;
+ tc_index < devlink_sb->egress_tc_count; tc_index++) {
+ if (*p_idx < start) {
+ (*p_idx)++;
+ continue;
+ }
+ err = devlink_nl_sb_tc_pool_bind_fill(msg, devlink,
+ devlink_port,
+ devlink_sb,
+ tc_index,
+ DEVLINK_SB_POOL_TYPE_EGRESS,
+ DEVLINK_CMD_SB_TC_POOL_BIND_NEW,
+ portid, seq,
+ flags);
+ if (err)
+ return err;
+ (*p_idx)++;
+ }
+ }
+ return 0;
+}
+
+static int devlink_nl_sb_tc_pool_bind_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_sb *devlink_sb;
+ int idx = 0;
+ int err = 0;
+
+ if (!devlink->ops->sb_tc_pool_bind_get)
+ return 0;
+
+ list_for_each_entry(devlink_sb, &devlink->sb_list, list) {
+ err = __sb_tc_pool_bind_get_dumpit(msg, state->idx, &idx,
+ devlink, devlink_sb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err == -EOPNOTSUPP) {
+ err = 0;
+ } else if (err) {
+ state->idx = idx;
+ break;
+ }
+ }
+
+ return err;
+}
+
+int devlink_nl_sb_tc_pool_bind_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb,
+ devlink_nl_sb_tc_pool_bind_get_dump_one);
+}
+
+static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ const struct devlink_ops *ops = devlink_port->devlink->ops;
+
+ if (ops->sb_tc_pool_bind_set)
+ return ops->sb_tc_pool_bind_set(devlink_port, sb_index,
+ tc_index, pool_type,
+ pool_index, threshold, extack);
+ return -EOPNOTSUPP;
+}
+
+int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_port *devlink_port = info->user_ptr[1];
+ struct devlink *devlink = info->user_ptr[0];
+ enum devlink_sb_pool_type pool_type;
+ struct devlink_sb *devlink_sb;
+ u16 tc_index;
+ u16 pool_index;
+ u32 threshold;
+ int err;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ err = devlink_sb_pool_type_get_from_info(info, &pool_type);
+ if (err)
+ return err;
+
+ err = devlink_sb_tc_index_get_from_info(devlink_sb, info,
+ pool_type, &tc_index);
+ if (err)
+ return err;
+
+ err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
+ &pool_index);
+ if (err)
+ return err;
+
+ if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_SB_THRESHOLD))
+ return -EINVAL;
+
+ threshold = nla_get_u32(info->attrs[DEVLINK_ATTR_SB_THRESHOLD]);
+ return devlink_sb_tc_pool_bind_set(devlink_port, devlink_sb->index,
+ tc_index, pool_type,
+ pool_index, threshold, info->extack);
+}
+
+int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const struct devlink_ops *ops = devlink->ops;
+ struct devlink_sb *devlink_sb;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ if (ops->sb_occ_snapshot)
+ return ops->sb_occ_snapshot(devlink, devlink_sb->index);
+ return -EOPNOTSUPP;
+}
+
+int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ const struct devlink_ops *ops = devlink->ops;
+ struct devlink_sb *devlink_sb;
+
+ devlink_sb = devlink_sb_get_from_info(devlink, info);
+ if (IS_ERR(devlink_sb))
+ return PTR_ERR(devlink_sb);
+
+ if (ops->sb_occ_max_clear)
+ return ops->sb_occ_max_clear(devlink, devlink_sb->index);
+ return -EOPNOTSUPP;
+}
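
The snapshot/clear handlers above are thin wrappers around the corresponding driver ops. As a hypothetical sketch (foo_* names and the foo_hw_* helpers are invented), a driver might latch all occupancy counters in one shot so that later occupancy queries return a consistent view:

static int foo_sb_occ_snapshot(struct devlink *devlink, unsigned int sb_index)
{
	struct foo *foo = devlink_priv(devlink);

	if (sb_index != FOO_SB_INDEX)
		return -EINVAL;

	/* Latch current and maximal occupancy for all ports/TCs so the
	 * subsequent sb_occ_port_pool_get()/sb_occ_tc_port_bind_get()
	 * callbacks report one consistent snapshot.
	 */
	return foo_hw_sb_occ_latch(foo);
}

static int foo_sb_occ_max_clear(struct devlink *devlink, unsigned int sb_index)
{
	struct foo *foo = devlink_priv(devlink);

	if (sb_index != FOO_SB_INDEX)
		return -EINVAL;

	return foo_hw_sb_occ_max_reset(foo);
}
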
+
+int devl_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count)
+{
+ struct devlink_sb *devlink_sb;
+
+ lockdep_assert_held(&devlink->lock);
+
+ if (devlink_sb_index_exists(devlink, sb_index))
+ return -EEXIST;
+
+ devlink_sb = kzalloc(sizeof(*devlink_sb), GFP_KERNEL);
+ if (!devlink_sb)
+ return -ENOMEM;
+ devlink_sb->index = sb_index;
+ devlink_sb->size = size;
+ devlink_sb->ingress_pools_count = ingress_pools_count;
+ devlink_sb->egress_pools_count = egress_pools_count;
+ devlink_sb->ingress_tc_count = ingress_tc_count;
+ devlink_sb->egress_tc_count = egress_tc_count;
+ list_add_tail(&devlink_sb->list, &devlink->sb_list);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devl_sb_register);
+
+int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
+ u32 size, u16 ingress_pools_count,
+ u16 egress_pools_count, u16 ingress_tc_count,
+ u16 egress_tc_count)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_sb_register(devlink, sb_index, size, ingress_pools_count,
+ egress_pools_count, ingress_tc_count,
+ egress_tc_count);
+ devl_unlock(devlink);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_sb_register);
+
+void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+{
+ struct devlink_sb *devlink_sb;
+
+ lockdep_assert_held(&devlink->lock);
+
+ devlink_sb = devlink_sb_get_by_index(devlink, sb_index);
+ WARN_ON(!devlink_sb);
+ list_del(&devlink_sb->list);
+ kfree(devlink_sb);
+}
+EXPORT_SYMBOL_GPL(devl_sb_unregister);
+
+void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index)
+{
+ devl_lock(devlink);
+ devl_sb_unregister(devlink, sb_index);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_sb_unregister);
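
Registration is pure bookkeeping: the geometry passed in is exactly what the SB get handlers earlier in this file report back to user space. A hypothetical probe/remove pairing (the foo_* names and sizes are invented for illustration) could look like:

#define FOO_SB_INDEX	0
#define FOO_SB_SIZE	(16 * 1024 * 1024)	/* bytes */

static int foo_sb_init(struct devlink *devlink)
{
	/* 4 ingress/egress pools, 8 ingress/egress TCs; use devl_sb_register()
	 * instead if the devlink instance lock is already held.
	 */
	return devlink_sb_register(devlink, FOO_SB_INDEX, FOO_SB_SIZE,
				   4, 4, 8, 8);
}

static void foo_sb_fini(struct devlink *devlink)
{
	devlink_sb_unregister(devlink, FOO_SB_INDEX);
}
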
diff --git a/net/devlink/trap.c b/net/devlink/trap.c
new file mode 100644
index 000000000000..c26bf9b29bca
--- /dev/null
+++ b/net/devlink/trap.c
@@ -0,0 +1,1861 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ */
+
+#include <trace/events/devlink.h>
+
+#include "devl_internal.h"
+
+struct devlink_stats {
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct devlink_trap_policer_item - Packet trap policer attributes.
+ * @policer: Immutable packet trap policer attributes.
+ * @rate: Rate in packets / sec.
+ * @burst: Burst size in packets.
+ * @list: trap_policer_list member.
+ *
+ * Describes packet trap policer attributes. Created by devlink during trap
+ * policer registration.
+ */
+struct devlink_trap_policer_item {
+ const struct devlink_trap_policer *policer;
+ u64 rate;
+ u64 burst;
+ struct list_head list;
+};
+
+/**
+ * struct devlink_trap_group_item - Packet trap group attributes.
+ * @group: Immutable packet trap group attributes.
+ * @policer_item: Associated policer item. Can be NULL.
+ * @list: trap_group_list member.
+ * @stats: Trap group statistics.
+ *
+ * Describes packet trap group attributes. Created by devlink during trap
+ * group registration.
+ */
+struct devlink_trap_group_item {
+ const struct devlink_trap_group *group;
+ struct devlink_trap_policer_item *policer_item;
+ struct list_head list;
+ struct devlink_stats __percpu *stats;
+};
+
+/**
+ * struct devlink_trap_item - Packet trap attributes.
+ * @trap: Immutable packet trap attributes.
+ * @group_item: Associated group item.
+ * @list: trap_list member.
+ * @action: Trap action.
+ * @stats: Trap statistics.
+ * @priv: Driver private information.
+ *
+ * Describes both mutable and immutable packet trap attributes. Created by
+ * devlink during trap registration and used for all trap related operations.
+ */
+struct devlink_trap_item {
+ const struct devlink_trap *trap;
+ struct devlink_trap_group_item *group_item;
+ struct list_head list;
+ enum devlink_trap_action action;
+ struct devlink_stats __percpu *stats;
+ void *priv;
+};
+
+static struct devlink_trap_policer_item *
+devlink_trap_policer_item_lookup(struct devlink *devlink, u32 id)
+{
+ struct devlink_trap_policer_item *policer_item;
+
+ list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
+ if (policer_item->policer->id == id)
+ return policer_item;
+ }
+
+ return NULL;
+}
+
+static struct devlink_trap_item *
+devlink_trap_item_lookup(struct devlink *devlink, const char *name)
+{
+ struct devlink_trap_item *trap_item;
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list) {
+ if (!strcmp(trap_item->trap->name, name))
+ return trap_item;
+ }
+
+ return NULL;
+}
+
+static struct devlink_trap_item *
+devlink_trap_item_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ struct nlattr *attr;
+
+ if (!info->attrs[DEVLINK_ATTR_TRAP_NAME])
+ return NULL;
+ attr = info->attrs[DEVLINK_ATTR_TRAP_NAME];
+
+ return devlink_trap_item_lookup(devlink, nla_data(attr));
+}
+
+static int
+devlink_trap_action_get_from_info(struct genl_info *info,
+ enum devlink_trap_action *p_trap_action)
+{
+ u8 val;
+
+ val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
+ switch (val) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ case DEVLINK_TRAP_ACTION_TRAP:
+ case DEVLINK_TRAP_ACTION_MIRROR:
+ *p_trap_action = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int devlink_trap_metadata_put(struct sk_buff *msg,
+ const struct devlink_trap *trap)
+{
+ struct nlattr *attr;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_TRAP_METADATA);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) &&
+ nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT))
+ goto nla_put_failure;
+ if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE) &&
+ nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, attr);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+}
+
+static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats,
+ struct devlink_stats *stats)
+{
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+ for_each_possible_cpu(i) {
+ struct devlink_stats *cpu_stats;
+ u64 rx_packets, rx_bytes;
+ unsigned int start;
+
+ cpu_stats = per_cpu_ptr(trap_stats, i);
+ do {
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
+ rx_packets = u64_stats_read(&cpu_stats->rx_packets);
+ rx_bytes = u64_stats_read(&cpu_stats->rx_bytes);
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
+
+ u64_stats_add(&stats->rx_packets, rx_packets);
+ u64_stats_add(&stats->rx_bytes, rx_bytes);
+ }
+}
+
+static int
+devlink_trap_group_stats_put(struct sk_buff *msg,
+ struct devlink_stats __percpu *trap_stats)
+{
+ struct devlink_stats stats;
+ struct nlattr *attr;
+
+ devlink_trap_stats_read(trap_stats, &stats);
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+ u64_stats_read(&stats.rx_packets),
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+ u64_stats_read(&stats.rx_bytes),
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, attr);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_trap_stats_put(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_item *trap_item)
+{
+ struct devlink_stats stats;
+ struct nlattr *attr;
+ u64 drops = 0;
+ int err;
+
+ if (devlink->ops->trap_drop_counter_get) {
+ err = devlink->ops->trap_drop_counter_get(devlink,
+ trap_item->trap,
+ &drops);
+ if (err)
+ return err;
+ }
+
+ devlink_trap_stats_read(trap_item->stats, &stats);
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if (devlink->ops->trap_drop_counter_get &&
+ nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS,
+ u64_stats_read(&stats.rx_packets),
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES,
+ u64_stats_read(&stats.rx_bytes),
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, attr);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+}
+
+static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_item *trap_item,
+ enum devlink_command cmd, u32 portid, u32 seq,
+ int flags)
+{
+ struct devlink_trap_group_item *group_item = trap_item->group_item;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
+ group_item->group->name))
+ goto nla_put_failure;
+
+ if (nla_put_string(msg, DEVLINK_ATTR_TRAP_NAME, trap_item->trap->name))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_TYPE, trap_item->trap->type))
+ goto nla_put_failure;
+
+ if (trap_item->trap->generic &&
+ nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_ACTION, trap_item->action))
+ goto nla_put_failure;
+
+ err = devlink_trap_metadata_put(msg, trap_item->trap);
+ if (err)
+ goto nla_put_failure;
+
+ err = devlink_trap_stats_put(msg, devlink, trap_item);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+int devlink_nl_trap_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_trap_item *trap_item;
+ struct sk_buff *msg;
+ int err;
+
+ if (list_empty(&devlink->trap_list))
+ return -EOPNOTSUPP;
+
+ trap_item = devlink_trap_item_get_from_info(devlink, info);
+ if (!trap_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap");
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_trap_fill(msg, devlink, trap_item,
+ DEVLINK_CMD_TRAP_NEW, info->snd_portid,
+ info->snd_seq, 0);
+ if (err)
+ goto err_trap_fill;
+
+ return genlmsg_reply(msg, info);
+
+err_trap_fill:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int devlink_nl_trap_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb, int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_trap_item *trap_item;
+ int idx = 0;
+ int err = 0;
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_trap_fill(msg, devlink, trap_item,
+ DEVLINK_CMD_TRAP_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
+ }
+
+ return err;
+}
+
+int devlink_nl_trap_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_trap_get_dump_one);
+}
+
+static int __devlink_trap_action_set(struct devlink *devlink,
+ struct devlink_trap_item *trap_item,
+ enum devlink_trap_action trap_action,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (trap_item->action != trap_action &&
+ trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) {
+ NL_SET_ERR_MSG(extack, "Cannot change action of non-drop traps. Skipping");
+ return 0;
+ }
+
+ err = devlink->ops->trap_action_set(devlink, trap_item->trap,
+ trap_action, extack);
+ if (err)
+ return err;
+
+ trap_item->action = trap_action;
+
+ return 0;
+}
+
+static int devlink_trap_action_set(struct devlink *devlink,
+ struct devlink_trap_item *trap_item,
+ struct genl_info *info)
+{
+ enum devlink_trap_action trap_action;
+ int err;
+
+ if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
+ return 0;
+
+ err = devlink_trap_action_get_from_info(info, &trap_action);
+ if (err) {
+ NL_SET_ERR_MSG(info->extack, "Invalid trap action");
+ return -EINVAL;
+ }
+
+ return __devlink_trap_action_set(devlink, trap_item, trap_action,
+ info->extack);
+}
+
+int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_trap_item *trap_item;
+
+ if (list_empty(&devlink->trap_list))
+ return -EOPNOTSUPP;
+
+ trap_item = devlink_trap_item_get_from_info(devlink, info);
+ if (!trap_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap");
+ return -ENOENT;
+ }
+
+ return devlink_trap_action_set(devlink, trap_item, info);
+}
+
+static struct devlink_trap_group_item *
+devlink_trap_group_item_lookup(struct devlink *devlink, const char *name)
+{
+ struct devlink_trap_group_item *group_item;
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+ if (!strcmp(group_item->group->name, name))
+ return group_item;
+ }
+
+ return NULL;
+}
+
+static struct devlink_trap_group_item *
+devlink_trap_group_item_lookup_by_id(struct devlink *devlink, u16 id)
+{
+ struct devlink_trap_group_item *group_item;
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+ if (group_item->group->id == id)
+ return group_item;
+ }
+
+ return NULL;
+}
+
+static struct devlink_trap_group_item *
+devlink_trap_group_item_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ char *name;
+
+ if (!info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME])
+ return NULL;
+ name = nla_data(info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]);
+
+ return devlink_trap_group_item_lookup(devlink, name);
+}
+
+static int
+devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_group_item *group_item,
+ enum devlink_command cmd, u32 portid, u32 seq,
+ int flags)
+{
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME,
+ group_item->group->name))
+ goto nla_put_failure;
+
+ if (group_item->group->generic &&
+ nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC))
+ goto nla_put_failure;
+
+ if (group_item->policer_item &&
+ nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
+ group_item->policer_item->policer->id))
+ goto nla_put_failure;
+
+ err = devlink_trap_group_stats_put(msg, group_item->stats);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+int devlink_nl_trap_group_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_trap_group_item *group_item;
+ struct sk_buff *msg;
+ int err;
+
+ if (list_empty(&devlink->trap_group_list))
+ return -EOPNOTSUPP;
+
+ group_item = devlink_trap_group_item_get_from_info(devlink, info);
+ if (!group_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap group");
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_trap_group_fill(msg, devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err)
+ goto err_trap_group_fill;
+
+ return genlmsg_reply(msg, info);
+
+err_trap_group_fill:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int devlink_nl_trap_group_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_trap_group_item *group_item;
+ int idx = 0;
+ int err = 0;
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_trap_group_fill(msg, devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
+ }
+
+ return err;
+}
+
+int devlink_nl_trap_group_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_trap_group_get_dump_one);
+}
+
+static int
+__devlink_trap_group_action_set(struct devlink *devlink,
+ struct devlink_trap_group_item *group_item,
+ enum devlink_trap_action trap_action,
+ struct netlink_ext_ack *extack)
+{
+ const char *group_name = group_item->group->name;
+ struct devlink_trap_item *trap_item;
+ int err;
+
+ if (devlink->ops->trap_group_action_set) {
+ err = devlink->ops->trap_group_action_set(devlink, group_item->group,
+ trap_action, extack);
+ if (err)
+ return err;
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list) {
+ if (strcmp(trap_item->group_item->group->name, group_name))
+ continue;
+ if (trap_item->action != trap_action &&
+ trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP)
+ continue;
+ trap_item->action = trap_action;
+ }
+
+ return 0;
+ }
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list) {
+ if (strcmp(trap_item->group_item->group->name, group_name))
+ continue;
+ err = __devlink_trap_action_set(devlink, trap_item,
+ trap_action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+devlink_trap_group_action_set(struct devlink *devlink,
+ struct devlink_trap_group_item *group_item,
+ struct genl_info *info, bool *p_modified)
+{
+ enum devlink_trap_action trap_action;
+ int err;
+
+ if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION])
+ return 0;
+
+ err = devlink_trap_action_get_from_info(info, &trap_action);
+ if (err) {
+ NL_SET_ERR_MSG(info->extack, "Invalid trap action");
+ return -EINVAL;
+ }
+
+ err = __devlink_trap_group_action_set(devlink, group_item, trap_action,
+ info->extack);
+ if (err)
+ return err;
+
+ *p_modified = true;
+
+ return 0;
+}
+
+static int devlink_trap_group_set(struct devlink *devlink,
+ struct devlink_trap_group_item *group_item,
+ struct genl_info *info)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct netlink_ext_ack *extack = info->extack;
+ const struct devlink_trap_policer *policer;
+ struct nlattr **attrs = info->attrs;
+ u32 policer_id;
+ int err;
+
+ if (!attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
+ return 0;
+
+ if (!devlink->ops->trap_group_set)
+ return -EOPNOTSUPP;
+
+ policer_id = nla_get_u32(attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+ policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
+ if (policer_id && !policer_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
+ return -ENOENT;
+ }
+ policer = policer_item ? policer_item->policer : NULL;
+
+ err = devlink->ops->trap_group_set(devlink, group_item->group, policer,
+ extack);
+ if (err)
+ return err;
+
+ group_item->policer_item = policer_item;
+
+ return 0;
+}
+
+int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_trap_group_item *group_item;
+ bool modified = false;
+ int err;
+
+ if (list_empty(&devlink->trap_group_list))
+ return -EOPNOTSUPP;
+
+ group_item = devlink_trap_group_item_get_from_info(devlink, info);
+ if (!group_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap group");
+ return -ENOENT;
+ }
+
+ err = devlink_trap_group_action_set(devlink, group_item, info,
+ &modified);
+ if (err)
+ return err;
+
+ err = devlink_trap_group_set(devlink, group_item, info);
+ if (err)
+ goto err_trap_group_set;
+
+ return 0;
+
+err_trap_group_set:
+ if (modified)
+ NL_SET_ERR_MSG(extack, "Trap group set failed, but some changes were committed already");
+ return err;
+}
+
+static struct devlink_trap_policer_item *
+devlink_trap_policer_item_get_from_info(struct devlink *devlink,
+ struct genl_info *info)
+{
+ u32 id;
+
+ if (!info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID])
+ return NULL;
+ id = nla_get_u32(info->attrs[DEVLINK_ATTR_TRAP_POLICER_ID]);
+
+ return devlink_trap_policer_item_lookup(devlink, id);
+}
+
+static int
+devlink_trap_policer_stats_put(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct nlattr *attr;
+ u64 drops;
+ int err;
+
+ if (!devlink->ops->trap_policer_counter_get)
+ return 0;
+
+ err = devlink->ops->trap_policer_counter_get(devlink, policer, &drops);
+ if (err)
+ return err;
+
+ attr = nla_nest_start(msg, DEVLINK_ATTR_STATS);
+ if (!attr)
+ return -EMSGSIZE;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_DROPPED, drops,
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, attr);
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, attr);
+ return -EMSGSIZE;
+}
+
+static int
+devlink_nl_trap_policer_fill(struct sk_buff *msg, struct devlink *devlink,
+ const struct devlink_trap_policer_item *policer_item,
+ enum devlink_command cmd, u32 portid, u32 seq,
+ int flags)
+{
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, DEVLINK_ATTR_TRAP_POLICER_ID,
+ policer_item->policer->id))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_RATE,
+ policer_item->rate, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_TRAP_POLICER_BURST,
+ policer_item->burst, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+
+ err = devlink_trap_policer_stats_put(msg, devlink,
+ policer_item->policer);
+ if (err)
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+int devlink_nl_trap_policer_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+ struct sk_buff *msg;
+ int err;
+
+ if (list_empty(&devlink->trap_policer_list))
+ return -EOPNOTSUPP;
+
+ policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
+ if (!policer_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
+ return -ENOENT;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW,
+ info->snd_portid, info->snd_seq, 0);
+ if (err)
+ goto err_trap_policer_fill;
+
+ return genlmsg_reply(msg, info);
+
+err_trap_policer_fill:
+ nlmsg_free(msg);
+ return err;
+}
+
+static int devlink_nl_trap_policer_get_dump_one(struct sk_buff *msg,
+ struct devlink *devlink,
+ struct netlink_callback *cb,
+ int flags)
+{
+ struct devlink_nl_dump_state *state = devlink_dump_state(cb);
+ struct devlink_trap_policer_item *policer_item;
+ int idx = 0;
+ int err = 0;
+
+ list_for_each_entry(policer_item, &devlink->trap_policer_list, list) {
+ if (idx < state->idx) {
+ idx++;
+ continue;
+ }
+ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, flags);
+ if (err) {
+ state->idx = idx;
+ break;
+ }
+ idx++;
+ }
+
+ return err;
+}
+
+int devlink_nl_trap_policer_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return devlink_nl_dumpit(skb, cb, devlink_nl_trap_policer_get_dump_one);
+}
+
+static int
+devlink_trap_policer_set(struct devlink *devlink,
+ struct devlink_trap_policer_item *policer_item,
+ struct genl_info *info)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ struct nlattr **attrs = info->attrs;
+ u64 rate, burst;
+ int err;
+
+ rate = policer_item->rate;
+ burst = policer_item->burst;
+
+ if (attrs[DEVLINK_ATTR_TRAP_POLICER_RATE])
+ rate = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_RATE]);
+
+ if (attrs[DEVLINK_ATTR_TRAP_POLICER_BURST])
+ burst = nla_get_u64(attrs[DEVLINK_ATTR_TRAP_POLICER_BURST]);
+
+ if (rate < policer_item->policer->min_rate) {
+ NL_SET_ERR_MSG(extack, "Policer rate lower than limit");
+ return -EINVAL;
+ }
+
+ if (rate > policer_item->policer->max_rate) {
+ NL_SET_ERR_MSG(extack, "Policer rate higher than limit");
+ return -EINVAL;
+ }
+
+ if (burst < policer_item->policer->min_burst) {
+ NL_SET_ERR_MSG(extack, "Policer burst size lower than limit");
+ return -EINVAL;
+ }
+
+ if (burst > policer_item->policer->max_burst) {
+ NL_SET_ERR_MSG(extack, "Policer burst size higher than limit");
+ return -EINVAL;
+ }
+
+ err = devlink->ops->trap_policer_set(devlink, policer_item->policer,
+ rate, burst, info->extack);
+ if (err)
+ return err;
+
+ policer_item->rate = rate;
+ policer_item->burst = burst;
+
+ return 0;
+}
+
+int devlink_nl_cmd_trap_policer_set_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink_trap_policer_item *policer_item;
+ struct netlink_ext_ack *extack = info->extack;
+ struct devlink *devlink = info->user_ptr[0];
+
+ if (list_empty(&devlink->trap_policer_list))
+ return -EOPNOTSUPP;
+
+ if (!devlink->ops->trap_policer_set)
+ return -EOPNOTSUPP;
+
+ policer_item = devlink_trap_policer_item_get_from_info(devlink, info);
+ if (!policer_item) {
+ NL_SET_ERR_MSG(extack, "Device did not register this trap policer");
+ return -ENOENT;
+ }
+
+ return devlink_trap_policer_set(devlink, policer_item, info);
+}
+
+#define DEVLINK_TRAP(_id, _type) \
+ { \
+ .type = DEVLINK_TRAP_TYPE_##_type, \
+ .id = DEVLINK_TRAP_GENERIC_ID_##_id, \
+ .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \
+ }
+
+static const struct devlink_trap devlink_trap_generic[] = {
+ DEVLINK_TRAP(SMAC_MC, DROP),
+ DEVLINK_TRAP(VLAN_TAG_MISMATCH, DROP),
+ DEVLINK_TRAP(INGRESS_VLAN_FILTER, DROP),
+ DEVLINK_TRAP(INGRESS_STP_FILTER, DROP),
+ DEVLINK_TRAP(EMPTY_TX_LIST, DROP),
+ DEVLINK_TRAP(PORT_LOOPBACK_FILTER, DROP),
+ DEVLINK_TRAP(BLACKHOLE_ROUTE, DROP),
+ DEVLINK_TRAP(TTL_ERROR, EXCEPTION),
+ DEVLINK_TRAP(TAIL_DROP, DROP),
+ DEVLINK_TRAP(NON_IP_PACKET, DROP),
+ DEVLINK_TRAP(UC_DIP_MC_DMAC, DROP),
+ DEVLINK_TRAP(DIP_LB, DROP),
+ DEVLINK_TRAP(SIP_MC, DROP),
+ DEVLINK_TRAP(SIP_LB, DROP),
+ DEVLINK_TRAP(CORRUPTED_IP_HDR, DROP),
+ DEVLINK_TRAP(IPV4_SIP_BC, DROP),
+ DEVLINK_TRAP(IPV6_MC_DIP_RESERVED_SCOPE, DROP),
+ DEVLINK_TRAP(IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE, DROP),
+ DEVLINK_TRAP(MTU_ERROR, EXCEPTION),
+ DEVLINK_TRAP(UNRESOLVED_NEIGH, EXCEPTION),
+ DEVLINK_TRAP(RPF, EXCEPTION),
+ DEVLINK_TRAP(REJECT_ROUTE, EXCEPTION),
+ DEVLINK_TRAP(IPV4_LPM_UNICAST_MISS, EXCEPTION),
+ DEVLINK_TRAP(IPV6_LPM_UNICAST_MISS, EXCEPTION),
+ DEVLINK_TRAP(NON_ROUTABLE, DROP),
+ DEVLINK_TRAP(DECAP_ERROR, EXCEPTION),
+ DEVLINK_TRAP(OVERLAY_SMAC_MC, DROP),
+ DEVLINK_TRAP(INGRESS_FLOW_ACTION_DROP, DROP),
+ DEVLINK_TRAP(EGRESS_FLOW_ACTION_DROP, DROP),
+ DEVLINK_TRAP(STP, CONTROL),
+ DEVLINK_TRAP(LACP, CONTROL),
+ DEVLINK_TRAP(LLDP, CONTROL),
+ DEVLINK_TRAP(IGMP_QUERY, CONTROL),
+ DEVLINK_TRAP(IGMP_V1_REPORT, CONTROL),
+ DEVLINK_TRAP(IGMP_V2_REPORT, CONTROL),
+ DEVLINK_TRAP(IGMP_V3_REPORT, CONTROL),
+ DEVLINK_TRAP(IGMP_V2_LEAVE, CONTROL),
+ DEVLINK_TRAP(MLD_QUERY, CONTROL),
+ DEVLINK_TRAP(MLD_V1_REPORT, CONTROL),
+ DEVLINK_TRAP(MLD_V2_REPORT, CONTROL),
+ DEVLINK_TRAP(MLD_V1_DONE, CONTROL),
+ DEVLINK_TRAP(IPV4_DHCP, CONTROL),
+ DEVLINK_TRAP(IPV6_DHCP, CONTROL),
+ DEVLINK_TRAP(ARP_REQUEST, CONTROL),
+ DEVLINK_TRAP(ARP_RESPONSE, CONTROL),
+ DEVLINK_TRAP(ARP_OVERLAY, CONTROL),
+ DEVLINK_TRAP(IPV6_NEIGH_SOLICIT, CONTROL),
+ DEVLINK_TRAP(IPV6_NEIGH_ADVERT, CONTROL),
+ DEVLINK_TRAP(IPV4_BFD, CONTROL),
+ DEVLINK_TRAP(IPV6_BFD, CONTROL),
+ DEVLINK_TRAP(IPV4_OSPF, CONTROL),
+ DEVLINK_TRAP(IPV6_OSPF, CONTROL),
+ DEVLINK_TRAP(IPV4_BGP, CONTROL),
+ DEVLINK_TRAP(IPV6_BGP, CONTROL),
+ DEVLINK_TRAP(IPV4_VRRP, CONTROL),
+ DEVLINK_TRAP(IPV6_VRRP, CONTROL),
+ DEVLINK_TRAP(IPV4_PIM, CONTROL),
+ DEVLINK_TRAP(IPV6_PIM, CONTROL),
+ DEVLINK_TRAP(UC_LB, CONTROL),
+ DEVLINK_TRAP(LOCAL_ROUTE, CONTROL),
+ DEVLINK_TRAP(EXTERNAL_ROUTE, CONTROL),
+ DEVLINK_TRAP(IPV6_UC_DIP_LINK_LOCAL_SCOPE, CONTROL),
+ DEVLINK_TRAP(IPV6_DIP_ALL_NODES, CONTROL),
+ DEVLINK_TRAP(IPV6_DIP_ALL_ROUTERS, CONTROL),
+ DEVLINK_TRAP(IPV6_ROUTER_SOLICIT, CONTROL),
+ DEVLINK_TRAP(IPV6_ROUTER_ADVERT, CONTROL),
+ DEVLINK_TRAP(IPV6_REDIRECT, CONTROL),
+ DEVLINK_TRAP(IPV4_ROUTER_ALERT, CONTROL),
+ DEVLINK_TRAP(IPV6_ROUTER_ALERT, CONTROL),
+ DEVLINK_TRAP(PTP_EVENT, CONTROL),
+ DEVLINK_TRAP(PTP_GENERAL, CONTROL),
+ DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL),
+ DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL),
+ DEVLINK_TRAP(EARLY_DROP, DROP),
+ DEVLINK_TRAP(VXLAN_PARSING, DROP),
+ DEVLINK_TRAP(LLC_SNAP_PARSING, DROP),
+ DEVLINK_TRAP(VLAN_PARSING, DROP),
+ DEVLINK_TRAP(PPPOE_PPP_PARSING, DROP),
+ DEVLINK_TRAP(MPLS_PARSING, DROP),
+ DEVLINK_TRAP(ARP_PARSING, DROP),
+ DEVLINK_TRAP(IP_1_PARSING, DROP),
+ DEVLINK_TRAP(IP_N_PARSING, DROP),
+ DEVLINK_TRAP(GRE_PARSING, DROP),
+ DEVLINK_TRAP(UDP_PARSING, DROP),
+ DEVLINK_TRAP(TCP_PARSING, DROP),
+ DEVLINK_TRAP(IPSEC_PARSING, DROP),
+ DEVLINK_TRAP(SCTP_PARSING, DROP),
+ DEVLINK_TRAP(DCCP_PARSING, DROP),
+ DEVLINK_TRAP(GTP_PARSING, DROP),
+ DEVLINK_TRAP(ESP_PARSING, DROP),
+ DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
+ DEVLINK_TRAP(DMAC_FILTER, DROP),
+ DEVLINK_TRAP(EAPOL, CONTROL),
+ DEVLINK_TRAP(LOCKED_PORT, DROP),
+};
+
+#define DEVLINK_TRAP_GROUP(_id) \
+ { \
+ .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \
+ .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \
+ }
+
+static const struct devlink_trap_group devlink_trap_group_generic[] = {
+ DEVLINK_TRAP_GROUP(L2_DROPS),
+ DEVLINK_TRAP_GROUP(L3_DROPS),
+ DEVLINK_TRAP_GROUP(L3_EXCEPTIONS),
+ DEVLINK_TRAP_GROUP(BUFFER_DROPS),
+ DEVLINK_TRAP_GROUP(TUNNEL_DROPS),
+ DEVLINK_TRAP_GROUP(ACL_DROPS),
+ DEVLINK_TRAP_GROUP(STP),
+ DEVLINK_TRAP_GROUP(LACP),
+ DEVLINK_TRAP_GROUP(LLDP),
+ DEVLINK_TRAP_GROUP(MC_SNOOPING),
+ DEVLINK_TRAP_GROUP(DHCP),
+ DEVLINK_TRAP_GROUP(NEIGH_DISCOVERY),
+ DEVLINK_TRAP_GROUP(BFD),
+ DEVLINK_TRAP_GROUP(OSPF),
+ DEVLINK_TRAP_GROUP(BGP),
+ DEVLINK_TRAP_GROUP(VRRP),
+ DEVLINK_TRAP_GROUP(PIM),
+ DEVLINK_TRAP_GROUP(UC_LB),
+ DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
+ DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
+ DEVLINK_TRAP_GROUP(IPV6),
+ DEVLINK_TRAP_GROUP(PTP_EVENT),
+ DEVLINK_TRAP_GROUP(PTP_GENERAL),
+ DEVLINK_TRAP_GROUP(ACL_SAMPLE),
+ DEVLINK_TRAP_GROUP(ACL_TRAP),
+ DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
+ DEVLINK_TRAP_GROUP(EAPOL),
+};
+
+static int devlink_trap_generic_verify(const struct devlink_trap *trap)
+{
+ if (trap->id > DEVLINK_TRAP_GENERIC_ID_MAX)
+ return -EINVAL;
+
+ if (strcmp(trap->name, devlink_trap_generic[trap->id].name))
+ return -EINVAL;
+
+ if (trap->type != devlink_trap_generic[trap->id].type)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int devlink_trap_driver_verify(const struct devlink_trap *trap)
+{
+ int i;
+
+ if (trap->id <= DEVLINK_TRAP_GENERIC_ID_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_trap_generic); i++) {
+ if (!strcmp(trap->name, devlink_trap_generic[i].name))
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+static int devlink_trap_verify(const struct devlink_trap *trap)
+{
+ if (!trap || !trap->name)
+ return -EINVAL;
+
+ if (trap->generic)
+ return devlink_trap_generic_verify(trap);
+ else
+ return devlink_trap_driver_verify(trap);
+}
+
+static int
+devlink_trap_group_generic_verify(const struct devlink_trap_group *group)
+{
+ if (group->id > DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
+ return -EINVAL;
+
+ if (strcmp(group->name, devlink_trap_group_generic[group->id].name))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+devlink_trap_group_driver_verify(const struct devlink_trap_group *group)
+{
+ int i;
+
+ if (group->id <= DEVLINK_TRAP_GROUP_GENERIC_ID_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_trap_group_generic); i++) {
+ if (!strcmp(group->name, devlink_trap_group_generic[i].name))
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+static int devlink_trap_group_verify(const struct devlink_trap_group *group)
+{
+ if (group->generic)
+ return devlink_trap_group_generic_verify(group);
+ else
+ return devlink_trap_group_driver_verify(group);
+}
+
+static void
+devlink_trap_group_notify(struct devlink *devlink,
+ const struct devlink_trap_group_item *group_item,
+ enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW &&
+ cmd != DEVLINK_CMD_TRAP_GROUP_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_trap_group_fill(msg, devlink, group_item, cmd, 0, 0,
+ 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void devlink_trap_groups_notify_register(struct devlink *devlink)
+{
+ struct devlink_trap_group_item *group_item;
+
+ list_for_each_entry(group_item, &devlink->trap_group_list, list)
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW);
+}
+
+void devlink_trap_groups_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_trap_group_item *group_item;
+
+ list_for_each_entry_reverse(group_item, &devlink->trap_group_list, list)
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_DEL);
+}
+
+static int
+devlink_trap_item_group_link(struct devlink *devlink,
+ struct devlink_trap_item *trap_item)
+{
+ u16 group_id = trap_item->trap->init_group_id;
+ struct devlink_trap_group_item *group_item;
+
+ group_item = devlink_trap_group_item_lookup_by_id(devlink, group_id);
+ if (WARN_ON_ONCE(!group_item))
+ return -EINVAL;
+
+ trap_item->group_item = group_item;
+
+ return 0;
+}
+
+static void devlink_trap_notify(struct devlink *devlink,
+ const struct devlink_trap_item *trap_item,
+ enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW &&
+ cmd != DEVLINK_CMD_TRAP_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_trap_fill(msg, devlink, trap_item, cmd, 0, 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void devlink_traps_notify_register(struct devlink *devlink)
+{
+ struct devlink_trap_item *trap_item;
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list)
+ devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
+}
+
+void devlink_traps_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_trap_item *trap_item;
+
+ list_for_each_entry_reverse(trap_item, &devlink->trap_list, list)
+ devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
+}
+
+static int
+devlink_trap_register(struct devlink *devlink,
+ const struct devlink_trap *trap, void *priv)
+{
+ struct devlink_trap_item *trap_item;
+ int err;
+
+ if (devlink_trap_item_lookup(devlink, trap->name))
+ return -EEXIST;
+
+ trap_item = kzalloc(sizeof(*trap_item), GFP_KERNEL);
+ if (!trap_item)
+ return -ENOMEM;
+
+ trap_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
+ if (!trap_item->stats) {
+ err = -ENOMEM;
+ goto err_stats_alloc;
+ }
+
+ trap_item->trap = trap;
+ trap_item->action = trap->init_action;
+ trap_item->priv = priv;
+
+ err = devlink_trap_item_group_link(devlink, trap_item);
+ if (err)
+ goto err_group_link;
+
+ err = devlink->ops->trap_init(devlink, trap, trap_item);
+ if (err)
+ goto err_trap_init;
+
+ list_add_tail(&trap_item->list, &devlink->trap_list);
+ devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW);
+
+ return 0;
+
+err_trap_init:
+err_group_link:
+ free_percpu(trap_item->stats);
+err_stats_alloc:
+ kfree(trap_item);
+ return err;
+}
+
+static void devlink_trap_unregister(struct devlink *devlink,
+ const struct devlink_trap *trap)
+{
+ struct devlink_trap_item *trap_item;
+
+ trap_item = devlink_trap_item_lookup(devlink, trap->name);
+ if (WARN_ON_ONCE(!trap_item))
+ return;
+
+ devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL);
+ list_del(&trap_item->list);
+ if (devlink->ops->trap_fini)
+ devlink->ops->trap_fini(devlink, trap, trap_item);
+ free_percpu(trap_item->stats);
+ kfree(trap_item);
+}
+
+static void devlink_trap_disable(struct devlink *devlink,
+ const struct devlink_trap *trap)
+{
+ struct devlink_trap_item *trap_item;
+
+ trap_item = devlink_trap_item_lookup(devlink, trap->name);
+ if (WARN_ON_ONCE(!trap_item))
+ return;
+
+ devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP,
+ NULL);
+ trap_item->action = DEVLINK_TRAP_ACTION_DROP;
+}
+
+/**
+ * devl_traps_register - Register packet traps with devlink.
+ * @devlink: devlink.
+ * @traps: Packet traps.
+ * @traps_count: Count of provided packet traps.
+ * @priv: Driver private information.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devl_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv)
+{
+ int i, err;
+
+ if (!devlink->ops->trap_init || !devlink->ops->trap_action_set)
+ return -EINVAL;
+
+ devl_assert_locked(devlink);
+ for (i = 0; i < traps_count; i++) {
+ const struct devlink_trap *trap = &traps[i];
+
+ err = devlink_trap_verify(trap);
+ if (err)
+ goto err_trap_verify;
+
+ err = devlink_trap_register(devlink, trap, priv);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+err_trap_verify:
+ for (i--; i >= 0; i--)
+ devlink_trap_unregister(devlink, &traps[i]);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devl_traps_register);
+
+/**
+ * devlink_traps_register - Register packet traps with devlink.
+ * @devlink: devlink.
+ * @traps: Packet traps.
+ * @traps_count: Count of provided packet traps.
+ * @priv: Driver private information.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devlink_traps_register(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count, void *priv)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_traps_register(devlink, traps, traps_count, priv);
+ devl_unlock(devlink);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_traps_register);
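
For illustration, a driver wiring one generic trap into a generic group might look like the sketch below. The foo_* names are hypothetical; the devlink_trap/devlink_trap_group field names, generic IDs and actions are the ones used in this file, and the group registration pair is assumed to be devlink_trap_groups_register()/devlink_trap_groups_unregister() defined further down in this file:

static const struct devlink_trap_group foo_trap_groups[] = {
	{
		.name = DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_EXCEPTIONS,
		.id = DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS,
		.generic = true,
		.init_policer_id = 0,	/* 0 = no policer bound */
	},
};

static const struct devlink_trap foo_traps[] = {
	{
		.type = DEVLINK_TRAP_TYPE_EXCEPTION,
		.init_action = DEVLINK_TRAP_ACTION_TRAP,
		.generic = true,
		.id = DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
		.name = DEVLINK_TRAP_GENERIC_NAME_TTL_ERROR,
		.init_group_id = DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS,
		.metadata_cap = DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT,
	},
};

static int foo_traps_init(struct foo *foo, struct devlink *devlink)
{
	int err;

	/* devlink->ops must provide trap_init() and trap_action_set(),
	 * otherwise the registration below fails with -EINVAL.
	 */
	err = devlink_trap_groups_register(devlink, foo_trap_groups,
					   ARRAY_SIZE(foo_trap_groups));
	if (err)
		return err;

	err = devlink_traps_register(devlink, foo_traps, ARRAY_SIZE(foo_traps),
				     foo);
	if (err)
		devlink_trap_groups_unregister(devlink, foo_trap_groups,
					       ARRAY_SIZE(foo_trap_groups));
	return err;
}
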
+
+/**
+ * devl_traps_unregister - Unregister packet traps from devlink.
+ * @devlink: devlink.
+ * @traps: Packet traps.
+ * @traps_count: Count of provided packet traps.
+ */
+void devl_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count)
+{
+ int i;
+
+ devl_assert_locked(devlink);
+ /* Make sure we do not have any packets in-flight while unregistering
+ * traps by disabling all of them and waiting for a grace period.
+ */
+ for (i = traps_count - 1; i >= 0; i--)
+ devlink_trap_disable(devlink, &traps[i]);
+ synchronize_rcu();
+ for (i = traps_count - 1; i >= 0; i--)
+ devlink_trap_unregister(devlink, &traps[i]);
+}
+EXPORT_SYMBOL_GPL(devl_traps_unregister);
+
+/**
+ * devlink_traps_unregister - Unregister packet traps from devlink.
+ * @devlink: devlink.
+ * @traps: Packet traps.
+ * @traps_count: Count of provided packet traps.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_traps_unregister(struct devlink *devlink,
+ const struct devlink_trap *traps,
+ size_t traps_count)
+{
+ devl_lock(devlink);
+ devl_traps_unregister(devlink, traps, traps_count);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_traps_unregister);
+
+static void
+devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
+ size_t skb_len)
+{
+ struct devlink_stats *stats;
+
+ stats = this_cpu_ptr(trap_stats);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->rx_bytes, skb_len);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_update_end(&stats->syncp);
+}
+
+static void
+devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
+ const struct devlink_trap_item *trap_item,
+ struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie)
+{
+ metadata->trap_name = trap_item->trap->name;
+ metadata->trap_group_name = trap_item->group_item->group->name;
+ metadata->fa_cookie = fa_cookie;
+ metadata->trap_type = trap_item->trap->type;
+
+ spin_lock(&in_devlink_port->type_lock);
+ if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
+ metadata->input_dev = in_devlink_port->type_eth.netdev;
+ spin_unlock(&in_devlink_port->type_lock);
+}
+
+/**
+ * devlink_trap_report - Report trapped packet to drop monitor.
+ * @devlink: devlink.
+ * @skb: Trapped packet.
+ * @trap_ctx: Trap context.
+ * @in_devlink_port: Input devlink port.
+ * @fa_cookie: Flow action cookie. Could be NULL.
+ */
+void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
+ void *trap_ctx, struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie)
+{
+ struct devlink_trap_item *trap_item = trap_ctx;
+
+ devlink_trap_stats_update(trap_item->stats, skb->len);
+ devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
+
+ if (tracepoint_enabled(devlink_trap_report)) {
+ struct devlink_trap_metadata metadata = {};
+
+ devlink_trap_report_metadata_set(&metadata, trap_item,
+ in_devlink_port, fa_cookie);
+ trace_devlink_trap_report(devlink, skb, &metadata);
+ }
+}
+EXPORT_SYMBOL_GPL(devlink_trap_report);
+
+/**
+ * devlink_trap_ctx_priv - Trap context to driver private information.
+ * @trap_ctx: Trap context.
+ *
+ * Return: Driver private information passed during registration.
+ */
+void *devlink_trap_ctx_priv(void *trap_ctx)
+{
+ struct devlink_trap_item *trap_item = trap_ctx;
+
+ return trap_item->priv;
+}
+EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv);
+
+static int
+devlink_trap_group_item_policer_link(struct devlink *devlink,
+ struct devlink_trap_group_item *group_item)
+{
+ u32 policer_id = group_item->group->init_policer_id;
+ struct devlink_trap_policer_item *policer_item;
+
+ if (policer_id == 0)
+ return 0;
+
+ policer_item = devlink_trap_policer_item_lookup(devlink, policer_id);
+ if (WARN_ON_ONCE(!policer_item))
+ return -EINVAL;
+
+ group_item->policer_item = policer_item;
+
+ return 0;
+}
+
+static int
+devlink_trap_group_register(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct devlink_trap_group_item *group_item;
+ int err;
+
+ if (devlink_trap_group_item_lookup(devlink, group->name))
+ return -EEXIST;
+
+ group_item = kzalloc(sizeof(*group_item), GFP_KERNEL);
+ if (!group_item)
+ return -ENOMEM;
+
+ group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats);
+ if (!group_item->stats) {
+ err = -ENOMEM;
+ goto err_stats_alloc;
+ }
+
+ group_item->group = group;
+
+ err = devlink_trap_group_item_policer_link(devlink, group_item);
+ if (err)
+ goto err_policer_link;
+
+ if (devlink->ops->trap_group_init) {
+ err = devlink->ops->trap_group_init(devlink, group);
+ if (err)
+ goto err_group_init;
+ }
+
+ list_add_tail(&group_item->list, &devlink->trap_group_list);
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_NEW);
+
+ return 0;
+
+err_group_init:
+err_policer_link:
+ free_percpu(group_item->stats);
+err_stats_alloc:
+ kfree(group_item);
+ return err;
+}
+
+static void
+devlink_trap_group_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct devlink_trap_group_item *group_item;
+
+ group_item = devlink_trap_group_item_lookup(devlink, group->name);
+ if (WARN_ON_ONCE(!group_item))
+ return;
+
+ devlink_trap_group_notify(devlink, group_item,
+ DEVLINK_CMD_TRAP_GROUP_DEL);
+ list_del(&group_item->list);
+ free_percpu(group_item->stats);
+ kfree(group_item);
+}
+
+/**
+ * devl_trap_groups_register - Register packet trap groups with devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devl_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ int i, err;
+
+ devl_assert_locked(devlink);
+ for (i = 0; i < groups_count; i++) {
+ const struct devlink_trap_group *group = &groups[i];
+
+ err = devlink_trap_group_verify(group);
+ if (err)
+ goto err_trap_group_verify;
+
+ err = devlink_trap_group_register(devlink, group);
+ if (err)
+ goto err_trap_group_register;
+ }
+
+ return 0;
+
+err_trap_group_register:
+err_trap_group_verify:
+ for (i--; i >= 0; i--)
+ devlink_trap_group_unregister(devlink, &groups[i]);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devl_trap_groups_register);
+
+/**
+ * devlink_trap_groups_register - Register packet trap groups with devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ *
+ * Return: Non-zero value on failure.
+ */
+int devlink_trap_groups_register(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ int err;
+
+ devl_lock(devlink);
+ err = devl_trap_groups_register(devlink, groups, groups_count);
+ devl_unlock(devlink);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devlink_trap_groups_register);
+
+/**
+ * devl_trap_groups_unregister - Unregister packet trap groups from devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ */
+void devl_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ int i;
+
+ devl_assert_locked(devlink);
+ for (i = groups_count - 1; i >= 0; i--)
+ devlink_trap_group_unregister(devlink, &groups[i]);
+}
+EXPORT_SYMBOL_GPL(devl_trap_groups_unregister);
+
+/**
+ * devlink_trap_groups_unregister - Unregister packet trap groups from devlink.
+ * @devlink: devlink.
+ * @groups: Packet trap groups.
+ * @groups_count: Count of provided packet trap groups.
+ *
+ * Context: Takes and releases devlink->lock <mutex>.
+ */
+void devlink_trap_groups_unregister(struct devlink *devlink,
+ const struct devlink_trap_group *groups,
+ size_t groups_count)
+{
+ devl_lock(devlink);
+ devl_trap_groups_unregister(devlink, groups, groups_count);
+ devl_unlock(devlink);
+}
+EXPORT_SYMBOL_GPL(devlink_trap_groups_unregister);
+
+static void
+devlink_trap_policer_notify(struct devlink *devlink,
+ const struct devlink_trap_policer_item *policer_item,
+ enum devlink_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_POLICER_NEW &&
+ cmd != DEVLINK_CMD_TRAP_POLICER_DEL);
+ if (!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED))
+ return;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ err = devlink_nl_trap_policer_fill(msg, devlink, policer_item, cmd, 0,
+ 0, 0);
+ if (err) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink),
+ msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL);
+}
+
+void devlink_trap_policers_notify_register(struct devlink *devlink)
+{
+ struct devlink_trap_policer_item *policer_item;
+
+ list_for_each_entry(policer_item, &devlink->trap_policer_list, list)
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW);
+}
+
+void devlink_trap_policers_notify_unregister(struct devlink *devlink)
+{
+ struct devlink_trap_policer_item *policer_item;
+
+ list_for_each_entry_reverse(policer_item, &devlink->trap_policer_list,
+ list)
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_DEL);
+}
+
+static int
+devlink_trap_policer_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct devlink_trap_policer_item *policer_item;
+ int err;
+
+ if (devlink_trap_policer_item_lookup(devlink, policer->id))
+ return -EEXIST;
+
+ policer_item = kzalloc(sizeof(*policer_item), GFP_KERNEL);
+ if (!policer_item)
+ return -ENOMEM;
+
+ policer_item->policer = policer;
+ policer_item->rate = policer->init_rate;
+ policer_item->burst = policer->init_burst;
+
+ if (devlink->ops->trap_policer_init) {
+ err = devlink->ops->trap_policer_init(devlink, policer);
+ if (err)
+ goto err_policer_init;
+ }
+
+ list_add_tail(&policer_item->list, &devlink->trap_policer_list);
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_NEW);
+
+ return 0;
+
+err_policer_init:
+ kfree(policer_item);
+ return err;
+}
+
+static void
+devlink_trap_policer_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policer)
+{
+ struct devlink_trap_policer_item *policer_item;
+
+ policer_item = devlink_trap_policer_item_lookup(devlink, policer->id);
+ if (WARN_ON_ONCE(!policer_item))
+ return;
+
+ devlink_trap_policer_notify(devlink, policer_item,
+ DEVLINK_CMD_TRAP_POLICER_DEL);
+ list_del(&policer_item->list);
+ if (devlink->ops->trap_policer_fini)
+ devlink->ops->trap_policer_fini(devlink, policer);
+ kfree(policer_item);
+}
+
+/**
+ * devl_trap_policers_register - Register packet trap policers with devlink.
+ * @devlink: devlink.
+ * @policers: Packet trap policers.
+ * @policers_count: Count of provided packet trap policers.
+ *
+ * Return: Non-zero value on failure.
+ */
+int
+devl_trap_policers_register(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count)
+{
+ int i, err;
+
+ devl_assert_locked(devlink);
+ for (i = 0; i < policers_count; i++) {
+ const struct devlink_trap_policer *policer = &policers[i];
+
+ if (WARN_ON(policer->id == 0 ||
+ policer->max_rate < policer->min_rate ||
+ policer->max_burst < policer->min_burst)) {
+ err = -EINVAL;
+ goto err_trap_policer_verify;
+ }
+
+ err = devlink_trap_policer_register(devlink, policer);
+ if (err)
+ goto err_trap_policer_register;
+ }
+ return 0;
+
+err_trap_policer_register:
+err_trap_policer_verify:
+ for (i--; i >= 0; i--)
+ devlink_trap_policer_unregister(devlink, &policers[i]);
+ return err;
+}
+EXPORT_SYMBOL_GPL(devl_trap_policers_register);
+
+/**
+ * devl_trap_policers_unregister - Unregister packet trap policers from devlink.
+ * @devlink: devlink.
+ * @policers: Packet trap policers.
+ * @policers_count: Count of provided packet trap policers.
+ */
+void
+devl_trap_policers_unregister(struct devlink *devlink,
+ const struct devlink_trap_policer *policers,
+ size_t policers_count)
+{
+ int i;
+
+ devl_assert_locked(devlink);
+ for (i = policers_count - 1; i >= 0; i--)
+ devlink_trap_policer_unregister(devlink, &policers[i]);
+}
+EXPORT_SYMBOL_GPL(devl_trap_policers_unregister);
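As a usage sketch of the devl_* trap API above (illustrative only, not part of this merge; the arrays, counts and priv pointer are hypothetical placeholders supplied by the caller): a driver registers policers first, since a group may reference one via init_policer_id, then groups, since each trap is linked to its group by name, and the traps themselves last, all under the instance lock, tearing down in reverse order.

#include <net/devlink.h>

/*
 * Illustrative sketch only; not from this patch. Only the devl_* calls and
 * the ordering requirement come from the code above.
 */
static int example_traps_init(struct devlink *devlink,
                              const struct devlink_trap_policer *policers,
                              size_t n_policers,
                              const struct devlink_trap_group *groups,
                              size_t n_groups,
                              const struct devlink_trap *traps,
                              size_t n_traps, void *priv)
{
        int err;

        devl_lock(devlink);

        /* Policers first: a group may reference one via init_policer_id. */
        err = devl_trap_policers_register(devlink, policers, n_policers);
        if (err)
                goto err_unlock;

        /* Groups next: traps are linked to their group by name. */
        err = devl_trap_groups_register(devlink, groups, n_groups);
        if (err)
                goto err_policers;

        /* Traps last: priv is later returned by devlink_trap_ctx_priv(). */
        err = devl_traps_register(devlink, traps, n_traps, priv);
        if (err)
                goto err_groups;

        devl_unlock(devlink);
        return 0;

err_groups:
        devl_trap_groups_unregister(devlink, groups, n_groups);
err_policers:
        devl_trap_policers_unregister(devlink, policers, n_policers);
err_unlock:
        devl_unlock(devlink);
        return err;
}

On the datapath side, the driver's trap handler then calls devlink_trap_report() for each trapped skb and can recover its private state with devlink_trap_ctx_priv().
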
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 54ad0f0d5c2d..d1c73660b844 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1007,12 +1007,10 @@ int do_ip_setsockopt(struct sock *sk, int level, int optname,
return 0;
case IP_TRANSPARENT:
if (!!val && !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
- !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
- err = -EPERM;
- break;
- }
+ !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
if (optlen < 1)
- goto e_inval;
+ return -EINVAL;
inet_assign_bit(TRANSPARENT, sk, val);
return 0;
case IP_NODEFRAG:
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5bc6b1329465..5cec0c251e86 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -470,6 +470,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta = NULL;
struct ieee80211_key *key;
+ int err;
lockdep_assert_wiphy(local->hw.wiphy);
@@ -561,7 +562,12 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
break;
}
- return ieee80211_key_link(key, link, sta);
+ err = ieee80211_key_link(key, link, sta);
+ /* KRACK protection, shouldn't happen but just silently accept key */
+ if (err == -EALREADY)
+ err = 0;
+
+ return err;
}
static struct ieee80211_key *
@@ -1836,7 +1842,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
/* VHT can override some HT caps such as the A-MSDU max length */
if (params->vht_capa)
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- params->vht_capa, link_sta);
+ params->vht_capa, NULL,
+ link_sta);
if (params->he_capa)
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a7736acadf3c..8b1e02f2f9ae 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1046,7 +1046,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
&chandef);
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- &cap_ie,
+ &cap_ie, NULL,
&sta->deflink);
if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
rates_updated |= true;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 19429f84afc3..e92eaf835ee0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -698,7 +698,7 @@ struct ieee80211_if_mesh {
struct timer_list mesh_path_root_timer;
unsigned long wrkq_flags;
- unsigned long mbss_changed;
+ unsigned long mbss_changed[64 / BITS_PER_LONG];
bool userspace_handles_dfs;
@@ -2103,6 +2103,7 @@ void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
+ const struct ieee80211_vht_cap *vht_cap_ie2,
struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ac410f6632b5..e0ff3a753e15 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -799,6 +799,9 @@ static void ieee80211_key_destroy(struct ieee80211_key *key,
void ieee80211_key_free_unused(struct ieee80211_key *key)
{
+ if (!key)
+ return;
+
WARN_ON(key->sdata || key->local);
ieee80211_key_free_common(key);
}
@@ -867,8 +870,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
* the same cipher. Enforce the assumption for pairwise keys.
*/
if ((alt_key && alt_key->conf.cipher != key->conf.cipher) ||
- (old_key && old_key->conf.cipher != key->conf.cipher))
- return -EOPNOTSUPP;
+ (old_key && old_key->conf.cipher != key->conf.cipher)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
} else if (sta) {
struct link_sta_info *link_sta = &sta->deflink;
int link_id = key->conf.link_id;
@@ -893,8 +898,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
/* Non-pairwise keys must also not switch the cipher on rekey */
if (!pairwise) {
- if (old_key && old_key->conf.cipher != key->conf.cipher)
- return -EOPNOTSUPP;
+ if (old_key && old_key->conf.cipher != key->conf.cipher) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
}
/*
@@ -902,8 +909,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
* new version of the key to avoid nonce reuse or replay issues.
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
- ieee80211_key_free_unused(key);
- return 0;
+ ret = -EALREADY;
+ goto out;
}
key->local = sdata->local;
@@ -927,6 +934,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
ieee80211_key_free(key, delay_tailroom);
}
+ key = NULL;
+
+ out:
+ ieee80211_key_free_unused(key);
return ret;
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 0d0fbae51b61..092a1dc7314d 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1175,7 +1175,7 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
/* if we race with running work, worst case this work becomes a noop */
for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
- set_bit(bit, &ifmsh->mbss_changed);
+ set_bit(bit, ifmsh->mbss_changed);
set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
@@ -1257,7 +1257,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
/* clear any mesh work (for next join) we may have accrued */
ifmsh->wrkq_flags = 0;
- ifmsh->mbss_changed = 0;
+ memset(ifmsh->mbss_changed, 0, sizeof(ifmsh->mbss_changed));
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
@@ -1722,9 +1722,9 @@ static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
u32 bit;
u64 changed = 0;
- for_each_set_bit(bit, &ifmsh->mbss_changed,
+ for_each_set_bit(bit, ifmsh->mbss_changed,
sizeof(changed) * BITS_PER_BYTE) {
- clear_bit(bit, &ifmsh->mbss_changed);
+ clear_bit(bit, ifmsh->mbss_changed);
changed |= BIT(bit);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f3d5bb0a59f1..a1e526419e9d 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -451,7 +451,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
changed |= IEEE80211_RC_BW_CHANGED;
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- elems->vht_cap_elem,
+ elems->vht_cap_elem, NULL,
&sta->deflink);
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2ac36ad9fa91..8d2514a9a6c4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4166,10 +4166,33 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
elems->ht_cap_elem,
link_sta);
- if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT))
+ if (elems->vht_cap_elem &&
+ !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ const struct ieee80211_vht_cap *bss_vht_cap = NULL;
+ const struct cfg80211_bss_ies *ies;
+
+ /*
+ * Cisco AP module 9115 with FW 17.3 has a bug and sends a
+ * too large maximum MPDU length in the association response
+ * (indicating 12k) that it cannot actually process ...
+ * Work around that.
+ */
+ rcu_read_lock();
+ ies = rcu_dereference(cbss->ies);
+ if (ies) {
+ const struct element *elem;
+
+ elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY,
+ ies->data, ies->len);
+ if (elem && elem->datalen >= sizeof(*bss_vht_cap))
+ bss_vht_cap = (const void *)elem->data;
+ }
+
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
elems->vht_cap_elem,
- link_sta);
+ bss_vht_cap, link_sta);
+ rcu_read_unlock();
+ }
if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
elems->he_cap) {
@@ -5078,9 +5101,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
continue;
valid_links |= BIT(link_id);
- if (assoc_data->link[link_id].disabled) {
+ if (assoc_data->link[link_id].disabled)
dormant_links |= BIT(link_id);
- } else if (link_id != assoc_data->assoc_link_id) {
+
+ if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_sta_allocate_link(sta, link_id);
if (err)
goto out_err;
@@ -5095,7 +5119,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link;
struct link_sta_info *link_sta;
- if (!cbss || assoc_data->link[link_id].disabled)
+ if (!cbss)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -5397,17 +5421,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct ieee80211_link_data *link;
- link = sdata_dereference(sdata->link[link_id], sdata);
- if (!link)
- continue;
-
if (!assoc_data->link[link_id].bss)
continue;
resp.links[link_id].bss = assoc_data->link[link_id].bss;
- resp.links[link_id].addr = link->conf->addr;
+ ether_addr_copy(resp.links[link_id].addr,
+ assoc_data->link[link_id].addr);
resp.links[link_id].status = assoc_data->link[link_id].status;
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
/* get uapsd queues configuration - same for all links */
resp.uapsd_queues = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a984fc54644e..ed4fdf655343 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -661,7 +661,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
}
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
- !ieee80211_is_deauth(hdr->frame_control)))
+ !ieee80211_is_deauth(hdr->frame_control)) &&
+ tx->skb->protocol != tx->sdata->control_port_protocol)
return TX_DROP;
if (!skip_hw && tx->key &&
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index c1250aa47808..b3a5c3e96a72 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -116,12 +116,14 @@ void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
+ const struct ieee80211_vht_cap *vht_cap_ie2,
struct link_sta_info *link_sta)
{
struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
bool have_80mhz;
+ u32 mpdu_len;
memset(vht_cap, 0, sizeof(*vht_cap));
@@ -318,10 +320,20 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
/*
+ * Work around the Cisco 9115 FW 17.3 bug by taking the min of
+ * both reported MPDU lengths.
+ */
+ mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
+ if (vht_cap_ie2)
+ mpdu_len = min_t(u32, mpdu_len,
+ le32_get_bits(vht_cap_ie2->vht_cap_info,
+ IEEE80211_VHT_CAP_MAX_MPDU_MASK));
+
+ /*
* FIXME - should the amsdu len be per link? store per link
* and maintain a minimum?
*/
- switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ switch (mpdu_len) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
break;
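A standalone restatement of the "take the minimum of both advertised values" logic in the vht.c hunk above; the helper is hypothetical and assumes the usual ieee80211 headers.

/* Hypothetical helper, for illustration only. */
static u32 example_effective_max_mpdu_code(u32 assoc_resp_cap_info,
                                           u32 beacon_cap_info)
{
        u32 a = assoc_resp_cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
        u32 b = beacon_cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK;

        /*
         * The two-bit encoding is ordered (3895, 7991, 11454 octets), so the
         * smaller code is also the smaller MPDU limit; a bogus 11454 in the
         * association response loses to a sane value from the beacon.
         */
        return min(a, b);
}
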
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index eb8ccbd58df7..96e91ab71573 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -660,6 +660,11 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
goto out_release;
}
+ if (sock->state == SS_CONNECTING) {
+ err = -EALREADY;
+ goto out_release;
+ }
+
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 01fca7a10b4b..08630896b6c8 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -48,6 +48,7 @@ struct rfkill {
bool persistent;
bool polling_paused;
bool suspended;
+ bool need_sync;
const struct rfkill_ops *ops;
void *data;
@@ -368,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
rfkill_event(rfkill);
}
+static void rfkill_sync(struct rfkill *rfkill)
+{
+ lockdep_assert_held(&rfkill_global_mutex);
+
+ if (!rfkill->need_sync)
+ return;
+
+ rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
+ rfkill->need_sync = false;
+}
+
static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
int i;
@@ -730,6 +742,10 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+
return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}
@@ -751,6 +767,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
rfkill_set_block(rfkill, state);
mutex_unlock(&rfkill_global_mutex);
@@ -783,6 +800,10 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+
return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
@@ -805,6 +826,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
mutex_unlock(&rfkill_global_mutex);
@@ -1032,14 +1054,10 @@ static void rfkill_uevent_work(struct work_struct *work)
static void rfkill_sync_work(struct work_struct *work)
{
- struct rfkill *rfkill;
- bool cur;
-
- rfkill = container_of(work, struct rfkill, sync_work);
+ struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
mutex_lock(&rfkill_global_mutex);
- cur = rfkill_global_states[rfkill->type].cur;
- rfkill_set_block(rfkill, cur);
+ rfkill_sync(rfkill);
mutex_unlock(&rfkill_global_mutex);
}
@@ -1087,6 +1105,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) {
+ rfkill->need_sync = true;
schedule_work(&rfkill->sync_work);
} else {
#ifdef CONFIG_RFKILL_INPUT
@@ -1171,6 +1190,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
goto free;
+ rfkill_sync(rfkill);
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
list_add_tail(&ev->list, &data->events);
}
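The need_sync flag plus rfkill_sync() above lets state reads catch up lazily under rfkill_global_mutex instead of waiting for the scheduled sync work. A sketch of the pattern a new state-reporting path would be expected to follow (the helper below is hypothetical):

/* Hypothetical reader, mirroring the sysfs handlers above. */
static bool example_rfkill_soft_blocked(struct rfkill *rfkill)
{
        bool blocked;

        mutex_lock(&rfkill_global_mutex);
        /* Bring a freshly registered device up to date before reporting. */
        rfkill_sync(rfkill);
        blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
        mutex_unlock(&rfkill_global_mutex);

        return blocked;
}
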
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 98805303218d..3554085bc2be 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1011,6 +1011,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (parent == NULL)
return -ENOENT;
}
+ if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+ NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
+ return -EINVAL;
+ }
if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
return -EINVAL;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 0b6034fab9ab..f420d8457345 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -472,7 +472,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
return NULL;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 164d6a955e26..28a8c0e80e3c 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -51,6 +51,59 @@
#define TLS_DEC_STATS(net, field) \
SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+struct tls_cipher_desc {
+ unsigned int nonce;
+ unsigned int iv;
+ unsigned int key;
+ unsigned int salt;
+ unsigned int tag;
+ unsigned int rec_seq;
+ unsigned int iv_offset;
+ unsigned int key_offset;
+ unsigned int salt_offset;
+ unsigned int rec_seq_offset;
+ char *cipher_name;
+ bool offloadable;
+ size_t crypto_info;
+};
+
+#define TLS_CIPHER_MIN TLS_CIPHER_AES_GCM_128
+#define TLS_CIPHER_MAX TLS_CIPHER_ARIA_GCM_256
+extern const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN];
+
+static inline const struct tls_cipher_desc *get_cipher_desc(u16 cipher_type)
+{
+ if (cipher_type < TLS_CIPHER_MIN || cipher_type > TLS_CIPHER_MAX)
+ return NULL;
+
+ return &tls_cipher_desc[cipher_type - TLS_CIPHER_MIN];
+}
+
+static inline char *crypto_info_iv(struct tls_crypto_info *crypto_info,
+ const struct tls_cipher_desc *cipher_desc)
+{
+ return (char *)crypto_info + cipher_desc->iv_offset;
+}
+
+static inline char *crypto_info_key(struct tls_crypto_info *crypto_info,
+ const struct tls_cipher_desc *cipher_desc)
+{
+ return (char *)crypto_info + cipher_desc->key_offset;
+}
+
+static inline char *crypto_info_salt(struct tls_crypto_info *crypto_info,
+ const struct tls_cipher_desc *cipher_desc)
+{
+ return (char *)crypto_info + cipher_desc->salt_offset;
+}
+
+static inline char *crypto_info_rec_seq(struct tls_crypto_info *crypto_info,
+ const struct tls_cipher_desc *cipher_desc)
+{
+ return (char *)crypto_info + cipher_desc->rec_seq_offset;
+}
+
+
/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
* allocated or mapped for each TLS record. After encryption, the records are
* stored in a linked list.
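A short usage sketch of how the descriptor table above is meant to be consumed: look up the cipher once, validate the user-supplied length against ->crypto_info, then use the offset-based accessors instead of a per-cipher switch. The helper below is hypothetical and assumes the usual kernel headers.

/* Hypothetical helper, for illustration only. */
static int example_copy_key(struct tls_crypto_info *crypto_info,
                            size_t optlen, u8 *out, size_t out_len)
{
        const struct tls_cipher_desc *cipher_desc;

        cipher_desc = get_cipher_desc(crypto_info->cipher_type);
        if (!cipher_desc || optlen != cipher_desc->crypto_info)
                return -EINVAL;

        if (out_len < cipher_desc->key)
                return -EINVAL;

        /* key_offset resolves into the cipher-specific tls12_crypto_info_*. */
        memcpy(out, crypto_info_key(crypto_info, cipher_desc),
               cipher_desc->key);
        return 0;
}

The converted do_tls_getsockopt_conf() and tls_set_sw_offload() later in this series follow the same shape.
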
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 2392d06845aa..8c94c926606a 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -884,7 +884,7 @@ static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{
struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
- const struct tls_cipher_size_desc *cipher_sz;
+ const struct tls_cipher_desc *cipher_desc;
int err, offset, copy, data_len, pos;
struct sk_buff *skb, *skb_iter;
struct scatterlist sg[1];
@@ -898,10 +898,10 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
default:
return -EINVAL;
}
- cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
+ cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);
rxm = strp_msg(tls_strp_msg(sw_ctx));
- orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
+ orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
sk->sk_allocation);
if (!orig_buf)
return -ENOMEM;
@@ -917,8 +917,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
sg_init_table(sg, 1);
sg_set_buf(&sg[0], buf,
- rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
- err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
+ rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
+ err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
if (err)
goto free_buf;
@@ -929,7 +929,7 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
else
err = 0;
- data_len = rxm->full_len - cipher_sz->tag;
+ data_len = rxm->full_len - cipher_desc->tag;
if (skb_pagelen(skb) > offset) {
copy = min_t(int, skb_pagelen(skb) - offset, data_len);
@@ -1046,7 +1046,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
- const struct tls_cipher_size_desc *cipher_sz;
+ const struct tls_cipher_desc *cipher_desc;
struct tls_record_info *start_marker_record;
struct tls_offload_context_tx *offload_ctx;
struct tls_crypto_info *crypto_info;
@@ -1079,46 +1079,32 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto release_netdev;
}
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
- rec_seq =
- ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
- break;
- case TLS_CIPHER_AES_GCM_256:
- iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
- rec_seq =
- ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
- break;
- default:
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ if (!cipher_desc || !cipher_desc->offloadable) {
rc = -EINVAL;
goto release_netdev;
}
- cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
- /* Sanity-check the rec_seq_size for stack allocations */
- if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
- rc = -EINVAL;
- goto release_netdev;
- }
+ iv = crypto_info_iv(crypto_info, cipher_desc);
+ rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type;
- prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
- prot->tag_size = cipher_sz->tag;
+ prot->prepend_size = TLS_HEADER_SIZE + cipher_desc->iv;
+ prot->tag_size = cipher_desc->tag;
prot->overhead_size = prot->prepend_size + prot->tag_size;
- prot->iv_size = cipher_sz->iv;
- prot->salt_size = cipher_sz->salt;
- ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
+ prot->iv_size = cipher_desc->iv;
+ prot->salt_size = cipher_desc->salt;
+ ctx->tx.iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
if (!ctx->tx.iv) {
rc = -ENOMEM;
goto release_netdev;
}
- memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
+ memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
- prot->rec_seq_size = cipher_sz->rec_seq;
- ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
+ prot->rec_seq_size = cipher_desc->rec_seq;
+ ctx->tx.rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
if (!ctx->tx.rec_seq) {
rc = -ENOMEM;
goto free_iv;
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index b28c5e296dfd..1d743f310f4f 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -55,7 +55,7 @@ static int tls_enc_record(struct aead_request *aead_req,
struct tls_prot_info *prot)
{
unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
- const struct tls_cipher_size_desc *cipher_sz;
+ const struct tls_cipher_desc *cipher_desc;
struct scatterlist sg_in[3];
struct scatterlist sg_out[3];
unsigned int buf_size;
@@ -69,9 +69,9 @@ static int tls_enc_record(struct aead_request *aead_req,
default:
return -EINVAL;
}
- cipher_sz = &tls_cipher_size_desc[prot->cipher_type];
+ cipher_desc = get_cipher_desc(prot->cipher_type);
- buf_size = TLS_HEADER_SIZE + cipher_sz->iv;
+ buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
len = min_t(int, *in_len, buf_size);
scatterwalk_copychunks(buf, in, len, 0);
@@ -85,11 +85,11 @@ static int tls_enc_record(struct aead_request *aead_req,
scatterwalk_pagedone(out, 1, 1);
len = buf[4] | (buf[3] << 8);
- len -= cipher_sz->iv;
+ len -= cipher_desc->iv;
- tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot);
+ tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);
- memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv);
+ memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);
sg_init_table(sg_in, ARRAY_SIZE(sg_in));
sg_init_table(sg_out, ARRAY_SIZE(sg_out));
@@ -100,7 +100,7 @@ static int tls_enc_record(struct aead_request *aead_req,
*in_len -= len;
if (*in_len < 0) {
- *in_len += cipher_sz->tag;
+ *in_len += cipher_desc->tag;
/* the input buffer doesn't contain the entire record.
* trim len accordingly. The resulting authentication tag
* will contain garbage, but we don't care, so we won't
@@ -121,7 +121,7 @@ static int tls_enc_record(struct aead_request *aead_req,
scatterwalk_pagedone(out, 1, 1);
}
- len -= cipher_sz->tag;
+ len -= cipher_desc->tag;
aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);
rc = crypto_aead_encrypt(aead_req);
@@ -309,14 +309,14 @@ static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
int sync_size,
void *dummy_buf)
{
- const struct tls_cipher_size_desc *cipher_sz =
- &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
+ const struct tls_cipher_desc *cipher_desc =
+ get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
sg_set_buf(&sg_out[0], dummy_buf, sync_size);
sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
/* Add room for authentication tag produced by crypto */
dummy_buf += sync_size;
- sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag);
+ sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
}
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
@@ -328,7 +328,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
int tcp_payload_offset = skb_tcp_all_headers(skb);
int payload_len = skb->len - tcp_payload_offset;
- const struct tls_cipher_size_desc *cipher_sz;
+ const struct tls_cipher_desc *cipher_desc;
void *buf, *iv, *aad, *dummy_buf, *salt;
struct aead_request *aead_req;
struct sk_buff *nskb = NULL;
@@ -348,16 +348,16 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
default:
goto free_req;
}
- cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
- buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
- sync_size + cipher_sz->tag;
+ cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
+ buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
+ sync_size + cipher_desc->tag;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (!buf)
goto free_req;
iv = buf;
- memcpy(iv, salt, cipher_sz->salt);
- aad = buf + cipher_sz->salt + cipher_sz->iv;
+ memcpy(iv, salt, cipher_desc->salt);
+ aad = buf + cipher_desc->salt + cipher_desc->iv;
dummy_buf = aad + TLS_AAD_SPACE_SIZE;
nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
@@ -471,12 +471,15 @@ int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info)
{
- const struct tls_cipher_size_desc *cipher_sz;
- const u8 *key;
+ const struct tls_cipher_desc *cipher_desc;
int rc;
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ if (!cipher_desc || !cipher_desc->offloadable)
+ return -EINVAL;
+
offload_ctx->aead_send =
- crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(offload_ctx->aead_send)) {
rc = PTR_ERR(offload_ctx->aead_send);
pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
@@ -484,24 +487,13 @@ int tls_sw_fallback_init(struct sock *sk,
goto err_out;
}
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
- break;
- case TLS_CIPHER_AES_GCM_256:
- key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
- break;
- default:
- rc = -EINVAL;
- goto free_aead;
- }
- cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];
-
- rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key);
+ rc = crypto_aead_setkey(offload_ctx->aead_send,
+ crypto_info_key(crypto_info, cipher_desc),
+ cipher_desc->key);
if (rc)
goto free_aead;
- rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag);
+ rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
if (rc)
goto free_aead;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index f550c84f3408..02f583ff9239 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -58,23 +58,66 @@ enum {
TLS_NUM_PROTS,
};
-#define CIPHER_SIZE_DESC(cipher) [cipher] = { \
+#define CHECK_CIPHER_DESC(cipher,ci) \
+ static_assert(cipher ## _IV_SIZE <= MAX_IV_SIZE); \
+ static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE); \
+ static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE); \
+ static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE); \
+ static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE); \
+ static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE); \
+ static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);
+
+#define __CIPHER_DESC(ci) \
+ .iv_offset = offsetof(struct ci, iv), \
+ .key_offset = offsetof(struct ci, key), \
+ .salt_offset = offsetof(struct ci, salt), \
+ .rec_seq_offset = offsetof(struct ci, rec_seq), \
+ .crypto_info = sizeof(struct ci)
+
+#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
+ .nonce = cipher ## _IV_SIZE, \
.iv = cipher ## _IV_SIZE, \
.key = cipher ## _KEY_SIZE, \
.salt = cipher ## _SALT_SIZE, \
.tag = cipher ## _TAG_SIZE, \
.rec_seq = cipher ## _REC_SEQ_SIZE, \
+ .cipher_name = algname, \
+ .offloadable = _offloadable, \
+ __CIPHER_DESC(ci), \
}
-const struct tls_cipher_size_desc tls_cipher_size_desc[] = {
- CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_128),
- CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_256),
- CIPHER_SIZE_DESC(TLS_CIPHER_AES_CCM_128),
- CIPHER_SIZE_DESC(TLS_CIPHER_CHACHA20_POLY1305),
- CIPHER_SIZE_DESC(TLS_CIPHER_SM4_GCM),
- CIPHER_SIZE_DESC(TLS_CIPHER_SM4_CCM),
+#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
+ .nonce = 0, \
+ .iv = cipher ## _IV_SIZE, \
+ .key = cipher ## _KEY_SIZE, \
+ .salt = cipher ## _SALT_SIZE, \
+ .tag = cipher ## _TAG_SIZE, \
+ .rec_seq = cipher ## _REC_SEQ_SIZE, \
+ .cipher_name = algname, \
+ .offloadable = _offloadable, \
+ __CIPHER_DESC(ci), \
+}
+
+const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
+ CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
+ CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
+ CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
+ CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
+ CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
+ CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
+ CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
+ CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
};
+CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
+CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
+CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
+CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
+CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
+CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
+CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
+CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);
+
static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
@@ -392,6 +435,7 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
int __user *optlen, int tx)
{
int rc = 0;
+ const struct tls_cipher_desc *cipher_desc;
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_crypto_info *crypto_info;
struct cipher_context *cctx;
@@ -430,172 +474,19 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
goto out;
}
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *
- crypto_info_aes_gcm_128 =
- container_of(crypto_info,
- struct tls12_crypto_info_aes_gcm_128,
- info);
-
- if (len != sizeof(*crypto_info_aes_gcm_128)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(crypto_info_aes_gcm_128->iv,
- cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- TLS_CIPHER_AES_GCM_128_IV_SIZE);
- memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
- TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
- if (copy_to_user(optval,
- crypto_info_aes_gcm_128,
- sizeof(*crypto_info_aes_gcm_128)))
- rc = -EFAULT;
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *
- crypto_info_aes_gcm_256 =
- container_of(crypto_info,
- struct tls12_crypto_info_aes_gcm_256,
- info);
-
- if (len != sizeof(*crypto_info_aes_gcm_256)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(crypto_info_aes_gcm_256->iv,
- cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
- TLS_CIPHER_AES_GCM_256_IV_SIZE);
- memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
- TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
- if (copy_to_user(optval,
- crypto_info_aes_gcm_256,
- sizeof(*crypto_info_aes_gcm_256)))
- rc = -EFAULT;
- break;
- }
- case TLS_CIPHER_AES_CCM_128: {
- struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
- container_of(crypto_info,
- struct tls12_crypto_info_aes_ccm_128, info);
-
- if (len != sizeof(*aes_ccm_128)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(aes_ccm_128->iv,
- cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
- TLS_CIPHER_AES_CCM_128_IV_SIZE);
- memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
- TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
- if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
- rc = -EFAULT;
- break;
- }
- case TLS_CIPHER_CHACHA20_POLY1305: {
- struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
- container_of(crypto_info,
- struct tls12_crypto_info_chacha20_poly1305,
- info);
-
- if (len != sizeof(*chacha20_poly1305)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(chacha20_poly1305->iv,
- cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
- TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
- memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
- TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
- if (copy_to_user(optval, chacha20_poly1305,
- sizeof(*chacha20_poly1305)))
- rc = -EFAULT;
- break;
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ if (!cipher_desc || len != cipher_desc->crypto_info) {
+ rc = -EINVAL;
+ goto out;
}
- case TLS_CIPHER_SM4_GCM: {
- struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
- container_of(crypto_info,
- struct tls12_crypto_info_sm4_gcm, info);
- if (len != sizeof(*sm4_gcm_info)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(sm4_gcm_info->iv,
- cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
- TLS_CIPHER_SM4_GCM_IV_SIZE);
- memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
- TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
- if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
- rc = -EFAULT;
- break;
- }
- case TLS_CIPHER_SM4_CCM: {
- struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
- container_of(crypto_info,
- struct tls12_crypto_info_sm4_ccm, info);
+ memcpy(crypto_info_iv(crypto_info, cipher_desc),
+ cctx->iv + cipher_desc->salt, cipher_desc->iv);
+ memcpy(crypto_info_rec_seq(crypto_info, cipher_desc),
+ cctx->rec_seq, cipher_desc->rec_seq);
- if (len != sizeof(*sm4_ccm_info)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(sm4_ccm_info->iv,
- cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
- TLS_CIPHER_SM4_CCM_IV_SIZE);
- memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
- TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
- if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
- rc = -EFAULT;
- break;
- }
- case TLS_CIPHER_ARIA_GCM_128: {
- struct tls12_crypto_info_aria_gcm_128 *
- crypto_info_aria_gcm_128 =
- container_of(crypto_info,
- struct tls12_crypto_info_aria_gcm_128,
- info);
-
- if (len != sizeof(*crypto_info_aria_gcm_128)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(crypto_info_aria_gcm_128->iv,
- cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
- TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
- memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
- TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
- if (copy_to_user(optval,
- crypto_info_aria_gcm_128,
- sizeof(*crypto_info_aria_gcm_128)))
- rc = -EFAULT;
- break;
- }
- case TLS_CIPHER_ARIA_GCM_256: {
- struct tls12_crypto_info_aria_gcm_256 *
- crypto_info_aria_gcm_256 =
- container_of(crypto_info,
- struct tls12_crypto_info_aria_gcm_256,
- info);
-
- if (len != sizeof(*crypto_info_aria_gcm_256)) {
- rc = -EINVAL;
- goto out;
- }
- memcpy(crypto_info_aria_gcm_256->iv,
- cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
- TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
- memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
- TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
- if (copy_to_user(optval,
- crypto_info_aria_gcm_256,
- sizeof(*crypto_info_aria_gcm_256)))
- rc = -EFAULT;
- break;
- }
- default:
- rc = -EINVAL;
- }
+ if (copy_to_user(optval, crypto_info, cipher_desc->crypto_info))
+ rc = -EFAULT;
out:
return rc;
@@ -696,7 +587,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
struct tls_crypto_info *crypto_info;
struct tls_crypto_info *alt_crypto_info;
struct tls_context *ctx = tls_get_ctx(sk);
- size_t optsize;
+ const struct tls_cipher_desc *cipher_desc;
int rc = 0;
int conf;
@@ -737,46 +628,23 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
}
}
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
- break;
- case TLS_CIPHER_AES_GCM_256: {
- optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
- break;
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ if (!cipher_desc) {
+ rc = -EINVAL;
+ goto err_crypto_info;
}
- case TLS_CIPHER_AES_CCM_128:
- optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
- break;
- case TLS_CIPHER_CHACHA20_POLY1305:
- optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
- break;
- case TLS_CIPHER_SM4_GCM:
- optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
- break;
- case TLS_CIPHER_SM4_CCM:
- optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
- break;
+
+ switch (crypto_info->cipher_type) {
case TLS_CIPHER_ARIA_GCM_128:
- if (crypto_info->version != TLS_1_2_VERSION) {
- rc = -EINVAL;
- goto err_crypto_info;
- }
- optsize = sizeof(struct tls12_crypto_info_aria_gcm_128);
- break;
case TLS_CIPHER_ARIA_GCM_256:
if (crypto_info->version != TLS_1_2_VERSION) {
rc = -EINVAL;
goto err_crypto_info;
}
- optsize = sizeof(struct tls12_crypto_info_aria_gcm_256);
break;
- default:
- rc = -EINVAL;
- goto err_crypto_info;
}
- if (optlen != optsize) {
+ if (optlen != cipher_desc->crypto_info) {
rc = -EINVAL;
goto err_crypto_info;
}
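For reference, what CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true) in the table added to tls_main.c above generates, hand-expanded into an equivalent standalone initializer; the variable name is made up and this is illustration only, not the real table entry.

/* Equivalent of one generated tls_cipher_desc[] slot, expanded by hand. */
static const struct tls_cipher_desc example_aes_gcm_128_desc = {
        .nonce          = TLS_CIPHER_AES_GCM_128_IV_SIZE,
        .iv             = TLS_CIPHER_AES_GCM_128_IV_SIZE,
        .key            = TLS_CIPHER_AES_GCM_128_KEY_SIZE,
        .salt           = TLS_CIPHER_AES_GCM_128_SALT_SIZE,
        .tag            = TLS_CIPHER_AES_GCM_128_TAG_SIZE,
        .rec_seq        = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE,
        .cipher_name    = "gcm(aes)",
        .offloadable    = true,
        .iv_offset      = offsetof(struct tls12_crypto_info_aes_gcm_128, iv),
        .key_offset     = offsetof(struct tls12_crypto_info_aes_gcm_128, key),
        .salt_offset    = offsetof(struct tls12_crypto_info_aes_gcm_128, salt),
        .rec_seq_offset = offsetof(struct tls12_crypto_info_aes_gcm_128, rec_seq),
        .crypto_info    = sizeof(struct tls12_crypto_info_aes_gcm_128),
};

The CHECK_CIPHER_DESC() lines then statically assert that these sizes match the uAPI struct fields, so a mismatch fails at build time rather than at setsockopt() time.
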
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 5c122d7bb784..1ed4a611631f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2590,10 +2590,10 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
struct crypto_aead **aead;
- u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
struct crypto_tfm *tfm;
- char *iv, *rec_seq, *key, *salt, *cipher_name;
- size_t keysize;
+ char *iv, *rec_seq, *key, *salt;
+ const struct tls_cipher_desc *cipher_desc;
+ u16 nonce_size;
int rc = 0;
if (!ctx) {
@@ -2647,148 +2647,19 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
aead = &sw_ctx_rx->aead_recv;
}
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
-
- gcm_128_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
- tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
- iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
- iv = gcm_128_info->iv;
- rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
- rec_seq = gcm_128_info->rec_seq;
- keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
- key = gcm_128_info->key;
- salt = gcm_128_info->salt;
- salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
- cipher_name = "gcm(aes)";
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
-
- gcm_256_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
- tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
- iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
- iv = gcm_256_info->iv;
- rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
- rec_seq = gcm_256_info->rec_seq;
- keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
- key = gcm_256_info->key;
- salt = gcm_256_info->salt;
- salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
- cipher_name = "gcm(aes)";
- break;
- }
- case TLS_CIPHER_AES_CCM_128: {
- struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
-
- ccm_128_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
- tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
- iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
- iv = ccm_128_info->iv;
- rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
- rec_seq = ccm_128_info->rec_seq;
- keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
- key = ccm_128_info->key;
- salt = ccm_128_info->salt;
- salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
- cipher_name = "ccm(aes)";
- break;
- }
- case TLS_CIPHER_CHACHA20_POLY1305: {
- struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
-
- chacha20_poly1305_info = (void *)crypto_info;
- nonce_size = 0;
- tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
- iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
- iv = chacha20_poly1305_info->iv;
- rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
- rec_seq = chacha20_poly1305_info->rec_seq;
- keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
- key = chacha20_poly1305_info->key;
- salt = chacha20_poly1305_info->salt;
- salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
- cipher_name = "rfc7539(chacha20,poly1305)";
- break;
- }
- case TLS_CIPHER_SM4_GCM: {
- struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
-
- sm4_gcm_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
- tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
- iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
- iv = sm4_gcm_info->iv;
- rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
- rec_seq = sm4_gcm_info->rec_seq;
- keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
- key = sm4_gcm_info->key;
- salt = sm4_gcm_info->salt;
- salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
- cipher_name = "gcm(sm4)";
- break;
- }
- case TLS_CIPHER_SM4_CCM: {
- struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
-
- sm4_ccm_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
- tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
- iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
- iv = sm4_ccm_info->iv;
- rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
- rec_seq = sm4_ccm_info->rec_seq;
- keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
- key = sm4_ccm_info->key;
- salt = sm4_ccm_info->salt;
- salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
- cipher_name = "ccm(sm4)";
- break;
- }
- case TLS_CIPHER_ARIA_GCM_128: {
- struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;
-
- aria_gcm_128_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
- tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
- iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
- iv = aria_gcm_128_info->iv;
- rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
- rec_seq = aria_gcm_128_info->rec_seq;
- keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
- key = aria_gcm_128_info->key;
- salt = aria_gcm_128_info->salt;
- salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
- cipher_name = "gcm(aria)";
- break;
- }
- case TLS_CIPHER_ARIA_GCM_256: {
- struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;
-
- gcm_256_info = (void *)crypto_info;
- nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
- tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
- iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
- iv = gcm_256_info->iv;
- rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
- rec_seq = gcm_256_info->rec_seq;
- keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
- key = gcm_256_info->key;
- salt = gcm_256_info->salt;
- salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
- cipher_name = "gcm(aria)";
- break;
- }
- default:
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ if (!cipher_desc) {
rc = -EINVAL;
goto free_priv;
}
+ nonce_size = cipher_desc->nonce;
+
+ iv = crypto_info_iv(crypto_info, cipher_desc);
+ key = crypto_info_key(crypto_info, cipher_desc);
+ salt = crypto_info_salt(crypto_info, cipher_desc);
+ rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
+
if (crypto_info->version == TLS_1_3_VERSION) {
nonce_size = 0;
prot->aad_size = TLS_HEADER_SIZE;
@@ -2799,9 +2670,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
}
/* Sanity-check the sizes for stack allocations. */
- if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
- rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
- prot->aad_size > TLS_MAX_AAD_SIZE) {
+ if (nonce_size > MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE) {
rc = -EINVAL;
goto free_priv;
}
@@ -2809,28 +2678,29 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
prot->version = crypto_info->version;
prot->cipher_type = crypto_info->cipher_type;
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
- prot->tag_size = tag_size;
+ prot->tag_size = cipher_desc->tag;
prot->overhead_size = prot->prepend_size +
prot->tag_size + prot->tail_size;
- prot->iv_size = iv_size;
- prot->salt_size = salt_size;
- cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
+ prot->iv_size = cipher_desc->iv;
+ prot->salt_size = cipher_desc->salt;
+ cctx->iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
if (!cctx->iv) {
rc = -ENOMEM;
goto free_priv;
}
/* Note: 128 & 256 bit salt are the same size */
- prot->rec_seq_size = rec_seq_size;
- memcpy(cctx->iv, salt, salt_size);
- memcpy(cctx->iv + salt_size, iv, iv_size);
- cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
+ prot->rec_seq_size = cipher_desc->rec_seq;
+ memcpy(cctx->iv, salt, cipher_desc->salt);
+ memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
+
+ cctx->rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
if (!cctx->rec_seq) {
rc = -ENOMEM;
goto free_iv;
}
if (!*aead) {
- *aead = crypto_alloc_aead(cipher_name, 0, 0);
+ *aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
if (IS_ERR(*aead)) {
rc = PTR_ERR(*aead);
*aead = NULL;
@@ -2840,8 +2710,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
ctx->push_pending_record = tls_sw_push_pending_record;
- rc = crypto_aead_setkey(*aead, key, keysize);
-
+ rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
if (rc)
goto free_aead;
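
The tls_set_sw_offload() hunk above replaces the long per-cipher switch with a single descriptor lookup plus small accessor helpers. A minimal, self-contained sketch of that table-driven pattern follows; the struct layout, field names and sizes here are illustrative stand-ins, not the kernel's actual tls_cipher_desc definition.

/* Illustrative cipher-descriptor table; types, names and values are examples only. */
#include <stddef.h>
#include <stdio.h>

struct cipher_desc {
	size_t nonce, iv, salt, rec_seq, key, tag;
	const char *cipher_name;
};

enum { EX_AES_GCM_128, EX_AES_GCM_256, EX_NUM_CIPHERS };

static const struct cipher_desc cipher_table[EX_NUM_CIPHERS] = {
	[EX_AES_GCM_128] = { .nonce = 8, .iv = 8, .salt = 4, .rec_seq = 8,
			     .key = 16, .tag = 16, .cipher_name = "gcm(aes)" },
	[EX_AES_GCM_256] = { .nonce = 8, .iv = 8, .salt = 4, .rec_seq = 8,
			     .key = 32, .tag = 16, .cipher_name = "gcm(aes)" },
};

static const struct cipher_desc *ex_get_cipher_desc(unsigned int type)
{
	if (type >= EX_NUM_CIPHERS)
		return NULL;		/* caller turns this into -EINVAL */
	return &cipher_table[type];
}

int main(void)
{
	const struct cipher_desc *d = ex_get_cipher_desc(EX_AES_GCM_256);

	if (d)
		printf("%s: key=%zu tag=%zu iv+salt=%zu\n",
		       d->cipher_name, d->key, d->tag, d->iv + d->salt);
	return 0;
}

With the per-cipher parameters reduced to table data, the remaining sanity check in the hunk only needs to bound the nonce and AAD sizes, which is why the old iv/rec_seq/tag comparisons could be dropped.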
diff --git a/net/wireless/core.c b/net/wireless/core.c
index b0f6ae3ce78f..7df8ffcfa0c4 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1184,16 +1184,11 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
}
EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
-void cfg80211_cqm_config_free(struct wireless_dev *wdev)
-{
- kfree(wdev->cqm_config);
- wdev->cqm_config = NULL;
-}
-
static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
bool unregister_netdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_cqm_config *cqm_config;
unsigned int link_id;
ASSERT_RTNL();
@@ -1230,7 +1225,10 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
kfree_sensitive(wdev->wext.keys);
wdev->wext.keys = NULL;
#endif
- cfg80211_cqm_config_free(wdev);
+ wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work);
+ /* deleted from the list, so can't be found from nl80211 any more */
+ cqm_config = rcu_access_pointer(wdev->cqm_config);
+ kfree_rcu(cqm_config, rcu_head);
/*
* Ensure that all events have been processed and
@@ -1372,6 +1370,8 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
#endif
+ wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work);
+
if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
wdev->ps = true;
else
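
The core.c changes above move CQM RSSI reporting onto a wiphy work item: the work is initialised once in cfg80211_init_wdev(), queued from the notification path, and cancelled before the wdev is torn down. A hedged sketch of that lifecycle, using only the wiphy_work_* calls visible in this diff (the surrounding ex_dev structure is made up):

/* Sketch of the wiphy_work lifecycle; ex_dev and its fields are hypothetical. */
#include <net/cfg80211.h>

struct ex_dev {
	struct wiphy *wiphy;
	struct wiphy_work report_work;
};

static void ex_report_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	/* Runs with the wiphy mutex held, so it may sleep and may use
	 * wiphy_dereference() on RCU-managed state. */
	struct ex_dev *dev = container_of(work, struct ex_dev, report_work);

	(void)dev;
}

static void ex_init(struct ex_dev *dev)
{
	wiphy_work_init(&dev->report_work, ex_report_work);	/* once, at setup */
}

static void ex_event(struct ex_dev *dev)
{
	wiphy_work_queue(dev->wiphy, &dev->report_work);	/* from the notification path */
}

static void ex_teardown(struct ex_dev *dev)
{
	wiphy_work_cancel(dev->wiphy, &dev->report_work);	/* before freeing state */
}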
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 866f0a6934e6..79b1c6d17847 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -289,12 +289,17 @@ struct cfg80211_beacon_registration {
};
struct cfg80211_cqm_config {
+ struct rcu_head rcu_head;
u32 rssi_hyst;
s32 last_rssi_event_value;
+ enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
int n_rssi_thresholds;
s32 rssi_thresholds[] __counted_by(n_rssi_thresholds);
};
+void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy,
+ struct wiphy_work *work);
+
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
/* free object */
@@ -530,8 +535,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
#define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; })
#endif
-void cfg80211_cqm_config_free(struct wireless_dev *wdev);
-
void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid);
void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev);
void cfg80211_pmsr_free_wk(struct work_struct *work);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index cc7ae9ea84ea..579fea2f3548 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -52,7 +52,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
cr.links[link_id].bssid = data->links[link_id].bss->bssid;
cr.links[link_id].addr = data->links[link_id].addr;
/* need to have local link addresses for MLO connections */
- WARN_ON(cr.ap_mld_addr && !cr.links[link_id].addr);
+ WARN_ON(cr.ap_mld_addr &&
+ !is_valid_ether_addr(cr.links[link_id].addr));
BUG_ON(!cr.links[link_id].bss->channel);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 87b21c0c0f25..2650543dcebe 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5885,6 +5885,21 @@ out:
nlmsg_free(msg);
}
+static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params)
+{
+ struct ieee80211_channel *channel = params->chandef.chan;
+
+ if ((params->he_cap || params->he_oper) &&
+ (channel->flags & IEEE80211_CHAN_NO_HE))
+ return -EOPNOTSUPP;
+
+ if ((params->eht_cap || params->eht_oper) &&
+ (channel->flags & IEEE80211_CHAN_NO_EHT))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6152,6 +6167,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out;
+ err = nl80211_validate_ap_phy_operation(params);
+ if (err)
+ goto out;
+
if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
params->flags = nla_get_u32(
info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]);
@@ -8458,7 +8477,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct mesh_config cfg;
+ struct mesh_config cfg = {};
u32 mask;
int err;
@@ -12759,7 +12778,8 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
}
static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct net_device *dev,
+ struct cfg80211_cqm_config *cqm_config)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
s32 last, low, high;
@@ -12768,7 +12788,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
int err;
/* RSSI reporting disabled? */
- if (!wdev->cqm_config)
+ if (!cqm_config)
return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
/*
@@ -12777,7 +12797,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
* connection is established and enough beacons received to calculate
* the average.
*/
- if (!wdev->cqm_config->last_rssi_event_value &&
+ if (!cqm_config->last_rssi_event_value &&
wdev->links[0].client.current_bss &&
rdev->ops->get_station) {
struct station_info sinfo = {};
@@ -12791,30 +12811,30 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
cfg80211_sinfo_release_content(&sinfo);
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
- wdev->cqm_config->last_rssi_event_value =
+ cqm_config->last_rssi_event_value =
(s8) sinfo.rx_beacon_signal_avg;
}
- last = wdev->cqm_config->last_rssi_event_value;
- hyst = wdev->cqm_config->rssi_hyst;
- n = wdev->cqm_config->n_rssi_thresholds;
+ last = cqm_config->last_rssi_event_value;
+ hyst = cqm_config->rssi_hyst;
+ n = cqm_config->n_rssi_thresholds;
for (i = 0; i < n; i++) {
i = array_index_nospec(i, n);
- if (last < wdev->cqm_config->rssi_thresholds[i])
+ if (last < cqm_config->rssi_thresholds[i])
break;
}
low_index = i - 1;
if (low_index >= 0) {
low_index = array_index_nospec(low_index, n);
- low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+ low = cqm_config->rssi_thresholds[low_index] - hyst;
} else {
low = S32_MIN;
}
if (i < n) {
i = array_index_nospec(i, n);
- high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+ high = cqm_config->rssi_thresholds[i] + hyst - 1;
} else {
high = S32_MAX;
}
@@ -12827,10 +12847,11 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
u32 hysteresis)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_cqm_config *cqm_config = NULL, *old;
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
s32 prev = S32_MIN;
- int i;
+ int i, err;
/* Check all values negative and sorted */
for (i = 0; i < n_thresholds; i++) {
@@ -12844,8 +12865,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
return -EOPNOTSUPP;
- cfg80211_cqm_config_free(wdev);
-
if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
@@ -12861,9 +12880,9 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
if (n_thresholds == 1 && thresholds[0] == 0) /* Disabling */
n_thresholds = 0;
- if (n_thresholds) {
- struct cfg80211_cqm_config *cqm_config;
+ old = wiphy_dereference(wdev->wiphy, wdev->cqm_config);
+ if (n_thresholds) {
cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
n_thresholds),
GFP_KERNEL);
@@ -12876,10 +12895,20 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
flex_array_size(cqm_config, rssi_thresholds,
n_thresholds));
- wdev->cqm_config = cqm_config;
+ rcu_assign_pointer(wdev->cqm_config, cqm_config);
+ } else {
+ RCU_INIT_POINTER(wdev->cqm_config, NULL);
}
- return cfg80211_cqm_rssi_update(rdev, dev);
+ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ if (err) {
+ rcu_assign_pointer(wdev->cqm_config, old);
+ kfree_rcu(cqm_config, rcu_head);
+ } else {
+ kfree_rcu(old, rcu_head);
+ }
+
+ return err;
}
static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
@@ -18949,9 +18978,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
enum nl80211_cqm_rssi_threshold_event rssi_event,
s32 rssi_level, gfp_t gfp)
{
- struct sk_buff *msg;
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_cqm_config *cqm_config;
trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
@@ -18959,16 +18987,37 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
return;
- if (wdev->cqm_config) {
- wdev->cqm_config->last_rssi_event_value = rssi_level;
+ rcu_read_lock();
+ cqm_config = rcu_dereference(wdev->cqm_config);
+ if (cqm_config) {
+ cqm_config->last_rssi_event_value = rssi_level;
+ cqm_config->last_rssi_event_type = rssi_event;
+ wiphy_work_queue(wdev->wiphy, &wdev->cqm_rssi_work);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
- cfg80211_cqm_rssi_update(rdev, dev);
+void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct wireless_dev *wdev = container_of(work, struct wireless_dev,
+ cqm_rssi_work);
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ enum nl80211_cqm_rssi_threshold_event rssi_event;
+ struct cfg80211_cqm_config *cqm_config;
+ struct sk_buff *msg;
+ s32 rssi_level;
- if (rssi_level == 0)
- rssi_level = wdev->cqm_config->last_rssi_event_value;
- }
+ cqm_config = wiphy_dereference(wdev->wiphy, wdev->cqm_config);
+ if (!wdev->cqm_config)
+ return;
- msg = cfg80211_prepare_cqm(dev, NULL, gfp);
+ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+
+ rssi_level = cqm_config->last_rssi_event_value;
+ rssi_event = cqm_config->last_rssi_event_type;
+
+ msg = cfg80211_prepare_cqm(wdev->netdev, NULL, GFP_KERNEL);
if (!msg)
return;
@@ -18980,14 +19029,13 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
rssi_level))
goto nla_put_failure;
- cfg80211_send_cqm(msg, gfp);
+ cfg80211_send_cqm(msg, GFP_KERNEL);
return;
nla_put_failure:
nlmsg_free(msg);
}
-EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
void cfg80211_cqm_txe_notify(struct net_device *dev,
const u8 *peer, u32 num_packets,
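
In the nl80211 changes above, wdev->cqm_config becomes an RCU-managed pointer: the updater swaps it in with rcu_assign_pointer() and frees the previous object with kfree_rcu(), while the notification path only reads it under rcu_read_lock() and defers the heavy lifting to the wiphy work. A compact sketch of that publish/replace pattern, with made-up type and field names, is:

/* RCU publish/replace sketch; ex_* names are illustrative. */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ex_cfg {
	struct rcu_head rcu_head;
	s32 last_level;
	int n_thresholds;
	s32 thresholds[];
};

struct ex_dev {
	struct mutex lock;		/* update side */
	struct ex_cfg __rcu *cfg;
};

/* Update side: replace the config; readers keep using the old copy until
 * the grace period ends, then kfree_rcu() releases it. */
static void ex_set_cfg(struct ex_dev *dev, struct ex_cfg *new_cfg)
{
	struct ex_cfg *old;

	old = rcu_dereference_protected(dev->cfg, lockdep_is_held(&dev->lock));
	rcu_assign_pointer(dev->cfg, new_cfg);
	kfree_rcu(old, rcu_head);	/* no-op when old is NULL */
}

/* Read side: never blocks, only records the event and lets a worker
 * (holding the wiphy mutex in the cfg80211 case) do the rest. */
static void ex_notify(struct ex_dev *dev, s32 level)
{
	struct ex_cfg *cfg;

	rcu_read_lock();
	cfg = rcu_dereference(dev->cfg);
	if (cfg)
		cfg->last_level = level;
	rcu_read_unlock();
}

Note that the nl80211_set_cqm_rssi() hunk also restores the previous pointer when the driver update fails, so the published configuration only ever advances on success.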
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index a5758edf53b8..8d114faf4842 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -908,6 +908,10 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
!cfg80211_find_ssid_match(ap, request))
continue;
+ if (!is_broadcast_ether_addr(request->bssid) &&
+ !ether_addr_equal(request->bssid, ap->bssid))
+ continue;
+
if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
continue;
diff --git a/rust/.gitignore b/rust/.gitignore
index 21552992b401..d3829ffab80b 100644
--- a/rust/.gitignore
+++ b/rust/.gitignore
@@ -2,6 +2,8 @@
bindings_generated.rs
bindings_helpers_generated.rs
+doctests_kernel_generated.rs
+doctests_kernel_generated_kunit.c
uapi_generated.rs
exports_*_generated.h
doc/
diff --git a/rust/Makefile b/rust/Makefile
index 4124bfa01798..0d4bb06c5cee 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -27,6 +27,12 @@ endif
obj-$(CONFIG_RUST) += exports.o
+always-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.rs
+always-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.c
+
+obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated.o
+obj-$(CONFIG_RUST_KERNEL_DOCTESTS) += doctests_kernel_generated_kunit.o
+
# Avoids running `$(RUSTC)` for the sysroot when it may not be available.
ifdef CONFIG_RUST
@@ -39,9 +45,11 @@ ifeq ($(quiet),silent_)
cargo_quiet=-q
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
+rustdoc_test_kernel_quiet=>/dev/null
else ifeq ($(quiet),quiet_)
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
+rustdoc_test_kernel_quiet=>/dev/null
else
cargo_quiet=--verbose
endif
@@ -157,6 +165,27 @@ quiet_cmd_rustdoc_test = RUSTDOC T $<
-L$(objtree)/$(obj)/test --output $(objtree)/$(obj)/doc \
--crate-name $(subst rusttest-,,$@) $<
+quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
+ cmd_rustdoc_test_kernel = \
+ rm -rf $(objtree)/$(obj)/test/doctests/kernel; \
+ mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTDOC) --test $(rust_flags) \
+ @$(objtree)/include/generated/rustc_cfg \
+ -L$(objtree)/$(obj) --extern alloc --extern kernel \
+ --extern build_error --extern macros \
+ --extern bindings --extern uapi \
+ --no-run --crate-name kernel -Zunstable-options \
+ --test-builder $(objtree)/scripts/rustdoc_test_builder \
+ $< $(rustdoc_test_kernel_quiet); \
+ $(objtree)/scripts/rustdoc_test_gen
+
+%/doctests_kernel_generated.rs %/doctests_kernel_generated_kunit.c: \
+ $(src)/kernel/lib.rs $(obj)/kernel.o \
+ $(objtree)/scripts/rustdoc_test_builder \
+ $(objtree)/scripts/rustdoc_test_gen FORCE
+ $(call if_changed,rustdoc_test_kernel)
+
# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
# so for the moment we skip `-Cpanic=abort`.
quiet_cmd_rustc_test = RUSTC T $<
@@ -300,7 +329,7 @@ quiet_cmd_bindgen = BINDGEN $@
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
--no-debug '.*' \
- --size_t-is-usize -o $@ -- $(bindgen_c_flags_final) -DMODULE \
+ -o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
$(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
@@ -320,8 +349,8 @@ $(obj)/uapi/uapi_generated.rs: $(src)/uapi/uapi_helper.h \
# given it is `libclang`; but for consistency, future Clang changes and/or
# a potential future GCC backend for `bindgen`, we disable it too.
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_flags = \
- --blacklist-type '.*' --whitelist-var '' \
- --whitelist-function 'rust_helper_.*'
+ --blocklist-type '.*' --allowlist-var '' \
+ --allowlist-function 'rust_helper_.*'
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
@@ -373,12 +402,15 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
$(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
rust-analyzer:
- $(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \
- $(RUST_LIB_SRC) > $(objtree)/rust-project.json
+ $(Q)$(srctree)/scripts/generate_rust_analyzer.py \
+ --cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \
+ $(abs_srctree) $(abs_objtree) \
+ $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \
+ $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json
redirect-intrinsics = \
- __eqsf2 __gesf2 __lesf2 __nesf2 __unordsf2 \
- __unorddf2 \
+ __addsf3 __eqsf2 __gesf2 __lesf2 __ltsf2 __mulsf3 __nesf2 __unordsf2 \
+ __adddf3 __ledf2 __ltdf2 __muldf3 __unorddf2 \
__muloti4 __multi3 \
__udivmodti4 __udivti3 __umodti3
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index acf22d45e6f2..0b6bf5b6da43 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -16,8 +16,6 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
-use core::marker::Destruct;
-
#[cfg(test)]
mod tests;
@@ -41,6 +39,9 @@ extern "Rust" {
#[rustc_allocator_zeroed]
#[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+
+ #[cfg(not(bootstrap))]
+ static __rust_no_alloc_shim_is_unstable: u8;
}
/// The global memory allocator.
@@ -94,7 +95,14 @@ pub use std::alloc::Global;
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
- unsafe { __rust_alloc(layout.size(), layout.align()) }
+ unsafe {
+ // Make sure we don't accidentally allow omitting the allocator shim in
+ // stable code until it is actually stabilized.
+ #[cfg(not(bootstrap))]
+ core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
+
+ __rust_alloc(layout.size(), layout.align())
+ }
}
/// Deallocate memory with the global allocator.
@@ -333,16 +341,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
- ptr: Unique<T>,
- alloc: A,
-) {
+pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index 14af9860c36c..c8173cea8317 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -152,16 +152,13 @@ use core::any::Any;
use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
-use core::iter::{FusedIterator, Iterator};
+use core::iter::FusedIterator;
use core::marker::Tuple;
-use core::marker::{Destruct, Unpin, Unsize};
+use core::marker::Unsize;
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
@@ -218,6 +215,7 @@ impl<T> Box<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
+ #[rustc_diagnostic_item = "box_new"]
pub fn new(x: T) -> Self {
#[rustc_box]
Box::new(x)
@@ -287,9 +285,7 @@ impl<T> Box<T> {
#[must_use]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
- (#[rustc_box]
- Box::new(x))
- .into()
+ Box::new(x).into()
}
/// Allocates memory on the heap then places `x` into it,
@@ -381,12 +377,11 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
- pub const fn new_in(x: T, alloc: A) -> Self
+ pub fn new_in(x: T, alloc: A) -> Self
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
@@ -411,12 +406,10 @@ impl<T, A: Allocator> Box<T, A> {
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
- T: ~const Destruct,
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
@@ -446,13 +439,12 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
- pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -487,10 +479,9 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
@@ -518,13 +509,12 @@ impl<T, A: Allocator> Box<T, A> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -559,10 +549,9 @@ impl<T, A: Allocator> Box<T, A> {
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
@@ -578,12 +567,11 @@ impl<T, A: Allocator> Box<T, A> {
/// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
- pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
where
- A: 'static + ~const Allocator + ~const Destruct,
+ A: 'static + Allocator,
{
Self::into_pin(Self::new_in(x, alloc))
}
@@ -592,8 +580,7 @@ impl<T, A: Allocator> Box<T, A> {
///
/// This conversion does not allocate on the heap and happens in place.
#[unstable(feature = "box_into_boxed_slice", issue = "71582")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
let (raw, alloc) = Box::into_raw_with_allocator(boxed);
unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
}
@@ -610,12 +597,8 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(Box::into_inner(c), 5);
/// ```
#[unstable(feature = "box_into_inner", issue = "80437")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_inner(boxed: Self) -> T
- where
- Self: ~const Destruct,
- {
+ pub fn into_inner(boxed: Self) -> T {
*boxed
}
}
@@ -829,9 +812,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const unsafe fn assume_init(self) -> Box<T, A> {
+ pub unsafe fn assume_init(self) -> Box<T, A> {
let (raw, alloc) = Box::into_raw_with_allocator(self);
unsafe { Box::from_raw_in(raw as *mut T, alloc) }
}
@@ -864,9 +846,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// }
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn write(mut boxed: Self, value: T) -> Box<T, A> {
+ pub fn write(mut boxed: Self, value: T) -> Box<T, A> {
unsafe {
(*boxed).write(value);
boxed.assume_init()
@@ -1110,9 +1091,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
///
/// [memory layout]: self#memory-layout
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
let (leaked, alloc) = Box::into_unique(b);
(leaked.as_ptr(), alloc)
}
@@ -1122,10 +1102,9 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
issue = "none",
reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
)]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
#[doc(hidden)]
- pub const fn into_unique(b: Self) -> (Unique<T>, A) {
+ pub fn into_unique(b: Self) -> (Unique<T>, A) {
// Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
// raw pointer for the type system. Turning it directly into a raw pointer would not be
// recognized as "releasing" the unique pointer to permit aliased raw accesses,
@@ -1183,9 +1162,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// assert_eq!(*static_ref, [4, 2, 3]);
/// ```
#[stable(feature = "box_leak", since = "1.26.0")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn leak<'a>(b: Self) -> &'a mut T
+ pub fn leak<'a>(b: Self) -> &'a mut T
where
A: 'a,
{
@@ -1246,16 +1224,16 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
/// Creates a `Box<T>`, with the `Default` value for T.
+ #[inline]
fn default() -> Self {
- #[rustc_box]
Box::new(T::default())
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl<T> const Default for Box<[T]> {
+impl<T> Default for Box<[T]> {
+ #[inline]
fn default() -> Self {
let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
Box(ptr, Global)
@@ -1264,8 +1242,8 @@ impl<T> const Default for Box<[T]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "default_box_extra", since = "1.17.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl const Default for Box<str> {
+impl Default for Box<str> {
+ #[inline]
fn default() -> Self {
// SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
let ptr: Unique<str> = unsafe {
@@ -1461,8 +1439,7 @@ impl<T> From<T> for Box<T> {
}
#[stable(feature = "pin", since = "1.33.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
where
A: 'static,
{
@@ -1482,9 +1459,36 @@ where
}
}
+/// Specialization trait used for `From<&[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait BoxFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> BoxFromSlice<T> for Box<[T]> {
+ #[inline]
+ default fn from_slice(slice: &[T]) -> Self {
+ slice.to_vec().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> BoxFromSlice<T> for Box<[T]> {
+ #[inline]
+ fn from_slice(slice: &[T]) -> Self {
+ let len = slice.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_slice", since = "1.17.0")]
-impl<T: Copy> From<&[T]> for Box<[T]> {
+impl<T: Clone> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
@@ -1498,19 +1502,15 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
///
/// println!("{boxed_slice:?}");
/// ```
+ #[inline]
fn from(slice: &[T]) -> Box<[T]> {
- let len = slice.len();
- let buf = RawVec::with_capacity(len);
- unsafe {
- ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
- buf.into_box(slice.len()).assume_init()
- }
+ <Self as BoxFromSlice<T>>::from_slice(slice)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_cow", since = "1.45.0")]
-impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+impl<T: Clone> From<Cow<'_, [T]>> for Box<[T]> {
/// Converts a `Cow<'_, [T]>` into a `Box<[T]>`
///
/// When `cow` is the `Cow::Borrowed` variant, this
@@ -1620,7 +1620,6 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> {
/// println!("{boxed:?}");
/// ```
fn from(array: [T; N]) -> Box<[T]> {
- #[rustc_box]
Box::new(array)
}
}
@@ -1899,8 +1898,7 @@ impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
+impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
type Target = T;
fn deref(&self) -> &T {
@@ -1909,8 +1907,7 @@ impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
-impl<T: ?Sized, A: Allocator> const DerefMut for Box<T, A> {
+impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index 5f374378b0d4..85e91356ecb3 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -89,35 +89,37 @@
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
+#![warn(multiple_supertrait_upcastable)]
//
// Library features:
+// tidy-alphabetical-start
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(is_sorted))]
+#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(array_into_iter_constructors)]
#![feature(array_methods)]
#![feature(array_windows)]
+#![feature(ascii_char)]
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_align_of_val)]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![cfg_attr(not(no_borrow), feature(const_cow_is_borrowed))]
-#![feature(const_convert)]
-#![feature(const_size_of_val)]
-#![feature(const_align_of_val)]
-#![feature(const_ptr_read)]
-#![feature(const_maybe_uninit_zeroed)]
-#![feature(const_maybe_uninit_write)]
+#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_zeroed)]
+#![feature(const_pin)]
#![feature(const_refs_to_cell)]
+#![feature(const_size_of_val)]
+#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(core_panic)]
-#![feature(const_eval_select)]
-#![feature(const_pin)]
-#![feature(const_waker)]
-#![feature(cstr_from_bytes_until_nul)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
@@ -128,7 +130,6 @@
#![feature(hasher_prefixfree_extras)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
-#![cfg_attr(test, feature(is_sorted))]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
@@ -136,8 +137,6 @@
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
-#![cfg_attr(test, feature(new_uninit))]
-#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
#![feature(provide_any)]
@@ -153,6 +152,7 @@
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
+#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)]
@@ -163,40 +163,42 @@
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(utf8_chunks)]
-#![feature(std_internals)]
+// tidy-alphabetical-end
//
// Language features:
+// tidy-alphabetical-start
+#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(test, feature(panic_update_hook))]
+#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
+#![feature(c_unwind)]
#![feature(cfg_sanitize)]
-#![feature(const_deref)]
#![feature(const_mut_refs)]
-#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
+#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
-#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(min_specialization)]
+#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
+#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
-#![feature(pointer_is_aligned)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
-#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
-#![feature(c_unwind)]
#![feature(with_negative_coherence)]
-#![cfg_attr(test, feature(panic_update_hook))]
+// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index 5db87eac53b7..65d5ce15828e 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -6,7 +6,6 @@ use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
@@ -274,10 +273,15 @@ impl<T, A: Allocator> RawVec<T, A> {
if T::IS_ZST || self.cap == 0 {
None
} else {
- // We have an allocated chunk of memory, so we can bypass runtime
- // checks to get our current layout.
+ // We could use Layout::array here which ensures the absence of isize and usize overflows
+ // and could hypothetically handle differences between stride and size, but this memory
+ // has already been allocated so we know it can't overflow and currently rust does not
+ // support such types. So we can do better by skipping some checks and avoid an unwrap.
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
- let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
+ let align = mem::align_of::<T>();
+ let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
@@ -465,11 +469,13 @@ impl<T, A: Allocator> RawVec<T, A> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-
+ // See current_memory() why this assert is here
+ let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
- let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
+ let new_size = mem::size_of::<T>().unchecked_mul(cap);
+ let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
index 245e01590df7..6ac463bd3edc 100644
--- a/rust/alloc/slice.rs
+++ b/rust/alloc/slice.rs
@@ -784,6 +784,38 @@ impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
}
}
+// Specializable trait for implementing ToOwned::clone_into. This is
+// public in the crate and has the Allocator parameter so that
+// vec::clone_from use it too.
+#[cfg(not(no_global_oom_handling))]
+pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
+ fn clone_into(&self, target: &mut Vec<T, A>);
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
+ default fn clone_into(&self, target: &mut Vec<T, A>) {
+ // drop anything in target that will not be overwritten
+ target.truncate(self.len());
+
+ // target.len <= self.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = self.split_at(target.len());
+
+ // reuse the contained values' allocations/resources.
+ target.clone_from_slice(init);
+ target.extend_from_slice(tail);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
+ fn clone_into(&self, target: &mut Vec<T, A>) {
+ target.clear();
+ target.extend_from_slice(self);
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
@@ -799,16 +831,7 @@ impl<T: Clone> ToOwned for [T] {
}
fn clone_into(&self, target: &mut Vec<T>) {
- // drop anything in target that will not be overwritten
- target.truncate(self.len());
-
- // target.len <= self.len due to the truncate above, so the
- // slices here are always in-bounds.
- let (init, tail) = self.split_at(target.len());
-
- // reuse the contained values' allocations/resources.
- target.clone_from_slice(init);
- target.extend_from_slice(tail);
+ SpecCloneIntoVec::clone_into(self, target);
}
}
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
index d503d2f478ce..78177a9e2ad0 100644
--- a/rust/alloc/vec/drain.rs
+++ b/rust/alloc/vec/drain.rs
@@ -18,7 +18,7 @@ use super::Vec;
///
/// ```
/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::Drain<_> = v.drain(..);
+/// let iter: std::vec::Drain<'_, _> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
@@ -114,9 +114,7 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
let unyielded_ptr = this.iter.as_slice().as_ptr();
// ZSTs have no identity, so we don't need to move them around.
- let needs_move = mem::size_of::<T>() != 0;
-
- if needs_move {
+ if !T::IS_ZST {
let start_ptr = source_vec.as_mut_ptr().add(start);
// memmove back unyielded elements
@@ -199,7 +197,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
}
- let iter = mem::replace(&mut self.iter, (&mut []).iter());
+ let iter = mem::take(&mut self.iter);
let drop_len = iter.len();
let mut vec = self.vec;
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
index 4b019220657d..09efff090e42 100644
--- a/rust/alloc/vec/drain_filter.rs
+++ b/rust/alloc/vec/drain_filter.rs
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ptr;
use core::slice;
@@ -18,7 +18,7 @@ use super::Vec;
/// #![feature(drain_filter)]
///
/// let mut v = vec![0, 1, 2];
-/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
+/// let iter: std::vec::DrainFilter<'_, _, _> = v.drain_filter(|x| *x % 2 == 0);
/// ```
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
#[derive(Debug)]
@@ -98,9 +98,7 @@ where
unsafe {
// ZSTs have no identity, so we don't need to move them around.
- let needs_move = mem::size_of::<T>() != 0;
-
- if needs_move && this.idx < this.old_len && this.del > 0 {
+ if !T::IS_ZST && this.idx < this.old_len && this.del > 0 {
let ptr = this.vec.as_mut_ptr();
let src = ptr.add(this.idx);
let dst = src.sub(this.del);
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index 34a2a70d6ded..aac0ec16aef1 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -13,6 +13,7 @@ use core::iter::{
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::num::NonZeroUsize;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -109,7 +110,7 @@ impl<T, A: Allocator> IntoIter<T, A> {
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
/// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
- /// (&mut into_iter).for_each(core::mem::drop);
+ /// (&mut into_iter).for_each(drop);
/// std::mem::forget(into_iter);
/// ```
///
@@ -215,7 +216,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if T::IS_ZST {
@@ -229,10 +230,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
@@ -315,7 +313,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
if T::IS_ZST {
// SAFETY: same as for advance_by()
@@ -329,10 +327,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
}
@@ -349,6 +344,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default,
+{
+ /// Creates an empty `vec::IntoIter`.
+ ///
+ /// ```
+ /// # use std::vec;
+ /// let iter: vec::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// assert_eq!(iter.as_slice(), &[]);
+ /// ```
+ fn default() -> Self {
+ super::Vec::new_in(Default::default()).into_iter()
+ }
+}
+
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 94995913566b..05c70de0227e 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -58,13 +58,9 @@
#[cfg(not(no_global_oom_handling))]
use core::cmp;
use core::cmp::Ordering;
-use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::assume;
use core::iter;
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
@@ -381,8 +377,8 @@ mod spec_extend;
/// Currently, `Vec` does not guarantee the order in which elements are dropped.
/// The order has changed in the past and may change again.
///
-/// [`get`]: ../../std/vec/struct.Vec.html#method.get
-/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`get`]: slice::get
+/// [`get_mut`]: slice::get_mut
/// [`String`]: crate::string::String
/// [`&str`]: type@str
/// [`shrink_to_fit`]: Vec::shrink_to_fit
@@ -708,14 +704,14 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@@ -766,14 +762,14 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@@ -999,7 +995,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec: Vec<i32> = Vec::with_capacity(10);
/// vec.push(42);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
@@ -1150,7 +1146,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// vec.shrink_to_fit();
/// assert!(vec.capacity() >= 3);
/// ```
@@ -1177,7 +1173,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// vec.shrink_to(4);
/// assert!(vec.capacity() >= 4);
/// vec.shrink_to(0);
@@ -1212,7 +1208,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
///
- /// assert_eq!(vec.capacity(), 10);
+ /// assert!(vec.capacity() >= 10);
/// let slice = vec.into_boxed_slice();
/// assert_eq!(slice.into_vec().capacity(), 3);
/// ```
@@ -1358,11 +1354,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
// `deref`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
@@ -1395,11 +1387,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
// `deref_mut`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns a reference to the underlying allocator.
@@ -2892,35 +2880,6 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
}
#[cfg(not(no_global_oom_handling))]
-trait SpecCloneFrom {
- fn clone_from(this: &mut Self, other: &Self);
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
- default fn clone_from(this: &mut Self, other: &Self) {
- // drop anything that will not be overwritten
- this.truncate(other.len());
-
- // self.len <= other.len due to the truncate above, so the
- // slices here are always in-bounds.
- let (init, tail) = other.split_at(this.len());
-
- // reuse the contained values' allocations/resources.
- this.clone_from_slice(init);
- this.extend_from_slice(tail);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
-impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
- fn clone_from(this: &mut Self, other: &Self) {
- this.clear();
- this.extend_from_slice(other);
- }
-}
-
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
#[cfg(not(test))]
@@ -2940,7 +2899,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
}
fn clone_from(&mut self, other: &Self) {
- SpecCloneFrom::clone_from(self, other)
+ crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self);
}
}
@@ -2948,7 +2907,6 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// as required by the `core::borrow::Borrow` implementation.
///
/// ```
-/// #![feature(build_hasher_simple_hash_one)]
/// use std::hash::BuildHasher;
///
/// let b = std::collections::hash_map::RandomState::new();
@@ -3330,7 +3288,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
}
}
-/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[inline]
@@ -3342,7 +3300,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
-/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
#[inline]
@@ -3365,8 +3323,7 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
-impl<T> const Default for Vec<T> {
+impl<T> Default for Vec<T> {
/// Creates an empty `Vec<T>`.
///
/// The vector will not allocate until elements are pushed onto it.
@@ -3462,10 +3419,7 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// ```
#[cfg(not(test))]
fn from(s: [T; N]) -> Vec<T> {
- <[T]>::into_vec(
- #[rustc_box]
- Box::new(s),
- )
+ <[T]>::into_vec(Box::new(s))
}
#[cfg(test)]
@@ -3490,8 +3444,8 @@ where
///
/// ```
/// # use std::borrow::Cow;
- /// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
- /// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
+ /// let o: Cow<'_, [i32]> = Cow::Owned(vec![1, 2, 3]);
+ /// let b: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
/// assert_eq!(Vec::from(o), Vec::from(b));
/// ```
fn from(s: Cow<'a, [T]>) -> Vec<T> {
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 058954961bfc..c91a3c24f607 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -6,6 +6,7 @@
* Sorted alphabetically.
*/
+#include <kunit/test.h>
#include <linux/errname.h>
#include <linux/slab.h>
#include <linux/refcount.h>
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
index 43378357ece9..fb8ac3f211de 100644
--- a/rust/compiler_builtins.rs
+++ b/rust/compiler_builtins.rs
@@ -37,14 +37,21 @@ macro_rules! define_panicking_intrinsics(
);
define_panicking_intrinsics!("`f32` should not be used", {
+ __addsf3,
__eqsf2,
__gesf2,
__lesf2,
+ __ltsf2,
+ __mulsf3,
__nesf2,
__unordsf2,
});
define_panicking_intrinsics!("`f64` should not be used", {
+ __adddf3,
+ __ledf2,
+ __ltdf2,
+ __muldf3,
__unorddf2,
});
diff --git a/rust/helpers.c b/rust/helpers.c
index bb594da56137..4c86fe4a7e05 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -16,16 +16,19 @@
*
* All symbols are exported as GPL-only to guarantee no GPL-only feature is
* accidentally exposed.
+ *
+ * Sorted alphabetically.
*/
+#include <kunit/test-bug.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
#include <linux/err.h>
#include <linux/errname.h>
-#include <linux/refcount.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/refcount.h>
#include <linux/sched/signal.h>
+#include <linux/spinlock.h>
#include <linux/wait.h>
__noreturn void rust_helper_BUG(void)
@@ -135,20 +138,25 @@ void rust_helper_put_task_struct(struct task_struct *t)
}
EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
+struct kunit *rust_helper_kunit_get_current_test(void)
+{
+ return kunit_get_current_test();
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
+
/*
- * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
- * as the Rust `usize` type, so we can use it in contexts where Rust
- * expects a `usize` like slice (array) indices. `usize` is defined to be
- * the same as C's `uintptr_t` type (can hold any pointer) but not
- * necessarily the same as `size_t` (can hold the size of any single
- * object). Most modern platforms use the same concrete integer type for
+ * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
+ * use it in contexts where Rust expects a `usize` like slice (array) indices.
+ * `usize` is defined to be the same as C's `uintptr_t` type (can hold any
+ * pointer) but not necessarily the same as `size_t` (can hold the size of any
+ * single object). Most modern platforms use the same concrete integer type for
* both of them, but in case we find ourselves on a platform where
* that's not true, fail early instead of risking ABI or
* integer-overflow issues.
*
* If your platform fails this assertion, it means that you are in
- * danger of integer-overflow bugs (even if you attempt to remove
- * `--size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * danger of integer-overflow bugs (even if you attempt to add
+ * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on
* your platform such that `size_t` matches `uintptr_t` (i.e., to increase
* `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
*/
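
The comment above describes a build-time guarantee rather than a runtime check: if size_t and uintptr_t ever diverge on a target, compilation should fail before any bindings are generated. A stand-alone illustration of such an assertion (not the kernel's exact one) is:

/* Fails the build if size_t and uintptr_t differ in width on this target. */
#include <stddef.h>
#include <stdint.h>

_Static_assert(sizeof(size_t) == sizeof(uintptr_t),
	       "size_t must match uintptr_t for the usize binding to be sound");

int main(void)
{
	return 0;
}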
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
index 9363b527be66..a8f3d5be1af1 100644
--- a/rust/kernel/allocator.rs
+++ b/rust/kernel/allocator.rs
@@ -41,9 +41,9 @@ unsafe fn krealloc_aligned(ptr: *mut u8, new_layout: Layout, flags: bindings::gf
unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- // `krealloc()` is used instead of `kmalloc()` because the latter is
- // an inline function and cannot be bound to as a result.
- unsafe { bindings::krealloc(ptr::null(), layout.size(), bindings::GFP_KERNEL) as *mut u8 }
+ // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+ // requirement.
+ unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
}
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
@@ -51,58 +51,38 @@ unsafe impl GlobalAlloc for KernelAllocator {
bindings::kfree(ptr as *const core::ffi::c_void);
}
}
-}
-
-#[global_allocator]
-static ALLOCATOR: KernelAllocator = KernelAllocator;
-
-// `rustc` only generates these for some crate types. Even then, we would need
-// to extract the object file that has them from the archive. For the moment,
-// let's generate them ourselves instead.
-//
-// Note: Although these are *safe* functions, they are called by the compiler
-// with parameters that obey the same `GlobalAlloc` function safety
-// requirements: size and align should form a valid layout, and size is
-// greater than 0.
-//
-// Note that `#[no_mangle]` implies exported too, nowadays.
-#[no_mangle]
-fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
- // SAFETY: See assumption above.
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
- // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater
- // than 0.
- unsafe { krealloc_aligned(ptr::null_mut(), layout, bindings::GFP_KERNEL) }
-}
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY:
+ // - `new_size`, when rounded up to the nearest multiple of `layout.align()`, will not
+ // overflow `isize` by the function safety requirement.
+ // - `layout.align()` is a proper alignment (i.e. not zero and must be a power of two).
+ let layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
+
+ // SAFETY:
+ // - `ptr` is either null or a pointer allocated by this allocator by the function safety
+ // requirement.
+ // - the size of `layout` is not zero because `new_size` is not zero by the function safety
+ // requirement.
+ unsafe { krealloc_aligned(ptr, layout, bindings::GFP_KERNEL) }
+ }
-#[no_mangle]
-fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
- unsafe { bindings::kfree(ptr as *const core::ffi::c_void) };
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ // SAFETY: `ptr::null_mut()` is null and `layout` has a non-zero size by the function safety
+ // requirement.
+ unsafe {
+ krealloc_aligned(
+ ptr::null_mut(),
+ layout,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ )
+ }
+ }
}
-#[no_mangle]
-fn __rust_realloc(ptr: *mut u8, _old_size: usize, align: usize, new_size: usize) -> *mut u8 {
- // SAFETY: See assumption above.
- let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, align) };
-
- // SAFETY: Per assumption above, `ptr` is allocated by `__rust_*` before, and the size of
- // `new_layout` is greater than 0.
- unsafe { krealloc_aligned(ptr, new_layout, bindings::GFP_KERNEL) }
-}
+#[global_allocator]
+static ALLOCATOR: KernelAllocator = KernelAllocator;
+// See <https://github.com/rust-lang/rust/pull/86844>.
#[no_mangle]
-fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8 {
- // SAFETY: See assumption above.
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
-
- // SAFETY: `ptr::null_mut()` is null, per assumption above the size of `layout` is greater
- // than 0.
- unsafe {
- krealloc_aligned(
- ptr::null_mut(),
- layout,
- bindings::GFP_KERNEL | bindings::__GFP_ZERO,
- )
- }
-}
+static __rust_no_alloc_shim_is_unstable: u8 = 0;
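The hunks above route `alloc`, `alloc_zeroed` and the new `realloc` through the single `krealloc_aligned()` helper instead of the removed `__rust_*` shims. As a rough user-space illustration of that funnelling shape only (built on `std::alloc::System`; `FunnelAlloc` and `grow_or_alloc` are invented names, this is not the kernel allocator):

    use std::alloc::{GlobalAlloc, Layout, System};
    use std::ptr;

    // Analogue of `krealloc_aligned()`: one primitive that every entry point funnels through.
    // A null `old` behaves like a plain allocation, just as `krealloc(NULL, ..)` does.
    // `zero` is only honored for fresh allocations in this sketch.
    unsafe fn grow_or_alloc(
        old: *mut u8,
        old_layout: Layout,
        new_layout: Layout,
        zero: bool,
    ) -> *mut u8 {
        if old.is_null() {
            if zero {
                unsafe { System.alloc_zeroed(new_layout) }
            } else {
                unsafe { System.alloc(new_layout) }
            }
        } else {
            unsafe { System.realloc(old, old_layout, new_layout.size()) }
        }
    }

    struct FunnelAlloc;

    unsafe impl GlobalAlloc for FunnelAlloc {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            unsafe { grow_or_alloc(ptr::null_mut(), layout, layout, false) }
        }
        unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
            unsafe { grow_or_alloc(ptr::null_mut(), layout, layout, true) }
        }
        unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
            // SAFETY: `GlobalAlloc::realloc` guarantees that `new_size` is non-zero and does not
            // overflow `isize` when rounded up to `layout.align()`.
            let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
            unsafe { grow_or_alloc(ptr, layout, new_layout, false) }
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            unsafe { System.dealloc(ptr, layout) }
        }
    }

    #[global_allocator]
    static ALLOCATOR: FunnelAlloc = FunnelAlloc;

    fn main() {
        let v = vec![1u8; 16]; // served by FunnelAlloc::alloc
        let z = vec![0u64; 8]; // typically served by FunnelAlloc::alloc_zeroed
        assert_eq!(v.len() + z.len(), 24);
    }
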
diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs
index b4332a4ec1f4..4ebb6f23fc2e 100644
--- a/rust/kernel/init.rs
+++ b/rust/kernel/init.rs
@@ -120,14 +120,24 @@
//! `slot` gets called.
//!
//! ```rust
-//! use kernel::{prelude::*, init};
+//! # #![allow(unreachable_pub, clippy::disallowed_names)]
+//! use kernel::{prelude::*, init, types::Opaque};
//! use core::{ptr::addr_of_mut, marker::PhantomPinned, pin::Pin};
//! # mod bindings {
+//! # #![allow(non_camel_case_types)]
//! # pub struct foo;
//! # pub unsafe fn init_foo(_ptr: *mut foo) {}
//! # pub unsafe fn destroy_foo(_ptr: *mut foo) {}
//! # pub unsafe fn enable_foo(_ptr: *mut foo, _flags: u32) -> i32 { 0 }
//! # }
+//! # // `Error::from_errno` is `pub(crate)` in the `kernel` crate, thus provide a workaround.
+//! # trait FromErrno {
+//! # fn from_errno(errno: core::ffi::c_int) -> Error {
+//! # // Dummy error that can be constructed outside the `kernel` crate.
+//! # Error::from(core::fmt::Error)
+//! # }
+//! # }
+//! # impl FromErrno for Error {}
//! /// # Invariants
//! ///
//! /// `foo` is always initialized
@@ -158,7 +168,7 @@
//! if err != 0 {
//! // Enabling has failed, first clean up the foo and then return the error.
//! bindings::destroy_foo(Opaque::raw_get(foo));
-//! return Err(Error::from_kernel_errno(err));
+//! return Err(Error::from_errno(err));
//! }
//!
//! // All fields of `RawFoo` have been initialized, since `_p` is a ZST.
@@ -202,11 +212,12 @@
use crate::{
error::{self, Error},
sync::UniqueArc,
+ types::{Opaque, ScopeGuard},
};
use alloc::boxed::Box;
use core::{
alloc::AllocError,
- cell::Cell,
+ cell::UnsafeCell,
convert::Infallible,
marker::PhantomData,
mem::MaybeUninit,
@@ -226,8 +237,7 @@ pub mod macros;
///
/// ```rust
/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
-/// # use kernel::{init, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
-/// # use macros::pin_data;
+/// # use kernel::{init, macros::pin_data, pin_init, stack_pin_init, init::*, sync::Mutex, new_mutex};
/// # use core::pin::Pin;
/// #[pin_data]
/// struct Foo {
@@ -277,7 +287,7 @@ macro_rules! stack_pin_init {
///
/// # Examples
///
-/// ```rust
+/// ```rust,ignore
/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
/// # use macros::pin_data;
@@ -303,7 +313,7 @@ macro_rules! stack_pin_init {
/// pr_info!("a: {}", &*foo.a.lock());
/// ```
///
-/// ```rust
+/// ```rust,ignore
/// # #![allow(clippy::disallowed_names, clippy::new_ret_no_self)]
/// # use kernel::{init, pin_init, stack_try_pin_init, init::*, sync::Mutex, new_mutex};
/// # use macros::pin_data;
@@ -509,14 +519,17 @@ macro_rules! stack_try_pin_init {
/// - Fields that you want to initialize in-place have to use `<-` instead of `:`.
/// - In front of the initializer you can write `&this in` to have access to a [`NonNull<Self>`]
/// pointer named `this` inside of the initializer.
+/// - Using struct update syntax, one can place `..Zeroable::zeroed()` at the very end of the
+/// struct; this initializes every field with 0 and then runs all initializers specified in the
+/// body. This can only be done if [`Zeroable`] is implemented for the struct.
///
/// For instance:
///
/// ```rust
-/// # use kernel::pin_init;
-/// # use macros::pin_data;
+/// # use kernel::{macros::{Zeroable, pin_data}, pin_init};
/// # use core::{ptr::addr_of_mut, marker::PhantomPinned};
/// #[pin_data]
+/// #[derive(Zeroable)]
/// struct Buf {
/// // `ptr` points into `buf`.
/// ptr: *mut u8,
@@ -529,6 +542,10 @@ macro_rules! stack_try_pin_init {
/// ptr: unsafe { addr_of_mut!((*this.as_ptr()).buf).cast() },
/// pin: PhantomPinned,
/// });
+/// pin_init!(Buf {
+/// buf: [1; 64],
+/// ..Zeroable::zeroed()
+/// });
/// ```
///
/// [`try_pin_init!`]: kernel::try_pin_init
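The `..Zeroable::zeroed()` tail shown above means: zero the entire slot first, then run the listed field initializers on top, so every unmentioned field ends up as zero (the internal macro later in this patch does the zeroing with `write_bytes(slot, 0, 1)`). A minimal user-space sketch of that order of operations (the `Stats` type and helper below are invented for illustration):

    use std::mem::MaybeUninit;
    use std::ptr;

    struct Stats {
        len: usize,
        flags: u32,
        id: u32,
    }

    // All-zero bytes are a valid `Stats`, which is what `Zeroable` asserts in the kernel.
    unsafe fn init_with_zeroed_rest(slot: *mut Stats) {
        // First zero the whole slot.
        unsafe { ptr::write_bytes(slot, 0, 1) };
        // Then run only the initializers that were spelled out in the macro body.
        unsafe { ptr::addr_of_mut!((*slot).len).write(64) };
    }

    fn main() {
        let mut slot = MaybeUninit::<Stats>::uninit();
        unsafe { init_with_zeroed_rest(slot.as_mut_ptr()) };
        // SAFETY: the slot was fully defined above (zeroed, then `len` overwritten).
        let stats = unsafe { slot.assume_init() };
        assert_eq!((stats.len, stats.flags, stats.id), (64, 0, 0));
    }
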
@@ -540,11 +557,15 @@ macro_rules! pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error(::core::convert::Infallible),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
}
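`pin_init!` now forwards to `__init_internal!`, as do `try_pin_init!`, `init!` and `try_init!` below; the public macros differ only in the mode tokens (`@data`, `@has_data`, `@construct_closure`) they pass along. A toy user-space sketch of this forward-to-one-internal-macro pattern (all names here are invented):

    macro_rules! __emit_internal {
        (@mode($mode:literal), @value($v:expr),) => {
            println!("[{}] {}", $mode, $v)
        };
    }

    macro_rules! emit_info {
        ($v:expr) => {
            __emit_internal!(@mode("info"), @value($v),)
        };
    }

    macro_rules! emit_error {
        ($v:expr) => {
            __emit_internal!(@mode("error"), @value($v),)
        };
    }

    fn main() {
        emit_info!(42); // both wrappers expand to the same internal macro;
        emit_error!("boom"); // only the mode tokens they forward differ
    }
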
@@ -593,205 +614,31 @@ macro_rules! try_pin_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)? ),
@fields($($fields)*),
@error($crate::error::Error),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::try_pin_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)? ),
@fields($($fields)*),
@error($err),
+ @data(PinData, use_data),
+ @has_data(HasPinData, __pin_data),
+ @construct_closure(pin_init_from_closure),
+ @munch_fields($($fields)*),
)
};
- (
- @this($($this:ident)?),
- @typ($t:ident $(::<$($generics:ty),*>)?),
- @fields($($fields:tt)*),
- @error($err:ty),
- ) => {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
- // Get the pin data from the supplied type.
- let data = unsafe {
- use $crate::init::__internal::HasPinData;
- $t$(::<$($generics),*>)?::__pin_data()
- };
- // Ensure that `data` really is of type `PinData` and help with type inference:
- let init = $crate::init::__internal::PinData::make_closure::<_, __InitOk, $err>(
- data,
- move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- // Create the `this` so it can be referenced by the user inside of the
- // expressions creating the individual fields.
- $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
- // Initialize every field.
- $crate::try_pin_init!(init_slot:
- @data(data),
- @slot(slot),
- @munch_fields($($fields)*,),
- );
- // We use unreachable code to ensure that all fields have been mentioned exactly
- // once, this struct initializer will still be type-checked and complain with a
- // very natural error message if a field is forgotten/mentioned more than once.
- #[allow(unreachable_code, clippy::diverging_sub_expression)]
- if false {
- $crate::try_pin_init!(make_initializer:
- @slot(slot),
- @type_name($t),
- @munch_fields($($fields)*,),
- @acc(),
- );
- }
- // Forget all guards, since initialization was a success.
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($fields)*,),
- );
- }
- Ok(__InitOk)
- }
- );
- let init = move |slot| -> ::core::result::Result<(), $err> {
- init(slot).map(|__InitOk| ())
- };
- let init = unsafe { $crate::init::pin_init_from_closure::<_, $err>(init) };
- init
- }};
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- @munch_fields($(,)?),
- ) => {
- // Endpoint of munching, no fields are left.
- };
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- // In-place initialization syntax.
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- let $field = $val;
- // Call the initializer.
- //
- // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
- // return when an error/panic occurs.
- // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
- unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), $field)? };
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be forgotten via safe code.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_pin_init!(init_slot:
- @data($data),
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (init_slot:
- @data($data:ident),
- @slot($slot:ident),
- // Direct value init, this is safe for every field.
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- $(let $field = $val;)?
- // Initialize the field.
- //
- // SAFETY: The memory at `slot` is uninitialized.
- unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
- // Create the drop guard:
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_pin_init!(init_slot:
- @data($data),
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($(,)?),
- @acc($($acc:tt)*),
- ) => {
- // Endpoint, nothing more to munch, create the initializer.
- // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
- // get the correct type inference here:
- unsafe {
- ::core::ptr::write($slot, $t {
- $($acc)*
- });
- }
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_pin_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)* $field: ::core::panic!(),),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_pin_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)* $field: ::core::panic!(),),
- );
- };
- (forget_guards:
- @munch_fields($(,)?),
- ) => {
- // Munching finished.
- };
- (forget_guards:
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
- (forget_guards:
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_pin_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
}
/// Construct an in-place initializer for `struct`s.
@@ -816,11 +663,15 @@ macro_rules! init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error(::core::convert::Infallible),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
}
}
@@ -841,7 +692,7 @@ macro_rules! init {
/// # Examples
///
/// ```rust
-/// use kernel::{init::PinInit, error::Error, InPlaceInit};
+/// use kernel::{init::{PinInit, zeroed}, error::Error};
/// struct BigBuf {
/// big: Box<[u8; 1024 * 1024 * 1024]>,
/// small: [u8; 1024 * 1024],
@@ -863,199 +714,31 @@ macro_rules! try_init {
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error($crate::error::Error),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
};
($(&$this:ident in)? $t:ident $(::<$($generics:ty),* $(,)?>)? {
$($fields:tt)*
}? $err:ty) => {
- $crate::try_init!(
+ $crate::__init_internal!(
@this($($this)?),
@typ($t $(::<$($generics),*>)?),
@fields($($fields)*),
@error($err),
+ @data(InitData, /*no use_data*/),
+ @has_data(HasInitData, __init_data),
+ @construct_closure(init_from_closure),
+ @munch_fields($($fields)*),
)
};
- (
- @this($($this:ident)?),
- @typ($t:ident $(::<$($generics:ty),*>)?),
- @fields($($fields:tt)*),
- @error($err:ty),
- ) => {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
- // Get the init data from the supplied type.
- let data = unsafe {
- use $crate::init::__internal::HasInitData;
- $t$(::<$($generics),*>)?::__init_data()
- };
- // Ensure that `data` really is of type `InitData` and help with type inference:
- let init = $crate::init::__internal::InitData::make_closure::<_, __InitOk, $err>(
- data,
- move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- // Create the `this` so it can be referenced by the user inside of the
- // expressions creating the individual fields.
- $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
- // Initialize every field.
- $crate::try_init!(init_slot:
- @slot(slot),
- @munch_fields($($fields)*,),
- );
- // We use unreachable code to ensure that all fields have been mentioned exactly
- // once, this struct initializer will still be type-checked and complain with a
- // very natural error message if a field is forgotten/mentioned more than once.
- #[allow(unreachable_code, clippy::diverging_sub_expression)]
- if false {
- $crate::try_init!(make_initializer:
- @slot(slot),
- @type_name($t),
- @munch_fields($($fields)*,),
- @acc(),
- );
- }
- // Forget all guards, since initialization was a success.
- $crate::try_init!(forget_guards:
- @munch_fields($($fields)*,),
- );
- }
- Ok(__InitOk)
- }
- );
- let init = move |slot| -> ::core::result::Result<(), $err> {
- init(slot).map(|__InitOk| ())
- };
- let init = unsafe { $crate::init::init_from_closure::<_, $err>(init) };
- init
- }};
- (init_slot:
- @slot($slot:ident),
- @munch_fields( $(,)?),
- ) => {
- // Endpoint of munching, no fields are left.
- };
- (init_slot:
- @slot($slot:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- let $field = $val;
- // Call the initializer.
- //
- // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
- // return when an error/panic occurs.
- unsafe {
- $crate::init::Init::__init($field, ::core::ptr::addr_of_mut!((*$slot).$field))?;
- }
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_init!(init_slot:
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (init_slot:
- @slot($slot:ident),
- // Direct value init.
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- $(let $field = $val;)?
- // Call the initializer.
- //
- // SAFETY: The memory at `slot` is uninitialized.
- unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
- // Create the drop guard.
- //
- // We only give access to `&DropGuard`, so it cannot be accidentally forgotten.
- //
- // SAFETY: We forget the guard later when initialization has succeeded.
- let $field = &unsafe {
- $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
- };
-
- $crate::try_init!(init_slot:
- @slot($slot),
- @munch_fields($($rest)*),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields( $(,)?),
- @acc($($acc:tt)*),
- ) => {
- // Endpoint, nothing more to munch, create the initializer.
- // Since we are in the `if false` branch, this will never get executed. We abuse `slot` to
- // get the correct type inference here:
- unsafe {
- ::core::ptr::write($slot, $t {
- $($acc)*
- });
- }
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)*$field: ::core::panic!(),),
- );
- };
- (make_initializer:
- @slot($slot:ident),
- @type_name($t:ident),
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- @acc($($acc:tt)*),
- ) => {
- $crate::try_init!(make_initializer:
- @slot($slot),
- @type_name($t),
- @munch_fields($($rest)*),
- @acc($($acc)*$field: ::core::panic!(),),
- );
- };
- (forget_guards:
- @munch_fields($(,)?),
- ) => {
- // Munching finished.
- };
- (forget_guards:
- @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
- (forget_guards:
- @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
- ) => {
- unsafe { $crate::init::__internal::DropGuard::forget($field) };
-
- $crate::try_init!(forget_guards:
- @munch_fields($($rest)*),
- );
- };
}
/// A pin-initializer for the type `T`.
@@ -1092,6 +775,79 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
/// deallocate.
/// - `slot` will not move until it is dropped, i.e. it will be pinned.
unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E>;
+
+ /// First initializes the value using `self` then calls the function `f` with the initialized
+ /// value.
+ ///
+ /// If `f` returns an error the value is dropped and the initializer will forward the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![allow(clippy::disallowed_names)]
+ /// use kernel::{types::Opaque, init::pin_init_from_closure};
+ /// #[repr(C)]
+ /// struct RawFoo([u8; 16]);
+ /// extern {
+ /// fn init_foo(_: *mut RawFoo);
+ /// }
+ ///
+ /// #[pin_data]
+ /// struct Foo {
+ /// #[pin]
+ /// raw: Opaque<RawFoo>,
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(self: Pin<&mut Self>) {
+ /// pr_info!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = pin_init!(Foo {
+ /// raw <- unsafe {
+ /// Opaque::ffi_init(|s| {
+ /// init_foo(s);
+ /// })
+ /// },
+ /// }).pin_chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn pin_chain<F>(self, f: F) -> ChainPinInit<Self, F, T, E>
+ where
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+ {
+ ChainPinInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`PinInit::pin_chain`].
+pub struct ChainPinInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
+
+// SAFETY: The `__pinned_init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error and in this case `slot` will be dropped.
+// - considers `slot` pinned.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainPinInit<I, F, T, E>
+where
+ I: PinInit<T, E>,
+ F: FnOnce(Pin<&mut T>) -> Result<(), E>,
+{
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__pinned_init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ let val = unsafe { &mut *slot };
+ // SAFETY: `slot` is considered pinned.
+ let val = unsafe { Pin::new_unchecked(val) };
+ (self.1)(val).map_err(|e| {
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) };
+ e
+ })
+ }
}
/// An initializer for `T`.
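The `pin_chain`/`chain` combinators added above first run the initializer and then a fallible hook, dropping the value in place if the hook fails. A small user-space sketch of that same shape (the `init_then` helper is invented for illustration and is not the kernel API):

    use std::mem::MaybeUninit;
    use std::ptr;

    // Run the first-stage initializer, then a fallible hook; on error, drop the
    // now-initialized value in place and forward the error, mirroring ChainInit.
    unsafe fn init_then<T, E>(
        slot: *mut T,
        init: impl FnOnce(*mut T) -> Result<(), E>,
        post: impl FnOnce(&mut T) -> Result<(), E>,
    ) -> Result<(), E> {
        init(slot)?; // first stage; the closure performs its own raw writes
        // SAFETY: `init` succeeded, so `slot` is initialized and we have unique access.
        let val = unsafe { &mut *slot };
        post(val).map_err(|e| {
            // SAFETY: `slot` was initialized by `init` above.
            unsafe { ptr::drop_in_place(slot) };
            e
        })
    }

    fn main() {
        let mut slot = MaybeUninit::<String>::uninit();
        let res: Result<(), ()> = unsafe {
            // SAFETY: `slot` provides valid storage for a String; the first closure writes
            // through the raw pointer (covered by this unsafe block), the second only gets
            // a `&mut String`.
            init_then(
                slot.as_mut_ptr(),
                |p| {
                    p.write(String::from("hello"));
                    Ok(())
                },
                |s| {
                    s.push_str(", world");
                    Ok(())
                },
            )
        };
        assert!(res.is_ok());
        // SAFETY: initialization succeeded above.
        let s = unsafe { slot.assume_init() };
        assert_eq!(s, "hello, world");
    }
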
@@ -1124,7 +880,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized {
///
/// [`Arc<T>`]: crate::sync::Arc
#[must_use = "An initializer must be used in order to create its value."]
-pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
+pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> {
/// Initializes `slot`.
///
/// # Safety
@@ -1133,16 +889,73 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: Sized {
/// - the caller does not touch `slot` when `Err` is returned, they are only permitted to
/// deallocate.
unsafe fn __init(self, slot: *mut T) -> Result<(), E>;
+
+ /// First initializes the value using `self` then calls the function `f` with the initialized
+ /// value.
+ ///
+ /// If `f` returns an error the value is dropped and the initializer will forward the error.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # #![allow(clippy::disallowed_names)]
+ /// use kernel::{types::Opaque, init::{self, init_from_closure}};
+ /// struct Foo {
+ /// buf: [u8; 1_000_000],
+ /// }
+ ///
+ /// impl Foo {
+ /// fn setup(&mut self) {
+ /// pr_info!("Setting up foo");
+ /// }
+ /// }
+ ///
+ /// let foo = init!(Foo {
+ /// buf <- init::zeroed()
+ /// }).chain(|foo| {
+ /// foo.setup();
+ /// Ok(())
+ /// });
+ /// ```
+ fn chain<F>(self, f: F) -> ChainInit<Self, F, T, E>
+ where
+ F: FnOnce(&mut T) -> Result<(), E>,
+ {
+ ChainInit(self, f, PhantomData)
+ }
+}
+
+/// An initializer returned by [`Init::chain`].
+pub struct ChainInit<I, F, T: ?Sized, E>(I, F, __internal::Invariant<(E, Box<T>)>);
+
+// SAFETY: The `__init` function is implemented such that it
+// - returns `Ok(())` on successful initialization,
+// - returns `Err(err)` on error and in this case `slot` will be dropped.
+unsafe impl<T: ?Sized, E, I, F> Init<T, E> for ChainInit<I, F, T, E>
+where
+ I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
+{
+ unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
+ // SAFETY: All requirements fulfilled since this function is `__init`.
+ unsafe { self.0.__pinned_init(slot)? };
+ // SAFETY: The above call initialized `slot` and we still have unique access.
+ (self.1)(unsafe { &mut *slot }).map_err(|e| {
+ // SAFETY: `slot` was initialized above.
+ unsafe { core::ptr::drop_in_place(slot) };
+ e
+ })
+ }
}
-// SAFETY: Every in-place initializer can also be used as a pin-initializer.
-unsafe impl<T: ?Sized, E, I> PinInit<T, E> for I
+// SAFETY: `__pinned_init` behaves exactly the same as `__init`.
+unsafe impl<T: ?Sized, E, I, F> PinInit<T, E> for ChainInit<I, F, T, E>
where
I: Init<T, E>,
+ F: FnOnce(&mut T) -> Result<(), E>,
{
unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
- // SAFETY: `__init` meets the same requirements as `__pinned_init`, except that it does not
- // require `slot` to not move after init.
+ // SAFETY: `__init` has less strict requirements compared to `__pinned_init`.
unsafe { self.__init(slot) }
}
}
@@ -1194,6 +1007,93 @@ pub fn uninit<T, E>() -> impl Init<MaybeUninit<T>, E> {
unsafe { init_from_closure(|_| Ok(())) }
}
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{error::Error, init::init_array_from_fn};
+/// let array: Box<[usize; 1_000]> = Box::init::<Error>(init_array_from_fn(|i| i)).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl Init<[T; N], E>
+where
+ I: Init<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+ // Counts the number of initialized elements and when dropped drops that many elements from
+ // `slot`.
+ let mut init_count = ScopeGuard::new_with_data(0, |i| {
+ // We now free every element that has been initialized before:
+ // SAFETY: The loop initialized exactly the values from 0..i and since we
+ // return `Err` below, the caller will consider the memory at `slot` as
+ // uninitialized.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ });
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ unsafe { init.__init(ptr) }?;
+ *init_count += 1;
+ }
+ init_count.dismiss();
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { init_from_closure(init) }
+}
+
+/// Initializes an array by initializing each element via the provided initializer.
+///
+/// # Examples
+///
+/// ```rust
+/// use kernel::{sync::{Arc, Mutex}, init::pin_init_array_from_fn, new_mutex};
+/// let array: Arc<[Mutex<usize>; 1_000]> =
+/// Arc::pin_init(pin_init_array_from_fn(|i| new_mutex!(i))).unwrap();
+/// assert_eq!(array.len(), 1_000);
+/// ```
+pub fn pin_init_array_from_fn<I, const N: usize, T, E>(
+ mut make_init: impl FnMut(usize) -> I,
+) -> impl PinInit<[T; N], E>
+where
+ I: PinInit<T, E>,
+{
+ let init = move |slot: *mut [T; N]| {
+ let slot = slot.cast::<T>();
+ // Counts the number of initialized elements and when dropped drops that many elements from
+ // `slot`.
+ let mut init_count = ScopeGuard::new_with_data(0, |i| {
+ // We now free every element that has been initialized before:
+ // SAFETY: The loop initialized exactly the values from 0..i and since we
+ // return `Err` below, the caller will consider the memory at `slot` as
+ // uninitialized.
+ unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(slot, i)) };
+ });
+ for i in 0..N {
+ let init = make_init(i);
+ // SAFETY: Since 0 <= `i` < N, it is still in bounds of `[T; N]`.
+ let ptr = unsafe { slot.add(i) };
+ // SAFETY: The pointer is derived from `slot` and thus satisfies the `__init`
+ // requirements.
+ unsafe { init.__pinned_init(ptr) }?;
+ *init_count += 1;
+ }
+ init_count.dismiss();
+ Ok(())
+ };
+ // SAFETY: The initializer above initializes every element of the array. On failure it drops
+ // any initialized elements and returns `Err`.
+ unsafe { pin_init_from_closure(init) }
+}
+
// SAFETY: Every type can be initialized by-value.
unsafe impl<T, E> Init<T, E> for T {
unsafe fn __init(self, slot: *mut T) -> Result<(), E> {
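`init_array_from_fn()` and `pin_init_array_from_fn()` above lean on a count-tracking guard so that a failure mid-array drops exactly the elements initialized so far. A user-space sketch of the same bookkeeping (the kernel versions use `ScopeGuard`; this sketch inlines the cleanup, and `try_init_array` is an invented name):

    use std::mem::MaybeUninit;
    use std::ptr;

    // On failure, drop exactly the prefix of elements initialized before the error.
    fn try_init_array<T, E, const N: usize>(
        mut make: impl FnMut(usize) -> Result<T, E>,
    ) -> Result<[T; N], E> {
        let mut storage: MaybeUninit<[T; N]> = MaybeUninit::uninit();
        let base = storage.as_mut_ptr().cast::<T>();
        let mut initialized = 0usize;
        for i in 0..N {
            match make(i) {
                Ok(v) => {
                    // SAFETY: `i < N`, so the write stays inside the array.
                    unsafe { base.add(i).write(v) };
                    initialized += 1;
                }
                Err(e) => {
                    // SAFETY: exactly `initialized` leading elements were written above.
                    unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(base, initialized)) };
                    return Err(e);
                }
            }
        }
        // SAFETY: the loop initialized all `N` elements.
        Ok(unsafe { storage.assume_init() })
    }

    fn main() {
        let ok: Result<[String; 3], ()> = try_init_array(|i| Ok(i.to_string()));
        assert_eq!(ok.unwrap()[2], "2");

        let err: Result<[String; 3], &str> =
            try_init_array(|i| if i < 2 { Ok(i.to_string()) } else { Err("boom") });
        // The two Strings built before the failure were dropped, not leaked.
        assert!(err.is_err());
    }
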
@@ -1202,6 +1102,13 @@ unsafe impl<T, E> Init<T, E> for T {
}
}
+// SAFETY: Every type can be initialized by-value. `__pinned_init` calls `__init`.
+unsafe impl<T, E> PinInit<T, E> for T {
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ unsafe { self.__init(slot) }
+ }
+}
+
/// Smart pointer that can initialize memory in-place.
pub trait InPlaceInit<T>: Sized {
/// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this
@@ -1390,6 +1297,11 @@ impl_zeroable! {
// SAFETY: Type is allowed to take any value, including all zeros.
{<T>} MaybeUninit<T>,
+ // SAFETY: Type is allowed to take any value, including all zeros.
+ {<T>} Opaque<T>,
+
+ // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`.
+ {<T: ?Sized + Zeroable>} UnsafeCell<T>,
// SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee).
Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>,
diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs
index 44751fb62b51..db3372619ecd 100644
--- a/rust/kernel/init/__internal.rs
+++ b/rust/kernel/init/__internal.rs
@@ -13,7 +13,7 @@ use super::*;
///
/// [nomicon]: https://doc.rust-lang.org/nomicon/subtyping.html
/// [this table]: https://doc.rust-lang.org/nomicon/phantom-data.html#table-of-phantomdata-patterns
-type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
+pub(super) type Invariant<T> = PhantomData<fn(*mut T) -> *mut T>;
/// This is the module-internal type implementing `PinInit` and `Init`. It is unsafe to create this
/// type, since the closure needs to fulfill the same safety requirement as the
@@ -32,6 +32,18 @@ where
}
}
+// SAFETY: While constructing the `InitClosure`, the user promised that it upholds the
+// `__pinned_init` invariants.
+unsafe impl<T: ?Sized, F, E> PinInit<T, E> for InitClosure<F, T, E>
+where
+ F: FnOnce(*mut T) -> Result<(), E>,
+{
+ #[inline]
+ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> {
+ (self.0)(slot)
+ }
+}
+
/// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate
/// the pin projections within the initializers.
///
@@ -174,7 +186,6 @@ impl<T> StackInit<T> {
/// Can be forgotten to prevent the drop.
pub struct DropGuard<T: ?Sized> {
ptr: *mut T,
- do_drop: Cell<bool>,
}
impl<T: ?Sized> DropGuard<T> {
@@ -190,32 +201,16 @@ impl<T: ?Sized> DropGuard<T> {
/// - will not be dropped by any other means.
#[inline]
pub unsafe fn new(ptr: *mut T) -> Self {
- Self {
- ptr,
- do_drop: Cell::new(true),
- }
- }
-
- /// Prevents this guard from dropping the supplied pointer.
- ///
- /// # Safety
- ///
- /// This function is unsafe in order to prevent safe code from forgetting this guard. It should
- /// only be called by the macros in this module.
- #[inline]
- pub unsafe fn forget(&self) {
- self.do_drop.set(false);
+ Self { ptr }
}
}
impl<T: ?Sized> Drop for DropGuard<T> {
#[inline]
fn drop(&mut self) {
- if self.do_drop.get() {
- // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
- // ensuring that this operation is safe.
- unsafe { ptr::drop_in_place(self.ptr) }
- }
+ // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
+ // ensuring that this operation is safe.
+ unsafe { ptr::drop_in_place(self.ptr) }
}
}
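With `do_drop` gone, a guard now unconditionally drops its pointee when dropped, and the success path disarms it with `core::mem::forget()` instead of flipping a flag. A user-space sketch of that pattern (a stand-in type, not the kernel's `DropGuard`):

    use std::{mem, ptr};

    // Dropping the guard always drops the pointee; a successful init path disarms it
    // by forgetting the guard.
    struct DropGuard<T: ?Sized> {
        ptr: *mut T,
    }

    impl<T: ?Sized> DropGuard<T> {
        // Safety: `ptr` must point to a valid, initialized `T` that is not dropped elsewhere.
        unsafe fn new(ptr: *mut T) -> Self {
            Self { ptr }
        }
    }

    impl<T: ?Sized> Drop for DropGuard<T> {
        fn drop(&mut self) {
            // SAFETY: guaranteed by the contract of `new()`.
            unsafe { ptr::drop_in_place(self.ptr) }
        }
    }

    fn main() {
        let raw = Box::into_raw(Box::new(String::from("partially built")));
        // SAFETY: `raw` points to a valid, initialized String owned by no one else.
        let guard = unsafe { DropGuard::new(raw) };
        // ... further initialization steps would go here; if one failed and `guard` were
        // dropped, it would run the String's destructor for us ...
        mem::forget(guard); // success path: disarm the guard
        // SAFETY: the value is still initialized; re-box it so it is freed normally.
        let s = unsafe { Box::from_raw(raw) };
        assert_eq!(s.as_str(), "partially built");
    }
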
diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
index 00aa4e956c0a..cb6e61b6c50b 100644
--- a/rust/kernel/init/macros.rs
+++ b/rust/kernel/init/macros.rs
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! This module provides the macros that actually implement the proc-macros `pin_data` and
-//! `pinned_drop`.
+//! `pinned_drop`. It also contains `__init_internal`, the implementation of the `{try_}{pin_}init!`
+//! macros.
//!
//! These macros should never be called directly, since they expect their input to be
-//! in a certain format which is internal. Use the proc-macros instead.
+//! in a certain format which is internal. If used incorrectly, these macros can lead to UB even in
+//! safe code! Use the public facing macros instead.
//!
//! This architecture has been chosen because the kernel does not yet have access to `syn` which
//! would make matters a lot easier for implementing these as proc-macros.
@@ -43,7 +45,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//!
@@ -168,8 +170,10 @@
//! t: T,
//! }
//! #[doc(hidden)]
-//! impl<'__pin, T>
-//! ::core::marker::Unpin for Bar<T> where __Unpin<'__pin, T>: ::core::marker::Unpin {}
+//! impl<'__pin, T> ::core::marker::Unpin for Bar<T>
+//! where
+//! __Unpin<'__pin, T>: ::core::marker::Unpin,
+//! {}
//! // Now we need to ensure that `Bar` does not implement `Drop`, since that would give users
//! // access to `&mut self` inside of `drop` even if the struct was pinned. This could lead to
//! // UB with only safe code, so we disallow this by giving a trait implementation error using
@@ -186,8 +190,9 @@
//! // for safety, but a good sanity check, since no normal code calls `PinnedDrop::drop`.
//! #[allow(non_camel_case_types)]
//! trait UselessPinnedDropImpl_you_need_to_specify_PinnedDrop {}
-//! impl<T: ::kernel::init::PinnedDrop>
-//! UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
+//! impl<
+//! T: ::kernel::init::PinnedDrop,
+//! > UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for T {}
//! impl<T> UselessPinnedDropImpl_you_need_to_specify_PinnedDrop for Bar<T> {}
//! };
//! ```
@@ -217,7 +222,7 @@
//! // return type and shadow it later when we insert the arbitrary user code. That way
//! // there will be no possibility of returning without `unsafe`.
//! struct __InitOk;
-//! // Get the pin-data type from the initialized type.
+//! // Get the data about fields from the supplied type.
//! // - the function is unsafe, hence the unsafe block
//! // - we `use` the `HasPinData` trait in the block, it is only available in that
//! // scope.
@@ -225,8 +230,7 @@
//! use ::kernel::init::__internal::HasPinData;
//! Self::__pin_data()
//! };
-//! // Use `data` to help with type inference, the closure supplied will have the type
-//! // `FnOnce(*mut Self) -> Result<__InitOk, Infallible>`.
+//! // Ensure that `data` really is of type `PinData` and help with type inference:
//! let init = ::kernel::init::__internal::PinData::make_closure::<
//! _,
//! __InitOk,
@@ -234,71 +238,75 @@
//! >(data, move |slot| {
//! {
//! // Shadow the structure so it cannot be used to return early. If a user
-//! // tries to write `return Ok(__InitOk)`, then they get a type error, since
-//! // that will refer to this struct instead of the one defined above.
+//! // tries to write `return Ok(__InitOk)`, then they get a type error,
+//! // since that will refer to this struct instead of the one defined
+//! // above.
//! struct __InitOk;
//! // This is the expansion of `t,`, which is syntactic sugar for `t: t,`.
-//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).t), t) };
-//! // Since initialization could fail later (not in this case, since the error
-//! // type is `Infallible`) we will need to drop this field if there is an
-//! // error later. This `DropGuard` will drop the field when it gets dropped
-//! // and has not yet been forgotten. We make a reference to it, so users
-//! // cannot `mem::forget` it from the initializer, since the name is the same
-//! // as the field (including hygiene).
-//! let t = &unsafe {
-//! ::kernel::init::__internal::DropGuard::new(
-//! ::core::addr_of_mut!((*slot).t),
-//! )
+//! {
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).t), t) };
+//! }
+//! // Since initialization could fail later (not in this case, since the
+//! // error type is `Infallible`) we will need to drop this field if there
+//! // is an error later. This `DropGuard` will drop the field when it gets
+//! // dropped and has not yet been forgotten.
+//! let t = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).t))
//! };
//! // Expansion of `x: 0,`:
-//! // Since this can be an arbitrary expression we cannot place it inside of
-//! // the `unsafe` block, so we bind it here.
-//! let x = 0;
-//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };
+//! // Since this can be an arbitrary expression we cannot place it inside
+//! // of the `unsafe` block, so we bind it here.
+//! {
+//! let x = 0;
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).x), x) };
+//! }
//! // We again create a `DropGuard`.
-//! let x = &unsafe {
-//! ::kernel::init::__internal::DropGuard::new(
-//! ::core::addr_of_mut!((*slot).x),
-//! )
+//! let x = unsafe {
+//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).x))
//! };
-//!
+//! // Since initialization has successfully completed, we can now forget
+//! // the guards; this dismisses them so that the fields they protect are
+//! // not dropped on the success path.
+//! ::core::mem::forget(x);
+//! ::core::mem::forget(t);
//! // Here we use the type checker to ensure that every field has been
//! // initialized exactly once, since this is `if false` it will never get
//! // executed, but still type-checked.
-//! // Additionally we abuse `slot` to automatically infer the correct type for
-//! // the struct. This is also another check that every field is accessible
-//! // from this scope.
+//! // Additionally we abuse `slot` to automatically infer the correct type
+//! // for the struct. This is also another check that every field is
+//! // accessible from this scope.
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
-//! if false {
+//! let _ = || {
//! unsafe {
//! ::core::ptr::write(
//! slot,
//! Self {
-//! // We only care about typecheck finding every field here,
-//! // the expression does not matter, just conjure one using
-//! // `panic!()`:
+//! // We only care about typecheck finding every field
+//! // here, the expression does not matter, just conjure
+//! // one using `panic!()`:
//! t: ::core::panic!(),
//! x: ::core::panic!(),
//! },
//! );
//! };
-//! }
-//! // Since initialization has successfully completed, we can now forget the
-//! // guards. This is not `mem::forget`, since we only have `&DropGuard`.
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(t) };
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(x) };
+//! };
//! }
//! // We leave the scope above and gain access to the previously shadowed
//! // `__InitOk` that we need to return.
//! Ok(__InitOk)
//! });
//! // Change the return type from `__InitOk` to `()`.
-//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! let init = move |
+//! slot,
+//! | -> ::core::result::Result<(), ::core::convert::Infallible> {
//! init(slot).map(|__InitOk| ())
//! };
//! // Construct the initializer.
//! let init = unsafe {
-//! ::kernel::init::pin_init_from_closure::<_, ::core::convert::Infallible>(init)
+//! ::kernel::init::pin_init_from_closure::<
+//! _,
+//! ::core::convert::Infallible,
+//! >(init)
//! };
//! init
//! }
@@ -372,7 +380,10 @@
//! b: Bar<u32>,
//! }
//! #[doc(hidden)]
-//! impl<'__pin> ::core::marker::Unpin for Foo where __Unpin<'__pin>: ::core::marker::Unpin {}
+//! impl<'__pin> ::core::marker::Unpin for Foo
+//! where
+//! __Unpin<'__pin>: ::core::marker::Unpin,
+//! {}
//! // Since we specified `PinnedDrop` as the argument to `#[pin_data]`, we expect `Foo` to
//! // implement `PinnedDrop`. Thus we do not need to prevent `Drop` implementations like
//! // before, instead we implement `Drop` here and delegate to `PinnedDrop`.
@@ -401,7 +412,7 @@
//! #[pinned_drop]
//! impl PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//! ```
@@ -412,7 +423,7 @@
//! // `unsafe`, full path and the token parameter are added, everything else stays the same.
//! unsafe impl ::kernel::init::PinnedDrop for Foo {
//! fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) {
-//! println!("{self:p} is getting dropped.");
+//! pr_info!("{self:p} is getting dropped.");
//! }
//! }
//! ```
@@ -447,18 +458,21 @@
//! >(data, move |slot| {
//! {
//! struct __InitOk;
-//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };
-//! let a = &unsafe {
+//! {
+//! unsafe { ::core::ptr::write(::core::addr_of_mut!((*slot).a), a) };
+//! }
+//! let a = unsafe {
//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).a))
//! };
-//! let b = Bar::new(36);
+//! let init = Bar::new(36);
-//! unsafe { data.b(::core::addr_of_mut!((*slot).b), b)? };
+//! unsafe { data.b(::core::addr_of_mut!((*slot).b), init)? };
-//! let b = &unsafe {
+//! let b = unsafe {
//! ::kernel::init::__internal::DropGuard::new(::core::addr_of_mut!((*slot).b))
//! };
-//!
+//! ::core::mem::forget(b);
+//! ::core::mem::forget(a);
//! #[allow(unreachable_code, clippy::diverging_sub_expression)]
-//! if false {
+//! let _ = || {
//! unsafe {
//! ::core::ptr::write(
//! slot,
@@ -468,13 +482,13 @@
//! },
//! );
//! };
-//! }
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(a) };
-//! unsafe { ::kernel::init::__internal::DropGuard::forget(b) };
+//! };
//! }
//! Ok(__InitOk)
//! });
-//! let init = move |slot| -> ::core::result::Result<(), ::core::convert::Infallible> {
+//! let init = move |
+//! slot,
+//! | -> ::core::result::Result<(), ::core::convert::Infallible> {
//! init(slot).map(|__InitOk| ())
//! };
//! let init = unsafe {
@@ -960,6 +974,7 @@ macro_rules! __pin_data {
where $($whr)*
{
$(
+ $(#[$($p_attr)*])*
$pvis unsafe fn $p_field<E>(
self,
slot: *mut $p_type,
@@ -969,6 +984,7 @@ macro_rules! __pin_data {
}
)*
$(
+ $(#[$($attr)*])*
$fvis unsafe fn $field<E>(
self,
slot: *mut $type,
@@ -980,3 +996,388 @@ macro_rules! __pin_data {
}
};
}
+
+/// The internal init macro. Do not call manually!
+///
+/// This is called by the `{try_}{pin_}init!` macros with various inputs.
+///
+/// This macro has multiple internal call configurations; these are always the very first ident:
+/// - nothing: this is the base case and called by the `{try_}{pin_}init!` macros.
+/// - `with_update_parsed`: when the `..Zeroable::zeroed()` syntax has been handled.
+/// - `init_slot`: recursively creates the code that initializes all fields in `slot`.
+/// - `make_initializer`: recursively creates the struct initializer that guarantees that every
+/// field has been initialized exactly once.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __init_internal {
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields(),
+ ) => {
+ $crate::__init_internal!(with_update_parsed:
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @zeroed(), // Nothing means default behavior.
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields(..Zeroable::zeroed()),
+ ) => {
+ $crate::__init_internal!(with_update_parsed:
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @zeroed(()), // `()` means zero all fields not mentioned.
+ )
+ };
+ (
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @munch_fields($ignore:tt $($rest:tt)*),
+ ) => {
+ $crate::__init_internal!(
+ @this($($this)?),
+ @typ($t),
+ @fields($($fields)*),
+ @error($err),
+ @data($data, $($use_data)?),
+ @has_data($has_data, $get_data),
+ @construct_closure($construct_closure),
+ @munch_fields($($rest)*),
+ )
+ };
+ (with_update_parsed:
+ @this($($this:ident)?),
+ @typ($t:path),
+ @fields($($fields:tt)*),
+ @error($err:ty),
+ // Either `PinData` or `InitData`, `$use_data` should only be present in the `PinData`
+ // case.
+ @data($data:ident, $($use_data:ident)?),
+ // `HasPinData` or `HasInitData`.
+ @has_data($has_data:ident, $get_data:ident),
+ // `pin_init_from_closure` or `init_from_closure`.
+ @construct_closure($construct_closure:ident),
+ @zeroed($($init_zeroed:expr)?),
+ ) => {{
+ // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
+ // type and shadow it later when we insert the arbitrary user code. That way there will be
+ // no possibility of returning without `unsafe`.
+ struct __InitOk;
+ // Get the data about fields from the supplied type.
+ let data = unsafe {
+ use $crate::init::__internal::$has_data;
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated to already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid rust
+ // code.
+ ::kernel::macros::paste!($t::$get_data())
+ };
+ // Ensure that `data` really is of type `$data` and help with type inference:
+ let init = $crate::init::__internal::$data::make_closure::<_, __InitOk, $err>(
+ data,
+ move |slot| {
+ {
+ // Shadow the structure so it cannot be used to return early.
+ struct __InitOk;
+ // If `$init_zeroed` is present we should zero the slot now and not emit an
+ // error when fields are missing (since they will be zeroed). We also have to
+ // check that the type actually implements `Zeroable`.
+ $({
+ fn assert_zeroable<T: $crate::init::Zeroable>(_: *mut T) {}
+ // Ensure that the struct is indeed `Zeroable`.
+ assert_zeroable(slot);
+ // SAFETY: The type implements `Zeroable` by the check above.
+ unsafe { ::core::ptr::write_bytes(slot, 0, 1) };
+ $init_zeroed // This will be `()` if set.
+ })?
+ // Create the `this` so it can be referenced by the user inside of the
+ // expressions creating the individual fields.
+ $(let $this = unsafe { ::core::ptr::NonNull::new_unchecked(slot) };)?
+ // Initialize every field.
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data(data),
+ @slot(slot),
+ @guards(),
+ @munch_fields($($fields)*,),
+ );
+ // We use unreachable code to ensure that all fields have been mentioned exactly
+ // once; this struct initializer will still be type-checked and complain with a
+ // very natural error message if a field is forgotten/mentioned more than once.
+ #[allow(unreachable_code, clippy::diverging_sub_expression)]
+ let _ = || {
+ $crate::__init_internal!(make_initializer:
+ @slot(slot),
+ @type_name($t),
+ @munch_fields($($fields)*,),
+ @acc(),
+ );
+ };
+ }
+ Ok(__InitOk)
+ }
+ );
+ let init = move |slot| -> ::core::result::Result<(), $err> {
+ init(slot).map(|__InitOk| ())
+ };
+ let init = unsafe { $crate::init::$construct_closure::<_, $err>(init) };
+ init
+ }};
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ @munch_fields($(..Zeroable::zeroed())? $(,)?),
+ ) => {
+ // Endpoint of munching, no fields are left. If execution reaches this point, all fields
+ // have been initialized. Therefore we can now dismiss the guards by forgetting them.
+ $(::core::mem::forget($guards);)*
+ };
+ (init_slot($use_data:ident): // `use_data` is present, so we use the `data` to init fields.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let init = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+ unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot($use_data):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot(): // No `use_data`, so we use `Init::__init` directly.
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // In-place initialization syntax.
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ ) => {
+ let init = $val;
+ // Call the initializer.
+ //
+ // SAFETY: `slot` is valid, because we are inside of an initializer closure, we
+ // return when an error/panic occurs.
+ unsafe { $crate::init::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($($use_data:ident)?):
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ ::kernel::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [<$field>] = unsafe {
+ $crate::init::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot($($use_data)?):
+ @data($data),
+ @slot($slot),
+ @guards([<$field>], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields(..Zeroable::zeroed() $(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer. Since the user specified
+ // `..Zeroable::zeroed()`, the slot will already have been zeroed and all fields that have
+ // not been overwritten are thus zero and initialized. We still check that all fields are
+ // actually accessible by using the struct update syntax ourselves.
+ // We are inside of a closure that is never executed and thus we can abuse `slot` to
+ // get the correct type inference here:
+ #[allow(unused_assignments)]
+ unsafe {
+ let mut zeroed = ::core::mem::zeroed();
+ // We have to use type inference here to make zeroed have the correct type. This does
+ // not get executed, so it has no effect.
+ ::core::ptr::write($slot, zeroed);
+ zeroed = ::core::mem::zeroed();
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated to already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid rust
+ // code.
+ ::kernel::macros::paste!(
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ ..zeroed
+ });
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($(,)?),
+ @acc($($acc:tt)*),
+ ) => {
+ // Endpoint, nothing more to munch, create the initializer.
+ // Since we are in the closure that is never called, this will never get executed.
+ // We abuse `slot` to get the correct type inference here:
+ unsafe {
+ // Here we abuse `paste!` to retokenize `$t`. Declarative macros have some internal
+ // information that is associated to already parsed fragments, so a path fragment
+ // cannot be used in this position. Doing the retokenization results in valid rust
+ // code.
+ ::kernel::macros::paste!(
+ ::core::ptr::write($slot, $t {
+ $($acc)*
+ });
+ );
+ }
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($field:ident <- $val:expr, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+ (make_initializer:
+ @slot($slot:ident),
+ @type_name($t:path),
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ @acc($($acc:tt)*),
+ ) => {
+ $crate::__init_internal!(make_initializer:
+ @slot($slot),
+ @type_name($t),
+ @munch_fields($($rest)*),
+ @acc($($acc)* $field: ::core::panic!(),),
+ );
+ };
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! __derive_zeroable {
+ (parse_input:
+ @sig(
+ $(#[$($struct_attr:tt)*])*
+ $vis:vis struct $name:ident
+ $(where $($whr:tt)*)?
+ ),
+ @impl_generics($($impl_generics:tt)*),
+ @ty_generics($($ty_generics:tt)*),
+ @body({
+ $(
+ $(#[$($field_attr:tt)*])*
+ $field:ident : $field_ty:ty
+ ),* $(,)?
+ }),
+ ) => {
+ // SAFETY: Every field type implements `Zeroable` and padding bytes may be zero.
+ #[automatically_derived]
+ unsafe impl<$($impl_generics)*> $crate::init::Zeroable for $name<$($ty_generics)*>
+ where
+ $($($whr)*)?
+ {}
+ const _: () = {
+ fn assert_zeroable<T: ?::core::marker::Sized + $crate::init::Zeroable>() {}
+ fn ensure_zeroable<$($impl_generics)*>()
+ where $($($whr)*)?
+ {
+ $(assert_zeroable::<$field_ty>();)*
+ }
+ };
+ };
+}
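`__derive_zeroable!` emits an `unsafe impl` plus a never-called function whose only purpose is to make the compiler check that every field type is `Zeroable`. A user-space sketch of that expansion shape (the `Zeroable` trait and `Config` type below are stand-ins, not the kernel's):

    use std::marker::PhantomData;

    // Stand-in for the kernel's `Zeroable` marker: all-zero bytes are a valid value.
    unsafe trait Zeroable {}

    unsafe impl Zeroable for u32 {}
    unsafe impl Zeroable for usize {}
    unsafe impl<T: ?Sized> Zeroable for PhantomData<T> {}

    struct Config<T> {
        len: usize,
        flags: u32,
        _marker: PhantomData<T>,
    }

    // Roughly what the derive expands to:
    // SAFETY: every field type is `Zeroable` (checked below).
    unsafe impl<T> Zeroable for Config<T> {}

    #[allow(dead_code)]
    const _: () = {
        fn assert_zeroable<F: ?Sized + Zeroable>() {}
        // Never called; it exists only so the compiler verifies each field type.
        fn ensure_zeroable<T>() {
            assert_zeroable::<usize>();
            assert_zeroable::<u32>();
            assert_zeroable::<PhantomData<T>>();
        }
    };

    fn main() {
        // With the impl in place, an all-zero `Config` is sound to conjure:
        let c: Config<()> = unsafe { std::mem::zeroed() };
        assert_eq!((c.len, c.flags), (0, 0));
    }
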
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
new file mode 100644
index 000000000000..722655b2d62d
--- /dev/null
+++ b/rust/kernel/kunit.rs
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! KUnit-based macros for Rust unit tests.
+//!
+//! C header: [`include/kunit/test.h`](../../../../../include/kunit/test.h)
+//!
+//! Reference: <https://docs.kernel.org/dev-tools/kunit/index.html>
+
+use core::{ffi::c_void, fmt};
+
+/// Prints a KUnit error-level message.
+///
+/// Public but hidden since it should only be used from KUnit generated code.
+#[doc(hidden)]
+pub fn err(args: fmt::Arguments<'_>) {
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_printk(
+ b"\x013%pA\0".as_ptr() as _,
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Prints a KUnit info-level message.
+///
+/// Public but hidden since it should only be used from KUnit generated code.
+#[doc(hidden)]
+pub fn info(args: fmt::Arguments<'_>) {
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_printk(
+ b"\x016%pA\0".as_ptr() as _,
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Asserts that a boolean expression is `true` at runtime.
+///
+/// Public but hidden since it should only be used from generated tests.
+///
+/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
+/// facilities. See [`assert!`] for more details.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_assert {
+ ($name:literal, $file:literal, $diff:expr, $condition:expr $(,)?) => {
+ 'out: {
+ // Do nothing if the condition is `true`.
+ if $condition {
+ break 'out;
+ }
+
+ static FILE: &'static $crate::str::CStr = $crate::c_str!($file);
+ static LINE: i32 = core::line!() as i32 - $diff;
+ static CONDITION: &'static $crate::str::CStr = $crate::c_str!(stringify!($condition));
+
+ // SAFETY: FFI call without safety requirements.
+ let kunit_test = unsafe { $crate::bindings::kunit_get_current_test() };
+ if kunit_test.is_null() {
+ // The assertion failed but this task is not running a KUnit test, so we cannot call
+ // KUnit, but at least print an error to the kernel log. This may happen if this
+ // macro is called from a spawned thread in a test (see
+ // `scripts/rustdoc_test_gen.rs`) or if some non-test code calls this macro by
+ // mistake (it is hidden to prevent that).
+ //
+ // This mimics KUnit's failed assertion format.
+ $crate::kunit::err(format_args!(
+ " # {}: ASSERTION FAILED at {FILE}:{LINE}\n",
+ $name
+ ));
+ $crate::kunit::err(format_args!(
+ " Expected {CONDITION} to be true, but is false\n"
+ ));
+ $crate::kunit::err(format_args!(
+ " Failure not reported to KUnit since this is a non-KUnit task\n"
+ ));
+ break 'out;
+ }
+
+ #[repr(transparent)]
+ struct Location($crate::bindings::kunit_loc);
+
+ #[repr(transparent)]
+ struct UnaryAssert($crate::bindings::kunit_unary_assert);
+
+ // SAFETY: There is only a static instance and in that one the pointer field points to
+ // an immutable C string.
+ unsafe impl Sync for Location {}
+
+ // SAFETY: There is only a static instance and in that one the pointer field points to
+ // an immutable C string.
+ unsafe impl Sync for UnaryAssert {}
+
+ static LOCATION: Location = Location($crate::bindings::kunit_loc {
+ file: FILE.as_char_ptr(),
+ line: LINE,
+ });
+ static ASSERTION: UnaryAssert = UnaryAssert($crate::bindings::kunit_unary_assert {
+ assert: $crate::bindings::kunit_assert {},
+ condition: CONDITION.as_char_ptr(),
+ expected_true: true,
+ });
+
+ // SAFETY:
+ // - FFI call.
+ // - The `kunit_test` pointer is valid because we got it from
+ // `kunit_get_current_test()` and it was not null. This means we are in a KUnit
+ // test, and that the pointer can be passed to KUnit functions and assertions.
+ // - The string pointers (`file` and `condition` above) point to null-terminated
+ // strings since they are `CStr`s.
+ // - The function pointer (`format`) points to the proper function.
+ // - The pointers passed will remain valid since they point to `static`s.
+ // - The format string is allowed to be null.
+ // - There are, however, problems with this: first of all, this will end up stopping
+ // the thread, without running destructors. While that is problematic in itself,
+ // it is considered UB to have what is effectively a forced foreign unwind
+ // with `extern "C"` ABI. One could observe the stack that is now gone from
+ // another thread. We should avoid pinning stack variables to prevent library UB,
+ // too. For the moment, given that test failures are reported immediately before the
+ // next test runs, that test failures should be fixed and that KUnit is explicitly
+ // documented as not suitable for production environments, we feel it is reasonable.
+ unsafe {
+ $crate::bindings::__kunit_do_failed_assertion(
+ kunit_test,
+ core::ptr::addr_of!(LOCATION.0),
+ $crate::bindings::kunit_assert_type_KUNIT_ASSERTION,
+ core::ptr::addr_of!(ASSERTION.0.assert),
+ Some($crate::bindings::kunit_unary_assert_format),
+ core::ptr::null(),
+ );
+ }
+
+ // SAFETY: FFI call; the `kunit_test` pointer is valid because this hidden macro should only
+ // be called by the generated documentation tests which forward the test pointer given
+ // by KUnit.
+ unsafe {
+ $crate::bindings::__kunit_abort(kunit_test);
+ }
+ }
+ };
+}
+
+/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
+///
+/// Public but hidden since it should only be used from generated tests.
+///
+/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
+/// facilities. See [`assert!`] for more details.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_assert_eq {
+ ($name:literal, $file:literal, $diff:expr, $left:expr, $right:expr $(,)?) => {{
+ // For the moment, we just forward to the expression assert because, for binary asserts,
+ // KUnit supports only a few types (e.g. integers).
+ $crate::kunit_assert!($name, $file, $diff, $left == $right);
+ }};
+}
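
For orientation, the call shape these hidden macros expect is roughly the following; the test name, file and line offset are hypothetical here, since the real callers are the doctests generated by `scripts/rustdoc_test_gen.rs`:

    kunit_assert!("rust_doctest_kernel_example_0", "rust/kernel/kunit.rs", 0, 1 + 1 == 2);
    kunit_assert_eq!("rust_doctest_kernel_example_0", "rust/kernel/kunit.rs", 0, 1 + 1, 2);

When the current task is a KUnit test, a failure is reported through `__kunit_do_failed_assertion()` and the test is aborted; otherwise, only the error messages are printed to the kernel log.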
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 85b261209977..e8811700239a 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -34,6 +34,8 @@ mod build_assert;
pub mod error;
pub mod init;
pub mod ioctl;
+#[cfg(CONFIG_KUNIT)]
+pub mod kunit;
pub mod prelude;
pub mod print;
mod static_assert;
@@ -93,7 +95,4 @@ fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
pr_emerg!("{}\n", info);
// SAFETY: FFI call.
unsafe { bindings::BUG() };
- // Bindgen currently does not recognize `__noreturn` so `BUG` returns `()`
- // instead of `!`. See <https://github.com/rust-lang/rust-bindgen/issues/2094>.
- loop {}
}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
index c28587d68ebc..ae21600970b3 100644
--- a/rust/kernel/prelude.rs
+++ b/rust/kernel/prelude.rs
@@ -18,7 +18,7 @@ pub use core::pin::Pin;
pub use alloc::{boxed::Box, vec::Vec};
#[doc(no_inline)]
-pub use macros::{module, pin_data, pinned_drop, vtable};
+pub use macros::{module, pin_data, pinned_drop, vtable, Zeroable};
pub use super::build_assert;
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index c9dd3bf59e34..c41607b2e4fe 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -213,6 +213,7 @@ impl fmt::Display for CStr {
///
/// ```
/// # use kernel::c_str;
+ /// # use kernel::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
@@ -241,6 +242,7 @@ impl fmt::Debug for CStr {
///
/// ```
/// # use kernel::c_str;
+ /// # use kernel::fmt;
/// # use kernel::str::CStr;
/// # use kernel::str::CString;
/// let penguin = c_str!("🐧");
@@ -529,7 +531,7 @@ impl fmt::Write for Formatter {
/// # Examples
///
/// ```
-/// use kernel::str::CString;
+/// use kernel::{str::CString, fmt};
///
/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20)).unwrap();
/// assert_eq!(s.as_bytes_with_nul(), "abc1020\0".as_bytes());
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
index 172f563976a9..3d496391a9bd 100644
--- a/rust/kernel/sync/arc.rs
+++ b/rust/kernel/sync/arc.rs
@@ -73,6 +73,7 @@ mod std_vendor;
/// assert_eq!(cloned.b, 20);
///
/// // The refcount drops to zero when `cloned` goes out of scope, and the memory is freed.
+/// # Ok::<(), Error>(())
/// ```
///
/// Using `Arc<T>` as the type of `self`:
@@ -98,6 +99,7 @@ mod std_vendor;
/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
/// obj.use_reference();
/// obj.take_over();
+/// # Ok::<(), Error>(())
/// ```
///
/// Coercion from `Arc<Example>` to `Arc<dyn MyTrait>`:
@@ -121,6 +123,7 @@ mod std_vendor;
///
/// // `coerced` has type `Arc<dyn MyTrait>`.
/// let coerced: Arc<dyn MyTrait> = obj;
+/// # Ok::<(), Error>(())
/// ```
pub struct Arc<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
@@ -336,7 +339,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
/// # Example
///
/// ```
-/// use crate::sync::{Arc, ArcBorrow};
+/// use kernel::sync::{Arc, ArcBorrow};
///
/// struct Example;
///
@@ -349,12 +352,13 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
///
/// // Assert that both `obj` and `cloned` point to the same underlying object.
/// assert!(core::ptr::eq(&*obj, &*cloned));
+/// # Ok::<(), Error>(())
/// ```
///
/// Using `ArcBorrow<T>` as the type of `self`:
///
/// ```
-/// use crate::sync::{Arc, ArcBorrow};
+/// use kernel::sync::{Arc, ArcBorrow};
///
/// struct Example {
/// a: u32,
@@ -369,6 +373,7 @@ impl<T: ?Sized> From<Pin<UniqueArc<T>>> for Arc<T> {
///
/// let obj = Arc::try_new(Example { a: 10, b: 20 })?;
/// obj.as_arc_borrow().use_reference();
+/// # Ok::<(), Error>(())
/// ```
pub struct ArcBorrow<'a, T: ?Sized + 'a> {
inner: NonNull<ArcInner<T>>,
diff --git a/rust/kernel/sync/lock.rs b/rust/kernel/sync/lock.rs
index a2216325632d..70a785f04754 100644
--- a/rust/kernel/sync/lock.rs
+++ b/rust/kernel/sync/lock.rs
@@ -72,8 +72,8 @@ pub unsafe trait Backend {
/// A mutual exclusion primitive.
///
-/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock backend
-/// specified as the generic parameter `B`.
+/// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
+/// [`Backend`] specified as the generic parameter `B`.
#[pin_data]
pub struct Lock<T: ?Sized, B: Backend> {
/// The kernel lock object.
@@ -126,7 +126,7 @@ impl<T: ?Sized, B: Backend> Lock<T, B> {
/// A lock guard.
///
-/// Allows mutual exclusion primitives that implement the `Backend` trait to automatically unlock
+/// Allows mutual exclusion primitives that implement the [`Backend`] trait to automatically unlock
/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
/// protected by the lock.
#[must_use = "the lock unlocks immediately when the guard is unused"]
diff --git a/rust/kernel/sync/lock/mutex.rs b/rust/kernel/sync/lock/mutex.rs
index 923472f04af4..09276fedc091 100644
--- a/rust/kernel/sync/lock/mutex.rs
+++ b/rust/kernel/sync/lock/mutex.rs
@@ -63,6 +63,7 @@ macro_rules! new_mutex {
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
+/// # Ok::<(), Error>(())
/// ```
///
/// The following example shows how to use interior mutability to modify the contents of a struct
diff --git a/rust/kernel/sync/lock/spinlock.rs b/rust/kernel/sync/lock/spinlock.rs
index 979b56464a4e..91eb2c9e9123 100644
--- a/rust/kernel/sync/lock/spinlock.rs
+++ b/rust/kernel/sync/lock/spinlock.rs
@@ -61,6 +61,7 @@ macro_rules! new_spinlock {
/// assert_eq!(e.c, 10);
/// assert_eq!(e.d.lock().a, 20);
/// assert_eq!(e.d.lock().b, 30);
+/// # Ok::<(), Error>(())
/// ```
///
/// The following example shows how to use interior mutability to modify the contents of a struct
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
index d479f8da8f38..fdb778e65d79 100644
--- a/rust/kernel/types.rs
+++ b/rust/kernel/types.rs
@@ -6,7 +6,7 @@ use crate::init::{self, PinInit};
use alloc::boxed::Box;
use core::{
cell::UnsafeCell,
- marker::PhantomData,
+ marker::{PhantomData, PhantomPinned},
mem::MaybeUninit,
ops::{Deref, DerefMut},
ptr::NonNull,
@@ -91,7 +91,7 @@ impl ForeignOwnable for () {
/// In the example below, we have multiple exit paths and we want to log regardless of which one is
/// taken:
/// ```
-/// # use kernel::ScopeGuard;
+/// # use kernel::types::ScopeGuard;
/// fn example1(arg: bool) {
/// let _log = ScopeGuard::new(|| pr_info!("example1 completed\n"));
///
@@ -109,7 +109,7 @@ impl ForeignOwnable for () {
/// In the example below, we want to log the same message on all early exits but a different one on
/// the main exit path:
/// ```
-/// # use kernel::ScopeGuard;
+/// # use kernel::types::ScopeGuard;
/// fn example2(arg: bool) {
/// let log = ScopeGuard::new(|| pr_info!("example2 returned early\n"));
///
@@ -130,7 +130,7 @@ impl ForeignOwnable for () {
/// In the example below, we need a mutable object (the vector) to be accessible within the log
/// function, so we wrap it in the [`ScopeGuard`]:
/// ```
-/// # use kernel::ScopeGuard;
+/// # use kernel::types::ScopeGuard;
/// fn example3(arg: bool) -> Result {
/// let mut vec =
/// ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
@@ -206,17 +206,26 @@ impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
///
/// This is meant to be used with FFI objects that are never interpreted by Rust code.
#[repr(transparent)]
-pub struct Opaque<T>(MaybeUninit<UnsafeCell<T>>);
+pub struct Opaque<T> {
+ value: UnsafeCell<MaybeUninit<T>>,
+ _pin: PhantomPinned,
+}
impl<T> Opaque<T> {
/// Creates a new opaque value.
pub const fn new(value: T) -> Self {
- Self(MaybeUninit::new(UnsafeCell::new(value)))
+ Self {
+ value: UnsafeCell::new(MaybeUninit::new(value)),
+ _pin: PhantomPinned,
+ }
}
/// Creates an uninitialised value.
pub const fn uninit() -> Self {
- Self(MaybeUninit::uninit())
+ Self {
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ _pin: PhantomPinned,
+ }
}
/// Creates a pin-initializer from the given initializer closure.
@@ -240,7 +249,7 @@ impl<T> Opaque<T> {
/// Returns a raw pointer to the opaque data.
pub fn get(&self) -> *mut T {
- UnsafeCell::raw_get(self.0.as_ptr())
+ UnsafeCell::get(&self.value).cast::<T>()
}
/// Gets the value behind `this`.
@@ -248,7 +257,7 @@ impl<T> Opaque<T> {
/// This function is useful to get access to the value without creating intermediate
/// references.
pub const fn raw_get(this: *const Self) -> *mut T {
- UnsafeCell::raw_get(this.cast::<UnsafeCell<T>>())
+ UnsafeCell::raw_get(this.cast::<UnsafeCell<MaybeUninit<T>>>()).cast::<T>()
}
}
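
For context, a minimal sketch of how the reworked `Opaque<T>` is typically used; the `Driver` wrapper and the C type `bindings::foo` are hypothetical stand-ins, not part of this patch:

    use kernel::types::Opaque;

    struct Driver {
        // `bindings::foo` is a hypothetical C struct from the generated bindings;
        // its contents are owned and interpreted by C code only.
        raw: Opaque<bindings::foo>,
    }

    impl Driver {
        fn as_ptr(&self) -> *mut bindings::foo {
            // Only a raw pointer escapes; no Rust reference to the inner data is formed.
            self.raw.get()
        }
    }

The added `PhantomPinned` keeps `Opaque<T>` `!Unpin`, so wrappers containing it are not automatically `Unpin` either.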
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 3fc74cb4ea19..c42105c2ff96 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -7,9 +7,11 @@ mod quote;
mod concat_idents;
mod helpers;
mod module;
+mod paste;
mod pin_data;
mod pinned_drop;
mod vtable;
+mod zeroable;
use proc_macro::TokenStream;
@@ -246,3 +248,118 @@ pub fn pin_data(inner: TokenStream, item: TokenStream) -> TokenStream {
pub fn pinned_drop(args: TokenStream, input: TokenStream) -> TokenStream {
pinned_drop::pinned_drop(args, input)
}
+
+/// Paste identifiers together.
+///
+/// Within the `paste!` macro, identifiers inside `[<` and `>]` are concatenated together to form a
+/// single identifier.
+///
+/// This is similar to the [`paste`] crate, but with the pasting feature limited to identifiers
+/// (literals, lifetimes and documentation strings are not supported). There is a difference in
+/// supported modifiers as well.
+///
+/// # Example
+///
+/// ```ignore
+/// use kernel::macros::paste;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// paste! {
+/// $(pub(crate) const $newname: u32 = [<$prefix $newname>];)+
+/// }
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(BR_OK, binder_driver_return_protocol_BR_OK);
+/// ```
+///
+/// # Modifiers
+///
+/// For each identifier, it is possible to attach one or multiple modifiers to
+/// it.
+///
+/// Currently supported modifiers are:
+/// * `span`: change the span of the concatenated identifier to the span of the specified token. By
+/// default the span of the `[< >]` group is used.
+/// * `lower`: change the identifier to lower case.
+/// * `upper`: change the identifier to upper case.
+///
+/// ```ignore
+/// use kernel::macros::paste;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// kernel::macros::paste! {
+/// $(pub(crate) const fn [<$newname:lower:span>]() -> u32 { [<$prefix $newname:span>] })+
+/// }
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(br_ok(), binder_driver_return_protocol_BR_OK);
+/// ```
+///
+/// [`paste`]: https://docs.rs/paste/
+#[proc_macro]
+pub fn paste(input: TokenStream) -> TokenStream {
+ let mut tokens = input.into_iter().collect();
+ paste::expand(&mut tokens);
+ tokens.into_iter().collect()
+}
+
+/// Derives the [`Zeroable`] trait for the given struct.
+///
+/// This can only be used for structs where every field implements the [`Zeroable`] trait.
+///
+/// # Examples
+///
+/// ```rust,ignore
+/// #[derive(Zeroable)]
+/// pub struct DriverData {
+/// id: i64,
+/// buf_ptr: *mut u8,
+/// len: usize,
+/// }
+/// ```
+#[proc_macro_derive(Zeroable)]
+pub fn derive_zeroable(input: TokenStream) -> TokenStream {
+ zeroable::derive(input)
+}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
index fb1244f8c2e6..d62d8710d77a 100644
--- a/rust/macros/module.rs
+++ b/rust/macros/module.rs
@@ -199,7 +199,7 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream {
/// Used by the printing macros, e.g. [`info!`].
const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
- /// The \"Rust loadable module\" mark, for `scripts/is_rust_module.sh`.
+ /// The \"Rust loadable module\" mark.
//
// This may be best done another way later on, e.g. as a new modinfo
// key or a new section. For the moment, keep it simple.
diff --git a/rust/macros/paste.rs b/rust/macros/paste.rs
new file mode 100644
index 000000000000..385a78434224
--- /dev/null
+++ b/rust/macros/paste.rs
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{Delimiter, Group, Ident, Spacing, Span, TokenTree};
+
+fn concat(tokens: &[TokenTree], group_span: Span) -> TokenTree {
+ let mut tokens = tokens.iter();
+ let mut segments = Vec::new();
+ let mut span = None;
+ loop {
+ match tokens.next() {
+ None => break,
+ Some(TokenTree::Literal(lit)) => segments.push((lit.to_string(), lit.span())),
+ Some(TokenTree::Ident(ident)) => {
+ let mut value = ident.to_string();
+ if value.starts_with("r#") {
+ value.replace_range(0..2, "");
+ }
+ segments.push((value, ident.span()));
+ }
+ Some(TokenTree::Punct(p)) if p.as_char() == ':' => {
+ let Some(TokenTree::Ident(ident)) = tokens.next() else {
+ panic!("expected identifier as modifier");
+ };
+
+ let (mut value, sp) = segments.pop().expect("expected identifier before modifier");
+ match ident.to_string().as_str() {
+ // Use this segment's span as the overall span of the concatenated token
+ "span" => {
+ assert!(
+ span.is_none(),
+ "span modifier should only appear at most once"
+ );
+ span = Some(sp);
+ }
+ "lower" => value = value.to_lowercase(),
+ "upper" => value = value.to_uppercase(),
+ v => panic!("unknown modifier `{v}`"),
+ };
+ segments.push((value, sp));
+ }
+ _ => panic!("unexpected token in paste segments"),
+ };
+ }
+
+ let pasted: String = segments.into_iter().map(|x| x.0).collect();
+ TokenTree::Ident(Ident::new(&pasted, span.unwrap_or(group_span)))
+}
+
+pub(crate) fn expand(tokens: &mut Vec<TokenTree>) {
+ for token in tokens.iter_mut() {
+ if let TokenTree::Group(group) = token {
+ let delimiter = group.delimiter();
+ let span = group.span();
+ let mut stream: Vec<_> = group.stream().into_iter().collect();
+ // Find groups that look like `[< A B C D >]`
+ if delimiter == Delimiter::Bracket
+ && stream.len() >= 3
+ && matches!(&stream[0], TokenTree::Punct(p) if p.as_char() == '<')
+ && matches!(&stream[stream.len() - 1], TokenTree::Punct(p) if p.as_char() == '>')
+ {
+ // Replace the group with the concatenated token
+ *token = concat(&stream[1..stream.len() - 1], span);
+ } else {
+ // Recursively expand tokens inside the group
+ expand(&mut stream);
+ let mut group = Group::new(delimiter, stream.into_iter().collect());
+ group.set_span(span);
+ *token = TokenTree::Group(group);
+ }
+ }
+ }
+
+ // Path segments cannot contain invisible delimiter groups, so remove them if any.
+ for i in (0..tokens.len().saturating_sub(3)).rev() {
+ // Looking for a double colon
+ if matches!(
+ (&tokens[i + 1], &tokens[i + 2]),
+ (TokenTree::Punct(a), TokenTree::Punct(b))
+ if a.as_char() == ':' && a.spacing() == Spacing::Joint && b.as_char() == ':'
+ ) {
+ match &tokens[i + 3] {
+ TokenTree::Group(group) if group.delimiter() == Delimiter::None => {
+ tokens.splice(i + 3..i + 4, group.stream());
+ }
+ _ => (),
+ }
+
+ match &tokens[i] {
+ TokenTree::Group(group) if group.delimiter() == Delimiter::None => {
+ tokens.splice(i..i + 1, group.stream());
+ }
+ _ => (),
+ }
+ }
+ }
+}
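
To make the transformation concrete, a worked example using the identifiers from the documentation above: given the tokens

    [<binder_driver_return_protocol_ BR_OK>]

`expand` recognises the bracket group delimited by `<`/`>`, and `concat` joins the identifier segments into the single identifier `binder_driver_return_protocol_BR_OK`, using the span of the `[< >]` group unless a `:span` modifier selected a segment's span instead.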
diff --git a/rust/macros/quote.rs b/rust/macros/quote.rs
index dddbb4e6f4cb..33a199e4f176 100644
--- a/rust/macros/quote.rs
+++ b/rust/macros/quote.rs
@@ -124,6 +124,18 @@ macro_rules! quote_spanned {
));
quote_spanned!(@proc $v $span $($tt)*);
};
+ (@proc $v:ident $span:ident ; $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new(';', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
+ (@proc $v:ident $span:ident + $($tt:tt)*) => {
+ $v.push(::proc_macro::TokenTree::Punct(
+ ::proc_macro::Punct::new('+', ::proc_macro::Spacing::Alone)
+ ));
+ quote_spanned!(@proc $v $span $($tt)*);
+ };
(@proc $v:ident $span:ident $id:ident $($tt:tt)*) => {
$v.push(::proc_macro::TokenTree::Ident(::proc_macro::Ident::new(stringify!($id), $span)));
quote_spanned!(@proc $v $span $($tt)*);
diff --git a/rust/macros/zeroable.rs b/rust/macros/zeroable.rs
new file mode 100644
index 000000000000..0d605c46ab3b
--- /dev/null
+++ b/rust/macros/zeroable.rs
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use crate::helpers::{parse_generics, Generics};
+use proc_macro::{TokenStream, TokenTree};
+
+pub(crate) fn derive(input: TokenStream) -> TokenStream {
+ let (
+ Generics {
+ impl_generics,
+ ty_generics,
+ },
+ mut rest,
+ ) = parse_generics(input);
+ // This should be the body of the struct `{...}`.
+ let last = rest.pop();
+ // Now we insert `Zeroable` as a bound for every generic parameter in `impl_generics`.
+ let mut new_impl_generics = Vec::with_capacity(impl_generics.len());
+ // Are we inside a generic where we want to add `Zeroable`?
+ let mut in_generic = !impl_generics.is_empty();
+ // Have we already inserted `Zeroable`?
+ let mut inserted = false;
+ // Level of `<>` nestings.
+ let mut nested = 0;
+ for tt in impl_generics {
+ match &tt {
+ // If we find a `,`, then we have finished a generic/constant/lifetime parameter.
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == ',' => {
+ if in_generic && !inserted {
+ new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ }
+ in_generic = true;
+ inserted = false;
+ new_impl_generics.push(tt);
+ }
+ // If we find `'`, then we are entering a lifetime.
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == '\'' => {
+ in_generic = false;
+ new_impl_generics.push(tt);
+ }
+ TokenTree::Punct(p) if nested == 0 && p.as_char() == ':' => {
+ new_impl_generics.push(tt);
+ if in_generic {
+ new_impl_generics.extend(quote! { ::kernel::init::Zeroable + });
+ inserted = true;
+ }
+ }
+ TokenTree::Punct(p) if p.as_char() == '<' => {
+ nested += 1;
+ new_impl_generics.push(tt);
+ }
+ TokenTree::Punct(p) if p.as_char() == '>' => {
+ assert!(nested > 0);
+ nested -= 1;
+ new_impl_generics.push(tt);
+ }
+ _ => new_impl_generics.push(tt),
+ }
+ }
+ assert_eq!(nested, 0);
+ if in_generic && !inserted {
+ new_impl_generics.extend(quote! { : ::kernel::init::Zeroable });
+ }
+ quote! {
+ ::kernel::__derive_zeroable!(
+ parse_input:
+ @sig(#(#rest)*),
+ @impl_generics(#(#new_impl_generics)*),
+ @ty_generics(#(#ty_generics)*),
+ @body(#last),
+ );
+ }
+}
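
For orientation only, the expansion for the `DriverData` example in the `Zeroable` documentation above would look roughly like this; the exact token layout depends on what `parse_generics` produces, so treat this as an assumed shape rather than the literal output:

    ::kernel::__derive_zeroable!(
        parse_input:
            @sig(pub struct DriverData),
            @impl_generics(),
            @ty_generics(),
            @body({
                id: i64,
                buf_ptr: *mut u8,
                len: usize,
            }),
    );

Every generic parameter (none in this case) picks up a `::kernel::init::Zeroable` bound in `@impl_generics`.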
diff --git a/samples/ftrace/ftrace-direct-modify.c b/samples/ftrace/ftrace-direct-modify.c
index e5ed08098ff3..e2a6a69352df 100644
--- a/samples/ftrace/ftrace-direct-modify.c
+++ b/samples/ftrace/ftrace-direct-modify.c
@@ -105,7 +105,7 @@ asm (
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #16\n"
" stp x9, x30, [sp]\n"
" bl my_direct_func1\n"
@@ -117,7 +117,7 @@ asm (
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #16\n"
" stp x9, x30, [sp]\n"
" bl my_direct_func2\n"
diff --git a/samples/ftrace/ftrace-direct-multi-modify.c b/samples/ftrace/ftrace-direct-multi-modify.c
index 292cff2b3f5d..2e349834d63c 100644
--- a/samples/ftrace/ftrace-direct-multi-modify.c
+++ b/samples/ftrace/ftrace-direct-multi-modify.c
@@ -112,7 +112,7 @@ asm (
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
@@ -127,7 +127,7 @@ asm (
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
index b4391e08c913..9243dbfe4d0c 100644
--- a/samples/ftrace/ftrace-direct-multi.c
+++ b/samples/ftrace/ftrace-direct-multi.c
@@ -75,7 +75,7 @@ asm (
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
diff --git a/samples/ftrace/ftrace-direct-too.c b/samples/ftrace/ftrace-direct-too.c
index e9804c5307c0..e39c3563ae4e 100644
--- a/samples/ftrace/ftrace-direct-too.c
+++ b/samples/ftrace/ftrace-direct-too.c
@@ -81,7 +81,7 @@ asm (
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #48\n"
" stp x9, x30, [sp]\n"
" stp x0, x1, [sp, #16]\n"
diff --git a/samples/ftrace/ftrace-direct.c b/samples/ftrace/ftrace-direct.c
index 20f4a7caa810..32c477da1e9a 100644
--- a/samples/ftrace/ftrace-direct.c
+++ b/samples/ftrace/ftrace-direct.c
@@ -72,7 +72,7 @@ asm (
" .type my_tramp, @function\n"
" .globl my_tramp\n"
" my_tramp:"
-" bti c\n"
+" hint 34\n" // bti c
" sub sp, sp, #32\n"
" stp x9, x30, [sp]\n"
" str x0, [sp, #16]\n"
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 6e9ce6720a05..3dbb8bb2457b 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -5,6 +5,8 @@
/kallsyms
/module.lds
/recordmcount
+/rustdoc_test_builder
+/rustdoc_test_gen
/sign-file
/sorttable
/target.json
diff --git a/scripts/Makefile b/scripts/Makefile
index 32b6ba722728..576cf64be667 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -9,6 +9,8 @@ hostprogs-always-$(CONFIG_BUILDTIME_TABLE_SORT) += sorttable
hostprogs-always-$(CONFIG_ASN1) += asn1_compiler
hostprogs-always-$(CONFIG_MODULE_SIG_FORMAT) += sign-file
hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
+hostprogs-always-$(CONFIG_RUST_KERNEL_DOCTESTS) += rustdoc_test_builder
+hostprogs-always-$(CONFIG_RUST_KERNEL_DOCTESTS) += rustdoc_test_gen
always-$(CONFIG_RUST) += target.json
filechk_rust_target = $< < include/config/auto.conf
@@ -18,6 +20,8 @@ $(obj)/target.json: scripts/generate_rust_target include/config/auto.conf FORCE
hostprogs += generate_rust_target
generate_rust_target-rust := y
+rustdoc_test_builder-rust := y
+rustdoc_test_gen-rust := y
HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
HOSTLDLIBS_sorttable = -lpthread
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index fc19f67039bd..b3a6aa8fbe8c 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -41,8 +41,6 @@ quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
if [ ! -f vmlinux ]; then \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
- elif [ -n "$(CONFIG_RUST)" ] && $(srctree)/scripts/is_rust_module.sh $@; then \
- printf "Skipping BTF generation for %s because it's a Rust module\n" $@ 1>&2; \
else \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 880fde13d9b8..a9841148cde2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -7457,6 +7457,30 @@ sub process {
}
}
+# Complain about RCU Tasks Trace used outside of BPF (and of course, RCU).
+ our $rcu_trace_funcs = qr{(?x:
+ rcu_read_lock_trace |
+ rcu_read_lock_trace_held |
+ rcu_read_unlock_trace |
+ call_rcu_tasks_trace |
+ synchronize_rcu_tasks_trace |
+ rcu_barrier_tasks_trace |
+ rcu_request_urgent_qs_task
+ )};
+ our $rcu_trace_paths = qr{(?x:
+ kernel/bpf/ |
+ include/linux/bpf |
+ net/bpf/ |
+ kernel/rcu/ |
+ include/linux/rcu
+ )};
+ if ($line =~ /\b($rcu_trace_funcs)\s*\(/) {
+ if ($realfile !~ m{^$rcu_trace_paths}) {
+ WARN("RCU_TASKS_TRACE",
+ "use of RCU tasks trace is incorrect outside BPF or core RCU code\n" . $herecurr);
+ }
+ }
+
# check for lockdep_set_novalidate_class
if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
$line =~ /__lockdep_no_validate__\s*\)/ ) {
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index 84c730da36dd..1ae39b9f4a95 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -440,4 +440,8 @@ static inline void debug_gimple_stmt(const_gimple s)
#define SET_DECL_MODE(decl, mode) DECL_MODE(decl) = (mode)
#endif
+#if BUILDING_GCC_VERSION >= 14000
+#define last_stmt(x) last_nondebug_stmt(x)
+#endif
+
#endif
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
index 946e250c1b2a..fc52bc41d3e7 100755
--- a/scripts/generate_rust_analyzer.py
+++ b/scripts/generate_rust_analyzer.py
@@ -6,10 +6,19 @@
import argparse
import json
import logging
+import os
import pathlib
import sys
-def generate_crates(srctree, objtree, sysroot_src):
+def args_crates_cfgs(cfgs):
+ crates_cfgs = {}
+ for cfg in cfgs:
+ crate, vals = cfg.split("=", 1)
+ crates_cfgs[crate] = vals.replace("--cfg", "").split()
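+ # For example (hypothetical value): a cfg of "core=--cfg no_fp_fmt_parse"
+ # ends up as crates_cfgs["core"] == ["no_fp_fmt_parse"].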
+
+ return crates_cfgs
+
+def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs):
# Generate the configuration list.
cfg = []
with open(objtree / "include" / "generated" / "rustc_cfg") as fd:
@@ -23,6 +32,7 @@ def generate_crates(srctree, objtree, sysroot_src):
# Avoid O(n^2) iterations by keeping a map of indexes.
crates = []
crates_indexes = {}
+ crates_cfgs = args_crates_cfgs(cfgs)
def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False):
crates_indexes[display_name] = len(crates)
@@ -44,6 +54,7 @@ def generate_crates(srctree, objtree, sysroot_src):
"core",
sysroot_src / "core" / "src" / "lib.rs",
[],
+ cfg=crates_cfgs.get("core", []),
is_workspace_member=False,
)
@@ -57,6 +68,7 @@ def generate_crates(srctree, objtree, sysroot_src):
"alloc",
srctree / "rust" / "alloc" / "lib.rs",
["core", "compiler_builtins"],
+ cfg=crates_cfgs.get("alloc", []),
)
append_crate(
@@ -65,7 +77,7 @@ def generate_crates(srctree, objtree, sysroot_src):
[],
is_proc_macro=True,
)
- crates[-1]["proc_macro_dylib_path"] = "rust/libmacros.so"
+ crates[-1]["proc_macro_dylib_path"] = f"{objtree}/rust/libmacros.so"
append_crate(
"build_error",
@@ -95,19 +107,26 @@ def generate_crates(srctree, objtree, sysroot_src):
"exclude_dirs": [],
}
+ def is_root_crate(build_file, target):
+ try:
+ return f"{target}.o" in open(build_file).read()
+ except FileNotFoundError:
+ return False
+
# Then, the rest outside of `rust/`.
#
# We explicitly mention the top-level folders we want to cover.
- for folder in ("samples", "drivers"):
- for path in (srctree / folder).rglob("*.rs"):
+ extra_dirs = map(lambda dir: srctree / dir, ("samples", "drivers"))
+ if external_src is not None:
+ extra_dirs = [external_src]
+ for folder in extra_dirs:
+ for path in folder.rglob("*.rs"):
logging.info("Checking %s", path)
name = path.name.replace(".rs", "")
# Skip those that are not crate roots.
- try:
- if f"{name}.o" not in open(path.parent / "Makefile").read():
- continue
- except FileNotFoundError:
+ if not is_root_crate(path.parent / "Makefile", name) and \
+ not is_root_crate(path.parent / "Kbuild", name):
continue
logging.info("Adding %s", name)
@@ -123,9 +142,11 @@ def generate_crates(srctree, objtree, sysroot_src):
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
+ parser.add_argument('--cfgs', action='append', default=[])
parser.add_argument("srctree", type=pathlib.Path)
parser.add_argument("objtree", type=pathlib.Path)
parser.add_argument("sysroot_src", type=pathlib.Path)
+ parser.add_argument("exttree", type=pathlib.Path, nargs="?")
args = parser.parse_args()
logging.basicConfig(
@@ -134,7 +155,7 @@ def main():
)
rust_project = {
- "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src),
+ "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs),
"sysroot_src": str(args.sysroot_src),
}
diff --git a/scripts/is_rust_module.sh b/scripts/is_rust_module.sh
deleted file mode 100755
index 464761a7cf7f..000000000000
--- a/scripts/is_rust_module.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# is_rust_module.sh module.ko
-#
-# Returns `0` if `module.ko` is a Rust module, `1` otherwise.
-
-set -e
-
-# Using the `16_` prefix ensures other symbols with the same substring
-# are not picked up (even if it would be unlikely). The last part is
-# used just in case LLVM decides to use the `.` suffix.
-#
-# In the future, checking for the `.comment` section may be another
-# option, see https://github.com/rust-lang/rust/pull/97550.
-${NM} "$*" | grep -qE '^[0-9a-fA-F]+ [Rr] _R[^[:space:]]+16___IS_RUST_MODULE[^[:space:]]*$'
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index 2ade63149466..d65ab8bfeaf4 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -31,10 +31,10 @@ llvm)
fi
;;
rustc)
- echo 1.68.2
+ echo 1.71.1
;;
bindgen)
- echo 0.56.0
+ echo 0.65.1
;;
*)
echo "$1: unknown tool" >&2
diff --git a/scripts/rust_is_available.sh b/scripts/rust_is_available.sh
index aebbf1913970..117018946b57 100755
--- a/scripts/rust_is_available.sh
+++ b/scripts/rust_is_available.sh
@@ -2,8 +2,6 @@
# SPDX-License-Identifier: GPL-2.0
#
# Tests whether a suitable Rust toolchain is available.
-#
-# Pass `-v` for human output and more checks (as warnings).
set -e
@@ -21,102 +19,208 @@ get_canonical_version()
echo $((100000 * $1 + 100 * $2 + $3))
}
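+
+# For example, the minimum rustc version 1.71.1 canonicalizes to
+# 100000*1 + 100*71 + 1 = 107101, so the version checks below reduce to plain
+# integer comparisons.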
+# Print a reference to the Quick Start guide in the documentation.
+print_docs_reference()
+{
+ echo >&2 "***"
+ echo >&2 "*** Please see Documentation/rust/quick-start.rst for details"
+ echo >&2 "*** on how to set up the Rust support."
+ echo >&2 "***"
+}
+
+# Explain that the script is meant to be called from Kbuild, then fail.
+print_kbuild_explanation()
+{
+ echo >&2 "***"
+ echo >&2 "*** This script is intended to be called from Kbuild."
+ echo >&2 "*** Please use the 'rustavailable' target to call it instead."
+ echo >&2 "*** Otherwise, the results may not be meaningful."
+ exit 1
+}
+
+# If the script fails for any reason, or if there was any warning, then
+# print a reference to the documentation on exit.
+warning=0
+trap 'if [ $? -ne 0 ] || [ $warning -ne 0 ]; then print_docs_reference; fi' EXIT
+
+# Check that the expected environment variables are set.
+if [ -z "${RUSTC+x}" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Environment variable 'RUSTC' is not set."
+ print_kbuild_explanation
+fi
+
+if [ -z "${BINDGEN+x}" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Environment variable 'BINDGEN' is not set."
+ print_kbuild_explanation
+fi
+
+if [ -z "${CC+x}" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Environment variable 'CC' is not set."
+ print_kbuild_explanation
+fi
+
# Check that the Rust compiler exists.
if ! command -v "$RUSTC" >/dev/null; then
- if [ "$1" = -v ]; then
- echo >&2 "***"
- echo >&2 "*** Rust compiler '$RUSTC' could not be found."
- echo >&2 "***"
- fi
+ echo >&2 "***"
+ echo >&2 "*** Rust compiler '$RUSTC' could not be found."
+ echo >&2 "***"
exit 1
fi
# Check that the Rust bindings generator exists.
if ! command -v "$BINDGEN" >/dev/null; then
- if [ "$1" = -v ]; then
- echo >&2 "***"
- echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
- echo >&2 "***"
- fi
+ echo >&2 "***"
+ echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
+ echo >&2 "***"
exit 1
fi
# Check that the Rust compiler version is suitable.
#
# Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
+rust_compiler_output=$( \
+ LC_ALL=C "$RUSTC" --version 2>/dev/null
+) || rust_compiler_code=$?
+if [ -n "$rust_compiler_code" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Running '$RUSTC' to check the Rust compiler version failed with"
+ echo >&2 "*** code $rust_compiler_code. See output and docs below for details:"
+ echo >&2 "***"
+ echo >&2 "$rust_compiler_output"
+ echo >&2 "***"
+ exit 1
+fi
rust_compiler_version=$( \
- LC_ALL=C "$RUSTC" --version 2>/dev/null \
- | head -n 1 \
- | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+ echo "$rust_compiler_output" \
+ | sed -nE '1s:.*rustc ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
)
+if [ -z "$rust_compiler_version" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Running '$RUSTC' to check the Rust compiler version did not return"
+ echo >&2 "*** an expected output. See output and docs below for details:"
+ echo >&2 "***"
+ echo >&2 "$rust_compiler_output"
+ echo >&2 "***"
+ exit 1
+fi
rust_compiler_min_version=$($min_tool_version rustc)
rust_compiler_cversion=$(get_canonical_version $rust_compiler_version)
rust_compiler_min_cversion=$(get_canonical_version $rust_compiler_min_version)
if [ "$rust_compiler_cversion" -lt "$rust_compiler_min_cversion" ]; then
- if [ "$1" = -v ]; then
- echo >&2 "***"
- echo >&2 "*** Rust compiler '$RUSTC' is too old."
- echo >&2 "*** Your version: $rust_compiler_version"
- echo >&2 "*** Minimum version: $rust_compiler_min_version"
- echo >&2 "***"
- fi
+ echo >&2 "***"
+ echo >&2 "*** Rust compiler '$RUSTC' is too old."
+ echo >&2 "*** Your version: $rust_compiler_version"
+ echo >&2 "*** Minimum version: $rust_compiler_min_version"
+ echo >&2 "***"
exit 1
fi
-if [ "$1" = -v ] && [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
+if [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
echo >&2 "***"
echo >&2 "*** Rust compiler '$RUSTC' is too new. This may or may not work."
echo >&2 "*** Your version: $rust_compiler_version"
echo >&2 "*** Expected version: $rust_compiler_min_version"
echo >&2 "***"
+ warning=1
fi
# Check that the Rust bindings generator is suitable.
#
# Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
+rust_bindings_generator_output=$( \
+ LC_ALL=C "$BINDGEN" --version 2>/dev/null
+) || rust_bindings_generator_code=$?
+if [ -n "$rust_bindings_generator_code" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Running '$BINDGEN' to check the Rust bindings generator version failed with"
+ echo >&2 "*** code $rust_bindings_generator_code. See output and docs below for details:"
+ echo >&2 "***"
+ echo >&2 "$rust_bindings_generator_output"
+ echo >&2 "***"
+ exit 1
+fi
rust_bindings_generator_version=$( \
- LC_ALL=C "$BINDGEN" --version 2>/dev/null \
- | head -n 1 \
- | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+ echo "$rust_bindings_generator_output" \
+ | sed -nE '1s:.*bindgen ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
)
+if [ -z "$rust_bindings_generator_version" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Running '$BINDGEN' to check the bindings generator version did not return"
+ echo >&2 "*** an expected output. See output and docs below for details:"
+ echo >&2 "***"
+ echo >&2 "$rust_bindings_generator_output"
+ echo >&2 "***"
+ exit 1
+fi
rust_bindings_generator_min_version=$($min_tool_version bindgen)
rust_bindings_generator_cversion=$(get_canonical_version $rust_bindings_generator_version)
rust_bindings_generator_min_cversion=$(get_canonical_version $rust_bindings_generator_min_version)
if [ "$rust_bindings_generator_cversion" -lt "$rust_bindings_generator_min_cversion" ]; then
- if [ "$1" = -v ]; then
- echo >&2 "***"
- echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
- echo >&2 "*** Your version: $rust_bindings_generator_version"
- echo >&2 "*** Minimum version: $rust_bindings_generator_min_version"
- echo >&2 "***"
- fi
+ echo >&2 "***"
+ echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
+ echo >&2 "*** Your version: $rust_bindings_generator_version"
+ echo >&2 "*** Minimum version: $rust_bindings_generator_min_version"
+ echo >&2 "***"
exit 1
fi
-if [ "$1" = -v ] && [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
+if [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
echo >&2 "***"
echo >&2 "*** Rust bindings generator '$BINDGEN' is too new. This may or may not work."
echo >&2 "*** Your version: $rust_bindings_generator_version"
echo >&2 "*** Expected version: $rust_bindings_generator_min_version"
echo >&2 "***"
+ warning=1
fi
# Check that the `libclang` used by the Rust bindings generator is suitable.
+#
+# To do so, first invoke `bindgen` to get the `libclang` version it found.
+# This step may already fail if, for instance, `libclang` is not found, so
+# inform the user in that case.
+bindgen_libclang_output=$( \
+ LC_ALL=C "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_libclang.h 2>&1 >/dev/null
+) || bindgen_libclang_code=$?
+if [ -n "$bindgen_libclang_code" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Running '$BINDGEN' to check the libclang version (used by the Rust"
+ echo >&2 "*** bindings generator) failed with code $bindgen_libclang_code. This may be caused by"
+ echo >&2 "*** a failure to locate libclang. See output and docs below for details:"
+ echo >&2 "***"
+ echo >&2 "$bindgen_libclang_output"
+ echo >&2 "***"
+ exit 1
+fi
+
+# `bindgen` returned successfully, thus use the output to check that the version
+# of the `libclang` found by the Rust bindings generator is suitable.
+#
+# Unlike other version checks, note that this one does not necessarily appear
+# in the first line of the output, thus no `sed` address is provided.
bindgen_libclang_version=$( \
- LC_ALL=C "$BINDGEN" $(dirname $0)/rust_is_available_bindgen_libclang.h 2>&1 >/dev/null \
- | grep -F 'clang version ' \
- | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
- | head -n 1 \
+ echo "$bindgen_libclang_output" \
+ | sed -nE 's:.*clang version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
)
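+# For instance (illustrative version number only), a line such as
+#   scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version 16.0.6 [-W#pragma-messages], err: false
+# reduces to `16.0.6`.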
+if [ -z "$bindgen_libclang_version" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Running '$BINDGEN' to check the libclang version (used by the Rust"
+ echo >&2 "*** bindings generator) did not return an expected output. See output"
+ echo >&2 "*** and docs below for details:"
+ echo >&2 "***"
+ echo >&2 "$bindgen_libclang_output"
+ echo >&2 "***"
+ exit 1
+fi
bindgen_libclang_min_version=$($min_tool_version llvm)
bindgen_libclang_cversion=$(get_canonical_version $bindgen_libclang_version)
bindgen_libclang_min_cversion=$(get_canonical_version $bindgen_libclang_min_version)
if [ "$bindgen_libclang_cversion" -lt "$bindgen_libclang_min_cversion" ]; then
- if [ "$1" = -v ]; then
- echo >&2 "***"
- echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
- echo >&2 "*** Your version: $bindgen_libclang_version"
- echo >&2 "*** Minimum version: $bindgen_libclang_min_version"
- echo >&2 "***"
- fi
+ echo >&2 "***"
+ echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
+ echo >&2 "*** Your version: $bindgen_libclang_version"
+ echo >&2 "*** Minimum version: $bindgen_libclang_min_version"
+ echo >&2 "***"
exit 1
fi
@@ -125,21 +229,20 @@ fi
#
# In the future, we might be able to perform a full version check, see
# https://github.com/rust-lang/rust-bindgen/issues/2138.
-if [ "$1" = -v ]; then
- cc_name=$($(dirname $0)/cc-version.sh "$CC" | cut -f1 -d' ')
- if [ "$cc_name" = Clang ]; then
- clang_version=$( \
- LC_ALL=C "$CC" --version 2>/dev/null \
- | sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
- )
- if [ "$clang_version" != "$bindgen_libclang_version" ]; then
- echo >&2 "***"
- echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
- echo >&2 "*** version does not match Clang's. This may be a problem."
- echo >&2 "*** libclang version: $bindgen_libclang_version"
- echo >&2 "*** Clang version: $clang_version"
- echo >&2 "***"
- fi
+cc_name=$($(dirname $0)/cc-version.sh $CC | cut -f1 -d' ')
+if [ "$cc_name" = Clang ]; then
+ clang_version=$( \
+ LC_ALL=C $CC --version 2>/dev/null \
+ | sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+ )
+ if [ "$clang_version" != "$bindgen_libclang_version" ]; then
+ echo >&2 "***"
+ echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
+ echo >&2 "*** version does not match Clang's. This may be a problem."
+ echo >&2 "*** libclang version: $bindgen_libclang_version"
+ echo >&2 "*** Clang version: $clang_version"
+ echo >&2 "***"
+ warning=1
fi
fi
@@ -150,11 +253,9 @@ rustc_sysroot=$("$RUSTC" $KRUSTFLAGS --print sysroot)
rustc_src=${RUST_LIB_SRC:-"$rustc_sysroot/lib/rustlib/src/rust/library"}
rustc_src_core="$rustc_src/core/src/lib.rs"
if [ ! -e "$rustc_src_core" ]; then
- if [ "$1" = -v ]; then
- echo >&2 "***"
- echo >&2 "*** Source code for the 'core' standard library could not be found"
- echo >&2 "*** at '$rustc_src_core'."
- echo >&2 "***"
- fi
+ echo >&2 "***"
+ echo >&2 "*** Source code for the 'core' standard library could not be found"
+ echo >&2 "*** at '$rustc_src_core'."
+ echo >&2 "***"
exit 1
fi
diff --git a/scripts/rust_is_available_test.py b/scripts/rust_is_available_test.py
new file mode 100755
index 000000000000..57613fe5ed75
--- /dev/null
+++ b/scripts/rust_is_available_test.py
@@ -0,0 +1,346 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+"""Tests the `rust_is_available.sh` script.
+
+Some of the tests require the real programs to be available in `$PATH`
+under their canonical name (and with the expected versions).
+"""
+
+import enum
+import os
+import pathlib
+import stat
+import subprocess
+import tempfile
+import unittest
+
+class TestRustIsAvailable(unittest.TestCase):
+ @enum.unique
+ class Expected(enum.Enum):
+ SUCCESS = enum.auto()
+ SUCCESS_WITH_WARNINGS = enum.auto()
+ SUCCESS_WITH_EXTRA_OUTPUT = enum.auto()
+ FAILURE = enum.auto()
+
+ @classmethod
+ def generate_executable(cls, content):
+ path = pathlib.Path(cls.tempdir.name)
+ name = str(len(tuple(path.iterdir())))
+ path = path / name
+ with open(path, "w") as file_:
+ file_.write(content)
+ os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR)
+ return path
+
+ @classmethod
+ def generate_clang(cls, stdout):
+ return cls.generate_executable(f"""#!/usr/bin/env python3
+import sys
+if "-E" in " ".join(sys.argv):
+ print({repr("Clang " + " ".join(cls.llvm_default_version.split(" ")))})
+else:
+ print({repr(stdout)})
+""")
+
+ @classmethod
+ def generate_rustc(cls, stdout):
+ return cls.generate_executable(f"""#!/usr/bin/env python3
+import sys
+if "--print sysroot" in " ".join(sys.argv):
+ print({repr(cls.rust_default_sysroot)})
+else:
+ print({repr(stdout)})
+""")
+
+ @classmethod
+ def generate_bindgen(cls, version_stdout, libclang_stderr):
+ return cls.generate_executable(f"""#!/usr/bin/env python3
+import sys
+if "rust_is_available_bindgen_libclang.h" in " ".join(sys.argv):
+ print({repr(libclang_stderr)}, file=sys.stderr)
+else:
+ print({repr(version_stdout)})
+""")
+
+ @classmethod
+ def generate_bindgen_version(cls, stdout):
+ return cls.generate_bindgen(stdout, cls.bindgen_default_bindgen_libclang_stderr)
+
+ @classmethod
+ def generate_bindgen_libclang(cls, stderr):
+ return cls.generate_bindgen(cls.bindgen_default_bindgen_version_stdout, stderr)
+
+ @classmethod
+ def setUpClass(cls):
+ cls.tempdir = tempfile.TemporaryDirectory()
+
+ cls.missing = pathlib.Path(cls.tempdir.name) / "missing"
+
+ cls.nonexecutable = pathlib.Path(cls.tempdir.name) / "nonexecutable"
+ with open(cls.nonexecutable, "w") as file_:
+ file_.write("nonexecutable")
+
+ cls.unexpected_binary = "true"
+
+ cls.rustc_default_version = subprocess.check_output(("scripts/min-tool-version.sh", "rustc")).decode().strip()
+ cls.bindgen_default_version = subprocess.check_output(("scripts/min-tool-version.sh", "bindgen")).decode().strip()
+ cls.llvm_default_version = subprocess.check_output(("scripts/min-tool-version.sh", "llvm")).decode().strip()
+ cls.rust_default_sysroot = subprocess.check_output(("rustc", "--print", "sysroot")).decode().strip()
+
+ cls.bindgen_default_bindgen_version_stdout = f"bindgen {cls.bindgen_default_version}"
+ cls.bindgen_default_bindgen_libclang_stderr = f"scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {cls.llvm_default_version} [-W#pragma-messages], err: false"
+
+ cls.default_rustc = cls.generate_rustc(f"rustc {cls.rustc_default_version}")
+ cls.default_bindgen = cls.generate_bindgen(cls.bindgen_default_bindgen_version_stdout, cls.bindgen_default_bindgen_libclang_stderr)
+ cls.default_cc = cls.generate_clang(f"clang version {cls.llvm_default_version}")
+
+ def run_script(self, expected, override_env):
+ env = {
+ "RUSTC": self.default_rustc,
+ "BINDGEN": self.default_bindgen,
+ "CC": self.default_cc,
+ }
+
+ for key, value in override_env.items():
+ if value is None:
+ del env[key]
+ continue
+ env[key] = value
+
+ result = subprocess.run("scripts/rust_is_available.sh", env=env, capture_output=True)
+
+ # The script should never output anything to `stdout`.
+ self.assertEqual(result.stdout, b"")
+
+ if expected == self.Expected.SUCCESS:
+ # When expecting a success, the script should return 0
+ # and it should not output anything to `stderr`.
+ self.assertEqual(result.returncode, 0)
+ self.assertEqual(result.stderr, b"")
+ elif expected == self.Expected.SUCCESS_WITH_EXTRA_OUTPUT:
+ # When expecting a success with extra output (that is not warnings,
+ # which is the common case), the script should return 0 and it
+ # should output at least something to `stderr` (the output should
+ # be checked further by the test).
+ self.assertEqual(result.returncode, 0)
+ self.assertNotEqual(result.stderr, b"")
+ elif expected == self.Expected.SUCCESS_WITH_WARNINGS:
+ # When expecting a success with warnings, the script should return 0
+ # and it should output at least the instructions to `stderr`.
+ self.assertEqual(result.returncode, 0)
+ self.assertIn(b"Please see Documentation/rust/quick-start.rst for details", result.stderr)
+ else:
+ # When expecting a failure, the script should return non-0
+ # and it should output at least the instructions to `stderr`.
+ self.assertNotEqual(result.returncode, 0)
+ self.assertIn(b"Please see Documentation/rust/quick-start.rst for details", result.stderr)
+
+ # The output will generally be UTF-8 (i.e. unless the user has
+ # put strange values in the environment).
+ result.stderr = result.stderr.decode()
+
+ return result
+
+ def test_rustc_unset(self):
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": None })
+ self.assertIn("Environment variable 'RUSTC' is not set.", result.stderr)
+ self.assertIn("This script is intended to be called from Kbuild.", result.stderr)
+
+ def test_bindgen_unset(self):
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": None })
+ self.assertIn("Environment variable 'BINDGEN' is not set.", result.stderr)
+ self.assertIn("This script is intended to be called from Kbuild.", result.stderr)
+
+ def test_cc_unset(self):
+ result = self.run_script(self.Expected.FAILURE, { "CC": None })
+ self.assertIn("Environment variable 'CC' is not set.", result.stderr)
+ self.assertIn("This script is intended to be called from Kbuild.", result.stderr)
+
+ def test_rustc_missing(self):
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": self.missing })
+ self.assertIn(f"Rust compiler '{self.missing}' could not be found.", result.stderr)
+
+ def test_bindgen_missing(self):
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": self.missing })
+ self.assertIn(f"Rust bindings generator '{self.missing}' could not be found.", result.stderr)
+
+ def test_rustc_nonexecutable(self):
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": self.nonexecutable })
+ self.assertIn(f"Running '{self.nonexecutable}' to check the Rust compiler version failed with", result.stderr)
+
+ def test_rustc_unexpected_binary(self):
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": self.unexpected_binary })
+ self.assertIn(f"Running '{self.unexpected_binary}' to check the Rust compiler version did not return", result.stderr)
+
+ def test_rustc_unexpected_name(self):
+ rustc = self.generate_rustc(f"unexpected {self.rustc_default_version} (a8314ef7d 2022-06-27)")
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": rustc })
+ self.assertIn(f"Running '{rustc}' to check the Rust compiler version did not return", result.stderr)
+
+ def test_rustc_unexpected_version(self):
+ rustc = self.generate_rustc("rustc unexpected (a8314ef7d 2022-06-27)")
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": rustc })
+ self.assertIn(f"Running '{rustc}' to check the Rust compiler version did not return", result.stderr)
+
+ def test_rustc_no_minor(self):
+ rustc = self.generate_rustc(f"rustc {'.'.join(self.rustc_default_version.split('.')[:2])} (a8314ef7d 2022-06-27)")
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": rustc })
+ self.assertIn(f"Running '{rustc}' to check the Rust compiler version did not return", result.stderr)
+
+ def test_rustc_old_version(self):
+ rustc = self.generate_rustc("rustc 1.60.0 (a8314ef7d 2022-06-27)")
+ result = self.run_script(self.Expected.FAILURE, { "RUSTC": rustc })
+ self.assertIn(f"Rust compiler '{rustc}' is too old.", result.stderr)
+
+ def test_rustc_new_version(self):
+ rustc = self.generate_rustc("rustc 1.999.0 (a8314ef7d 2099-06-27)")
+ result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "RUSTC": rustc })
+ self.assertIn(f"Rust compiler '{rustc}' is too new. This may or may not work.", result.stderr)
+
+ def test_bindgen_nonexecutable(self):
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": self.nonexecutable })
+ self.assertIn(f"Running '{self.nonexecutable}' to check the Rust bindings generator version failed with", result.stderr)
+
+ def test_bindgen_unexpected_binary(self):
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": self.unexpected_binary })
+ self.assertIn(f"Running '{self.unexpected_binary}' to check the bindings generator version did not return", result.stderr)
+
+ def test_bindgen_unexpected_name(self):
+ bindgen = self.generate_bindgen_version(f"unexpected {self.bindgen_default_version}")
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"Running '{bindgen}' to check the bindings generator version did not return", result.stderr)
+
+ def test_bindgen_unexpected_version(self):
+ bindgen = self.generate_bindgen_version("bindgen unexpected")
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"Running '{bindgen}' to check the bindings generator version did not return", result.stderr)
+
+ def test_bindgen_no_minor(self):
+ bindgen = self.generate_bindgen_version(f"bindgen {'.'.join(self.bindgen_default_version.split('.')[:2])}")
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"Running '{bindgen}' to check the bindings generator version did not return", result.stderr)
+
+ def test_bindgen_old_version(self):
+ bindgen = self.generate_bindgen_version("bindgen 0.50.0")
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"Rust bindings generator '{bindgen}' is too old.", result.stderr)
+
+ def test_bindgen_new_version(self):
+ bindgen = self.generate_bindgen_version("bindgen 0.999.0")
+ result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "BINDGEN": bindgen })
+ self.assertIn(f"Rust bindings generator '{bindgen}' is too new. This may or may not work.", result.stderr)
+
+ def test_bindgen_libclang_failure(self):
+ for env in (
+ { "LLVM_CONFIG_PATH": self.missing },
+ { "LIBCLANG_PATH": self.missing },
+ { "CLANG_PATH": self.missing },
+ ):
+ with self.subTest(env=env):
+ result = self.run_script(self.Expected.FAILURE, env | { "PATH": os.environ["PATH"], "BINDGEN": "bindgen" })
+ self.assertIn("Running 'bindgen' to check the libclang version (used by the Rust", result.stderr)
+ self.assertIn("bindings generator) failed with code ", result.stderr)
+
+ def test_bindgen_libclang_unexpected_version(self):
+ bindgen = self.generate_bindgen_libclang("scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version unexpected [-W#pragma-messages], err: false")
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"Running '{bindgen}' to check the libclang version (used by the Rust", result.stderr)
+ self.assertIn("bindings generator) did not return an expected output. See output", result.stderr)
+
+ def test_bindgen_libclang_old_version(self):
+ bindgen = self.generate_bindgen_libclang("scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version 10.0.0 [-W#pragma-messages], err: false")
+ result = self.run_script(self.Expected.FAILURE, { "BINDGEN": bindgen })
+ self.assertIn(f"libclang (used by the Rust bindings generator '{bindgen}') is too old.", result.stderr)
+
+ def test_clang_matches_bindgen_libclang_different_bindgen(self):
+ bindgen = self.generate_bindgen_libclang("scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version 999.0.0 [-W#pragma-messages], err: false")
+ result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "BINDGEN": bindgen })
+ self.assertIn("version does not match Clang's. This may be a problem.", result.stderr)
+
+ def test_clang_matches_bindgen_libclang_different_clang(self):
+ cc = self.generate_clang("clang version 999.0.0")
+ result = self.run_script(self.Expected.SUCCESS_WITH_WARNINGS, { "CC": cc })
+ self.assertIn("version does not match Clang's. This may be a problem.", result.stderr)
+
+ def test_rustc_src_core_krustflags(self):
+ result = self.run_script(self.Expected.FAILURE, { "PATH": os.environ["PATH"], "RUSTC": "rustc", "KRUSTFLAGS": f"--sysroot={self.missing}" })
+ self.assertIn("Source code for the 'core' standard library could not be found", result.stderr)
+
+ def test_rustc_src_core_rustlibsrc(self):
+ result = self.run_script(self.Expected.FAILURE, { "RUST_LIB_SRC": self.missing })
+ self.assertIn("Source code for the 'core' standard library could not be found", result.stderr)
+
+ def test_success_cc_unknown(self):
+ result = self.run_script(self.Expected.SUCCESS_WITH_EXTRA_OUTPUT, { "CC": self.missing })
+ self.assertIn("unknown C compiler", result.stderr)
+
+ def test_success_cc_multiple_arguments_ccache(self):
+ clang = self.generate_clang(f"""Ubuntu clang version {self.llvm_default_version}-1ubuntu1
+Target: x86_64-pc-linux-gnu
+Thread model: posix
+InstalledDir: /usr/bin
+""")
+ result = self.run_script(self.Expected.SUCCESS, { "CC": f"{clang} clang" })
+
+ def test_success_rustc_version(self):
+ for rustc_stdout in (
+ f"rustc {self.rustc_default_version} (a8314ef7d 2022-06-27)",
+ f"rustc {self.rustc_default_version}-dev (a8314ef7d 2022-06-27)",
+ f"rustc {self.rustc_default_version}-1.60.0 (a8314ef7d 2022-06-27)",
+ ):
+ with self.subTest(rustc_stdout=rustc_stdout):
+ rustc = self.generate_rustc(rustc_stdout)
+ result = self.run_script(self.Expected.SUCCESS, { "RUSTC": rustc })
+
+ def test_success_bindgen_version(self):
+ for bindgen_stdout in (
+ f"bindgen {self.bindgen_default_version}",
+ f"bindgen {self.bindgen_default_version}-dev",
+ f"bindgen {self.bindgen_default_version}-0.999.0",
+ ):
+ with self.subTest(bindgen_stdout=bindgen_stdout):
+ bindgen = self.generate_bindgen_version(bindgen_stdout)
+ result = self.run_script(self.Expected.SUCCESS, { "BINDGEN": bindgen })
+
+ def test_success_bindgen_libclang(self):
+ for stderr in (
+ f"scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {self.llvm_default_version} (https://github.com/llvm/llvm-project.git 4a2c05b05ed07f1f620e94f6524a8b4b2760a0b1) [-W#pragma-messages], err: false",
+ f"/home/jd/Documents/dev/kernel-module-flake/linux-6.1/outputs/dev/lib/modules/6.1.0-development/source/scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {self.llvm_default_version} [-W#pragma-messages], err: false",
+ f"scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {self.llvm_default_version} (Fedora 13.0.0-3.fc35) [-W#pragma-messages], err: false",
+ f"""
+/nix/store/dsd5gz46hdbdk2rfdimqddhq6m8m8fqs-bash-5.1-p16/bin/bash: warning: setlocale: LC_ALL: cannot change locale (c)
+scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {self.llvm_default_version} [-W#pragma-messages], err: false
+""",
+ f"""
+/nix/store/dsd5gz46hdbdk2rfdimqddhq6m8m8fqs-bash-5.1.0-p16/bin/bash: warning: setlocale: LC_ALL: cannot change locale (c)
+/home/jd/Documents/dev/kernel-module-flake/linux-6.1/outputs/dev/lib/modules/6.1.0-development/source/scripts/rust_is_available_bindgen_libclang.h:2:9: warning: clang version {self.llvm_default_version} (Fedora 13.0.0-3.fc35) [-W#pragma-messages], err: false
+"""
+ ):
+ with self.subTest(stderr=stderr):
+ bindgen = self.generate_bindgen_libclang(stderr)
+ result = self.run_script(self.Expected.SUCCESS, { "BINDGEN": bindgen })
+
+ def test_success_clang_version(self):
+ for clang_stdout in (
+ f"clang version {self.llvm_default_version} (https://github.com/llvm/llvm-project.git 4a2c05b05ed07f1f620e94f6524a8b4b2760a0b1)",
+ f"clang version {self.llvm_default_version}-dev",
+ f"clang version {self.llvm_default_version}-2~ubuntu20.04.1",
+ f"Ubuntu clang version {self.llvm_default_version}-2~ubuntu20.04.1",
+ ):
+ with self.subTest(clang_stdout=clang_stdout):
+ clang = self.generate_clang(clang_stdout)
+ result = self.run_script(self.Expected.SUCCESS, { "CC": clang })
+
+ def test_success_real_programs(self):
+ for cc in ["gcc", "clang"]:
+ with self.subTest(cc=cc):
+ result = self.run_script(self.Expected.SUCCESS, {
+ "PATH": os.environ["PATH"],
+ "RUSTC": "rustc",
+ "BINDGEN": "bindgen",
+ "CC": cc,
+ })
+
+if __name__ == "__main__":
+ unittest.main()
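
The cases above all reduce to parsing a tool's version banner and comparing the
numeric part against a minimum while tolerating suffixes such as "-dev" or
distro tags. A self-contained sketch of that idea in Python (parse_version and
is_too_old are hypothetical names, not part of the script under test):

import re

def parse_version(banner):
    # Pull the first dotted numeric version out of e.g. "bindgen 0.56.0-dev".
    match = re.search(r"(\d+)\.(\d+)\.(\d+)", banner)
    if match is None:
        raise ValueError(f"no version found in {banner!r}")
    return tuple(int(part) for part in match.groups())

def is_too_old(banner, minimum):
    # Suffixes and distro decorations are ignored on purpose, mirroring the
    # "success" version cases exercised above.
    return parse_version(banner) < minimum

# Arbitrary minimum versions, used only for the example.
assert not is_too_old("bindgen 0.56.0-dev", (0, 56, 0))
assert is_too_old("rustc 1.60.0 (a8314ef7d 2022-06-27)", (1, 62, 0))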
diff --git a/scripts/rustdoc_test_builder.rs b/scripts/rustdoc_test_builder.rs
new file mode 100644
index 000000000000..e5894652f12c
--- /dev/null
+++ b/scripts/rustdoc_test_builder.rs
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Test builder for `rustdoc`-generated tests.
+//!
+//! This script is a hack to extract the test from `rustdoc`'s output. Ideally, `rustdoc` would
+//! have an option to generate this information instead, e.g. as JSON output.
+//!
+//! The `rustdoc`-generated test names look like `{file}_{line}_{number}`, e.g.
+//! `...path_rust_kernel_sync_arc_rs_42_0`. `number` is the "test number", needed in cases where
+//! a macro that expands into items with doctests is invoked several times within the same line.
+//!
+//! However, since these names are used for bisection in CI, the line number makes them unstable.
+//! In the future, we would like `rustdoc` to give us the Rust item path associated with the test,
+//! plus a "test number" (for cases with several examples per item), and to generate a name from
+//! that. For the moment, we generate a new name, `{file}_{number}`, ourselves in the `gen` script
+//! (done there since we need to be aware of all the tests in a given file).
+
+use std::io::Read;
+
+fn main() {
+ let mut stdin = std::io::stdin().lock();
+ let mut body = String::new();
+ stdin.read_to_string(&mut body).unwrap();
+
+ // Find the generated function name looking for the inner function inside `main()`.
+ //
+ // The line we are looking for looks like one of the following:
+ //
+ // ```
+ // fn main() { #[allow(non_snake_case)] fn _doctest_main_rust_kernel_file_rs_28_0() {
+ // fn main() { #[allow(non_snake_case)] fn _doctest_main_rust_kernel_file_rs_37_0() -> Result<(), impl core::fmt::Debug> {
+ // ```
+ //
+ // It should be unlikely that doctest code matches such lines (when code is formatted properly).
+ let rustdoc_function_name = body
+ .lines()
+ .find_map(|line| {
+ Some(
+ line.split_once("fn main() {")?
+ .1
+ .split_once("fn ")?
+ .1
+ .split_once("()")?
+ .0,
+ )
+ .filter(|x| x.chars().all(|c| c.is_alphanumeric() || c == '_'))
+ })
+ .expect("No test function found in `rustdoc`'s output.");
+
+ // Qualify `Result` to avoid the collision with our own `Result` coming from the prelude.
+ let body = body.replace(
+ &format!("{rustdoc_function_name}() -> Result<(), impl core::fmt::Debug> {{"),
+ &format!("{rustdoc_function_name}() -> core::result::Result<(), impl core::fmt::Debug> {{"),
+ );
+
+ // For tests that get generated with `Result`, like above, `rustdoc` generates an `unwrap()` on
+ // the return value to check there were no returned errors. Instead, we use our assert macro
+ // since we want to just fail the test, not panic the kernel.
+ //
+ // We save the result in a variable so that the failed assertion message looks nicer.
+ let body = body.replace(
+ &format!("}} {rustdoc_function_name}().unwrap() }}"),
+ &format!("}} let test_return_value = {rustdoc_function_name}(); assert!(test_return_value.is_ok()); }}"),
+ );
+
+ // Figure out a smaller test name based on the generated function name.
+ let name = rustdoc_function_name.split_once("_rust_kernel_").unwrap().1;
+
+ let path = format!("rust/test/doctests/kernel/{name}");
+
+ std::fs::write(path, body.as_bytes()).unwrap();
+}
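
The extraction step above is purely textual: find the `fn main() {` wrapper that
`rustdoc` emits, then take the inner function name up to its parameter list. A
rough Python equivalent of that string handling (a sketch only; the in-tree tool
is the Rust program above):

def find_doctest_fn(body):
    # Mirror the split_once() chain: take what follows "fn main() {", then what
    # follows the inner "fn ", up to the first "()".
    for line in body.splitlines():
        _, sep, rest = line.partition("fn main() {")
        if not sep:
            continue
        _, sep, rest = rest.partition("fn ")
        if not sep:
            continue
        name = rest.partition("()")[0]
        if name and all(c.isalnum() or c == "_" for c in name):
            return name
    return None

example = "fn main() { #[allow(non_snake_case)] fn _doctest_main_rust_kernel_file_rs_28_0() {"
assert find_doctest_fn(example) == "_doctest_main_rust_kernel_file_rs_28_0"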
diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs
new file mode 100644
index 000000000000..5ebd42ae4a3f
--- /dev/null
+++ b/scripts/rustdoc_test_gen.rs
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generates KUnit tests from saved `rustdoc`-generated tests.
+//!
+//! KUnit passes a context (`struct kunit *`) to each test, which should be forwarded to the other
+//! KUnit functions and macros.
+//!
+//! However, we want to keep this as an implementation detail because:
+//!
+//! - Test code should not care about the implementation.
+//!
+//! - Documentation looks worse if it needs to carry extra details unrelated to the piece
+//! being described.
+//!
+//! - Test code should be able to define functions and call them, without having to carry
+//! the context.
+//!
+//! - Later on, we may want to be able to test non-kernel code (e.g. `core`, `alloc` or
+//! third-party crates) which likely use the standard library `assert*!` macros.
+//!
+//! For this reason, instead of the passed context, `kunit_get_current_test()` is used
+//! (i.e. `current->kunit_test`).
+//!
+//! Note that this means other threads/tasks potentially spawned by a given test, if failing, will
+//! report the failure in the kernel log but will not fail the actual test. Saving the pointer in
+//! e.g. a `static` per test does not fully solve the issue either, because currently KUnit does
+//! not support assertions (only expectations) from other tasks. Thus we leave that feature for
+//! the future, which simplifies the code here too. We could also simply not allow `assert`s in
+//! other tasks, but that seems overly constraining, and we do want to support them, eventually.
+
+use std::{
+ fs,
+ fs::File,
+ io::{BufWriter, Read, Write},
+ path::{Path, PathBuf},
+};
+
+/// Find the real path to the original file based on the `file` portion of the test name.
+///
+/// `rustdoc` generated `file`s look like `sync_locked_by_rs`. Underscores (except the last one)
+/// may represent an actual underscore in a directory/file, or a path separator. Thus the actual
+/// file might be `sync_locked_by.rs`, `sync/locked_by.rs`, `sync_locked/by.rs` or
+/// `sync/locked/by.rs`. This function walks the file system to determine which is the real one.
+///
+/// This does require that ambiguities do not exist, but that seems fair, especially since this is
+/// all supposed to be temporary until `rustdoc` gives us proper metadata to build this. If such
+/// ambiguities are detected, they are diagnosed and the script panics.
+fn find_real_path<'a>(srctree: &Path, valid_paths: &'a mut Vec<PathBuf>, file: &str) -> &'a str {
+ valid_paths.clear();
+
+ let potential_components: Vec<&str> = file.strip_suffix("_rs").unwrap().split('_').collect();
+
+ find_candidates(srctree, valid_paths, Path::new(""), &potential_components);
+ fn find_candidates(
+ srctree: &Path,
+ valid_paths: &mut Vec<PathBuf>,
+ prefix: &Path,
+ potential_components: &[&str],
+ ) {
+        // The base case: check whether the remaining potential components, joined by
+        // underscores, form a file.
+ let joined_potential_components = potential_components.join("_") + ".rs";
+ if srctree
+ .join("rust/kernel")
+ .join(prefix)
+ .join(&joined_potential_components)
+ .is_file()
+ {
+ // Avoid `srctree` here in order to keep paths relative to it in the KTAP output.
+ valid_paths.push(
+ Path::new("rust/kernel")
+ .join(prefix)
+ .join(joined_potential_components),
+ );
+ }
+
+ // In addition, check whether each component prefix, joined by underscores, is a directory.
+ // If not, there is no need to check for combinations with that prefix.
+ for i in 1..potential_components.len() {
+ let (components_prefix, components_rest) = potential_components.split_at(i);
+ let prefix = prefix.join(components_prefix.join("_"));
+ if srctree.join("rust/kernel").join(&prefix).is_dir() {
+ find_candidates(srctree, valid_paths, &prefix, components_rest);
+ }
+ }
+ }
+
+ assert!(
+ valid_paths.len() > 0,
+ "No path candidates found. This is likely a bug in the build system, or some files went \
+ away while compiling."
+ );
+
+ if valid_paths.len() > 1 {
+ eprintln!("Several path candidates found:");
+ for path in valid_paths {
+ eprintln!(" {path:?}");
+ }
+ panic!(
+ "Several path candidates found, please resolve the ambiguity by renaming a file or \
+ folder."
+ );
+ }
+
+ valid_paths[0].to_str().unwrap()
+}
+
+fn main() {
+ let srctree = std::env::var("srctree").unwrap();
+ let srctree = Path::new(&srctree);
+
+ let mut paths = fs::read_dir("rust/test/doctests/kernel")
+ .unwrap()
+ .map(|entry| entry.unwrap().path())
+ .collect::<Vec<_>>();
+
+ // Sort paths.
+ paths.sort();
+
+ let mut rust_tests = String::new();
+ let mut c_test_declarations = String::new();
+ let mut c_test_cases = String::new();
+ let mut body = String::new();
+ let mut last_file = String::new();
+ let mut number = 0;
+ let mut valid_paths: Vec<PathBuf> = Vec::new();
+ let mut real_path: &str = "";
+ for path in paths {
+ // The `name` follows the `{file}_{line}_{number}` pattern (see description in
+ // `scripts/rustdoc_test_builder.rs`). Discard the `number`.
+ let name = path.file_name().unwrap().to_str().unwrap().to_string();
+
+ // Extract the `file` and the `line`, discarding the `number`.
+ let (file, line) = name.rsplit_once('_').unwrap().0.rsplit_once('_').unwrap();
+
+ // Generate an ID sequence ("test number") for each one in the file.
+ if file == last_file {
+ number += 1;
+ } else {
+ number = 0;
+ last_file = file.to_string();
+
+ // Figure out the real path, only once per file.
+ real_path = find_real_path(srctree, &mut valid_paths, file);
+ }
+
+ // Generate a KUnit name (i.e. test name and C symbol) for this test.
+ //
+ // We avoid the line number, like `rustdoc` does, to make things slightly more stable for
+ // bisection purposes. However, to aid developers in mapping back what test failed, we will
+ // print a diagnostics line in the KTAP report.
+ let kunit_name = format!("rust_doctest_kernel_{file}_{number}");
+
+ // Read the test's text contents to dump it below.
+ body.clear();
+ File::open(path).unwrap().read_to_string(&mut body).unwrap();
+
+        // Calculate how many lines come before the `main` function (including the `main` function line).
+ let body_offset = body
+ .lines()
+ .take_while(|line| !line.contains("fn main() {"))
+ .count()
+ + 1;
+
+ use std::fmt::Write;
+ write!(
+ rust_tests,
+ r#"/// Generated `{name}` KUnit test case from a Rust documentation test.
+#[no_mangle]
+pub extern "C" fn {kunit_name}(__kunit_test: *mut kernel::bindings::kunit) {{
+ /// Overrides the usual [`assert!`] macro with one that calls KUnit instead.
+ #[allow(unused)]
+ macro_rules! assert {{
+ ($cond:expr $(,)?) => {{{{
+ kernel::kunit_assert!("{kunit_name}", "{real_path}", __DOCTEST_ANCHOR - {line}, $cond);
+ }}}}
+ }}
+
+ /// Overrides the usual [`assert_eq!`] macro with one that calls KUnit instead.
+ #[allow(unused)]
+ macro_rules! assert_eq {{
+ ($left:expr, $right:expr $(,)?) => {{{{
+ kernel::kunit_assert_eq!("{kunit_name}", "{real_path}", __DOCTEST_ANCHOR - {line}, $left, $right);
+ }}}}
+ }}
+
+ // Many tests need the prelude, so provide it by default.
+ #[allow(unused)]
+ use kernel::prelude::*;
+
+ // Unconditionally print the location of the original doctest (i.e. rather than the location in
+ // the generated file) so that developers can easily map the test back to the source code.
+ //
+ // This information is also printed when assertions fail, but this helps in the successful cases
+ // when the user is running KUnit manually, or when passing `--raw_output` to `kunit.py`.
+ //
+ // This follows the syntax for declaring test metadata in the proposed KTAP v2 spec, which may
+ // be used for the proposed KUnit test attributes API. Thus hopefully this will make migration
+ // easier later on.
+ kernel::kunit::info(format_args!(" # {kunit_name}.location: {real_path}:{line}\n"));
+
+ /// The anchor where the test code body starts.
+ #[allow(unused)]
+ static __DOCTEST_ANCHOR: i32 = core::line!() as i32 + {body_offset} + 1;
+ {{
+ {body}
+ main();
+ }}
+}}
+
+"#
+ )
+ .unwrap();
+
+ write!(c_test_declarations, "void {kunit_name}(struct kunit *);\n").unwrap();
+ write!(c_test_cases, " KUNIT_CASE({kunit_name}),\n").unwrap();
+ }
+
+ let rust_tests = rust_tests.trim();
+ let c_test_declarations = c_test_declarations.trim();
+ let c_test_cases = c_test_cases.trim();
+
+ write!(
+ BufWriter::new(File::create("rust/doctests_kernel_generated.rs").unwrap()),
+ r#"//! `kernel` crate documentation tests.
+
+const __LOG_PREFIX: &[u8] = b"rust_doctests_kernel\0";
+
+{rust_tests}
+"#
+ )
+ .unwrap();
+
+ write!(
+ BufWriter::new(File::create("rust/doctests_kernel_generated_kunit.c").unwrap()),
+ r#"/*
+ * `kernel` crate documentation tests.
+ */
+
+#include <kunit/test.h>
+
+{c_test_declarations}
+
+static struct kunit_case test_cases[] = {{
+ {c_test_cases}
+ {{ }}
+}};
+
+static struct kunit_suite test_suite = {{
+ .name = "rust_doctests_kernel",
+ .test_cases = test_cases,
+}};
+
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
+"#
+ )
+ .unwrap();
+}
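
`find_real_path()` above recovers a path such as `sync/locked_by.rs` from the
flattened name `sync_locked_by_rs`, where each underscore may be literal or a
path separator. A brute-force Python sketch of the search space it walks (the
real tool prunes it by only descending into prefixes that are directories):

import itertools

def candidate_paths(file_stem):
    # "sync_locked_by_rs" -> components ["sync", "locked", "by"], suffix ".rs".
    components = file_stem.removesuffix("_rs").split("_")
    # Every gap between components is either a literal underscore or a path
    # separator.
    for seps in itertools.product(["_", "/"], repeat=len(components) - 1):
        path = components[0]
        for sep, comp in zip(seps, components[1:]):
            path += sep + comp
        yield path + ".rs"

assert sorted(candidate_paths("sync_locked_by_rs")) == [
    "sync/locked/by.rs",
    "sync/locked_by.rs",
    "sync_locked/by.rs",
    "sync_locked_by.rs",
]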
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index 0f295961e773..2cff851ebfd7 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -279,6 +279,29 @@ config ZERO_CALL_USED_REGS
endmenu
+menu "Hardening of kernel data structures"
+
+config LIST_HARDENED
+ bool "Check integrity of linked list manipulation"
+ help
+ Minimal integrity checking in the linked-list manipulation routines
+ to catch memory corruptions that are not guaranteed to result in an
+ immediate access fault.
+
+ If unsure, say N.
+
+config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select LIST_HARDENED
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+ for validity.
+
+ If unsure, say N.
+
+endmenu
+
config CC_HAS_RANDSTRUCT
def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
# Randstruct was first added in Clang 15, but it isn't safe to use until
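
LIST_HARDENED above adds cheap consistency checks to the list primitives: before
an entry is linked in between prev and next, the neighbours must still point at
each other and the new entry must not already be either of them; with
BUG_ON_DATA_CORRUPTION the failure becomes fatal rather than merely reported. A
toy model of that invariant check (Python, purely illustrative, not kernel code):

class Node:
    def __init__(self, value=None):
        self.value = value
        self.prev = self
        self.next = self

def list_add_valid(new, prev, nxt):
    # The neighbours must still reference each other, and the new entry must
    # not already be one of them.
    return nxt.prev is prev and prev.next is nxt and new is not prev and new is not nxt

def list_add(new, head):
    prev, nxt = head, head.next
    if not list_add_valid(new, prev, nxt):
        raise RuntimeError("list corruption detected")  # BUG_ON_DATA_CORRUPTION analogue
    nxt.prev = new
    new.next = nxt
    new.prev = prev
    prev.next = new

head = Node()
list_add(Node(1), head)
head.next.prev = Node(2)          # simulate memory corruption
try:
    list_add(Node(3), head)
except RuntimeError:
    pass                          # the hardened check caught it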
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index db7a51acf9db..bd6a910f6528 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -226,7 +226,7 @@ static int __aafs_setup_d_inode(struct inode *dir, struct dentry *dentry,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = iops ? iops : &simple_dir_inode_operations;
@@ -1554,8 +1554,11 @@ void __aafs_profile_migrate_dents(struct aa_profile *old,
for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
new->dents[i] = old->dents[i];
- if (new->dents[i])
- new->dents[i]->d_inode->i_mtime = current_time(new->dents[i]->d_inode);
+ if (new->dents[i]) {
+ struct inode *inode = d_inode(new->dents[i]);
+
+ inode->i_mtime = inode_set_ctime_current(inode);
+ }
old->dents[i] = NULL;
}
}
@@ -2540,7 +2543,7 @@ static int aa_mk_null_file(struct dentry *parent)
inode->i_ino = get_next_ino();
inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
MKDEV(MEM_MAJOR, 3));
d_instantiate(dentry, inode);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 694fb7a09962..8b8846073e14 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -86,10 +86,13 @@ void __aa_loaddata_update(struct aa_loaddata *data, long revision)
data->revision = revision;
if ((data->dents[AAFS_LOADDATA_REVISION])) {
- d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
- current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
- d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
- current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
+ struct inode *inode;
+
+ inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
+ inode->i_mtime = inode_set_ctime_current(inode);
+
+ inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
+ inode->i_mtime = inode_set_ctime_current(inode);
}
}
diff --git a/security/inode.c b/security/inode.c
index 6c326939750d..3aa75fffa8c9 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -145,7 +145,7 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
+ inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index ec6e0d789da1..232191ee09e3 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -67,7 +67,9 @@ config INTEGRITY_MACHINE_KEYRING
depends on SECONDARY_TRUSTED_KEYRING
depends on INTEGRITY_ASYMMETRIC_KEYS
depends on SYSTEM_BLACKLIST_KEYRING
- depends on LOAD_UEFI_KEYS
+ depends on LOAD_UEFI_KEYS || LOAD_PPC_KEYS
+ select INTEGRITY_CA_MACHINE_KEYRING if LOAD_PPC_KEYS
+ select INTEGRITY_CA_MACHINE_KEYRING_MAX if LOAD_PPC_KEYS
help
If set, provide a keyring to which Machine Owner Keys (MOK) may
be added. This keyring shall contain just MOK keys. Unlike keys
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 6f31ffe23c48..df387de29bfa 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -34,9 +34,9 @@ static const char * const keyring_name[INTEGRITY_KEYRING_MAX] = {
};
#ifdef CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
-#define restrict_link_to_ima restrict_link_by_builtin_and_secondary_trusted
+#define restrict_link_to_ima restrict_link_by_digsig_builtin_and_secondary
#else
-#define restrict_link_to_ima restrict_link_by_builtin_trusted
+#define restrict_link_to_ima restrict_link_by_digsig_builtin
#endif
static struct key *integrity_keyring_from_id(const unsigned int id)
@@ -113,7 +113,7 @@ static int __init __integrity_init_keyring(const unsigned int id,
} else {
if (id == INTEGRITY_KEYRING_PLATFORM)
set_platform_trusted_keys(keyring[id]);
- if (id == INTEGRITY_KEYRING_MACHINE && trust_moklist())
+ if (id == INTEGRITY_KEYRING_MACHINE && imputed_trust_enabled())
set_machine_trusted_keys(keyring[id]);
if (id == INTEGRITY_KEYRING_IMA)
load_module_cert(keyring[id]);
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index a6e19d23e700..fba9ee359bc9 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -64,7 +64,8 @@ config EVM_LOAD_X509
This option enables X509 certificate loading from the kernel
onto the '.evm' trusted keyring. A public key can be used to
- verify EVM integrity starting from the 'init' process.
+ verify EVM integrity starting from the 'init' process. The
+ key must have digitalSignature usage set.
config EVM_X509_PATH
string "EVM X509 certificate path"
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 60a511c6b583..684425936c53 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -270,7 +270,8 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
help
Keys may be added to the IMA or IMA blacklist keyrings, if the
key is validly signed by a CA cert in the system built-in or
- secondary trusted keyrings.
+ secondary trusted keyrings. The key must also have the
+ digitalSignature usage set.
Intermediate keys between those the kernel has compiled in and the
IMA keys to be added may be added to the system secondary keyring,
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c9b3bd8f1bb9..7a0420cf1a6a 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -68,7 +68,7 @@ enum policy_rule_list { IMA_DEFAULT_POLICY = 1, IMA_CUSTOM_POLICY };
struct ima_rule_opt_list {
size_t count;
- char *items[];
+ char *items[] __counted_by(count);
};
/*
@@ -342,6 +342,7 @@ static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
kfree(src_copy);
return ERR_PTR(-ENOMEM);
}
+ opt_list->count = count;
/*
* strsep() has already replaced all instances of '|' with '\0',
@@ -357,7 +358,6 @@ static struct ima_rule_opt_list *ima_alloc_rule_opt_list(const substring_t *src)
opt_list->items[i] = cur;
cur = strchr(cur, '\0') + 1;
}
- opt_list->count = count;
return opt_list;
}
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 7167a6e99bdc..d7553c93f5c0 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -320,13 +320,14 @@ static inline void __init add_to_platform_keyring(const char *source,
#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
void __init add_to_machine_keyring(const char *source, const void *data, size_t len);
-bool __init trust_moklist(void);
+bool __init imputed_trust_enabled(void);
#else
static inline void __init add_to_machine_keyring(const char *source,
const void *data, size_t len)
{
}
-static inline bool __init trust_moklist(void)
+
+static inline bool __init imputed_trust_enabled(void)
{
return false;
}
diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
index 8a1124e4d769..13ea17207902 100644
--- a/security/integrity/platform_certs/keyring_handler.c
+++ b/security/integrity/platform_certs/keyring_handler.c
@@ -61,7 +61,8 @@ __init efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type)
__init efi_element_handler_t get_handler_for_mok(const efi_guid_t *sig_type)
{
if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0) {
- if (IS_ENABLED(CONFIG_INTEGRITY_MACHINE_KEYRING) && trust_moklist())
+ if (IS_ENABLED(CONFIG_INTEGRITY_MACHINE_KEYRING) &&
+ imputed_trust_enabled())
return add_to_machine_keyring;
else
return add_to_platform_keyring;
@@ -69,6 +70,22 @@ __init efi_element_handler_t get_handler_for_mok(const efi_guid_t *sig_type)
return NULL;
}
+__init efi_element_handler_t get_handler_for_ca_keys(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return add_to_machine_keyring;
+
+ return NULL;
+}
+
+__init efi_element_handler_t get_handler_for_code_signing_keys(const efi_guid_t *sig_type)
+{
+ if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
+ return add_to_secondary_keyring;
+
+ return NULL;
+}
+
/*
* Return the appropriate handler for particular signature list types found in
* the UEFI dbx and MokListXRT tables.
diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
index 212d894a8c0c..f92895cc50f6 100644
--- a/security/integrity/platform_certs/keyring_handler.h
+++ b/security/integrity/platform_certs/keyring_handler.h
@@ -30,6 +30,16 @@ efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
efi_element_handler_t get_handler_for_mok(const efi_guid_t *sig_type);
/*
+ * Return the handler for particular signature list types for CA keys.
+ */
+efi_element_handler_t get_handler_for_ca_keys(const efi_guid_t *sig_type);
+
+/*
+ * Return the handler for particular signature list types for code signing keys.
+ */
+efi_element_handler_t get_handler_for_code_signing_keys(const efi_guid_t *sig_type);
+
+/*
* Return the handler for particular signature list types found in the dbx.
*/
efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
diff --git a/security/integrity/platform_certs/load_ipl_s390.c b/security/integrity/platform_certs/load_ipl_s390.c
index e769dcb7ea94..c7c381a9ddaa 100644
--- a/security/integrity/platform_certs/load_ipl_s390.c
+++ b/security/integrity/platform_certs/load_ipl_s390.c
@@ -22,8 +22,8 @@ static int __init load_ipl_certs(void)
if (!ipl_cert_list_addr)
return 0;
- /* Copy the certificates to the system keyring */
- ptr = (void *) ipl_cert_list_addr;
+ /* Copy the certificates to the platform keyring */
+ ptr = __va(ipl_cert_list_addr);
end = ptr + ipl_cert_list_size;
while ((void *) ptr < end) {
len = *(unsigned int *) ptr;
diff --git a/security/integrity/platform_certs/load_powerpc.c b/security/integrity/platform_certs/load_powerpc.c
index 170789dc63d2..c85febca3343 100644
--- a/security/integrity/platform_certs/load_powerpc.c
+++ b/security/integrity/platform_certs/load_powerpc.c
@@ -59,6 +59,8 @@ static __init void *get_cert_list(u8 *key, unsigned long keylen, u64 *size)
static int __init load_powerpc_certs(void)
{
void *db = NULL, *dbx = NULL, *data = NULL;
+ void *trustedca;
+ void *moduledb;
u64 dsize = 0;
u64 offset = 0;
int rc = 0;
@@ -120,6 +122,38 @@ static int __init load_powerpc_certs(void)
kfree(data);
}
+ data = get_cert_list("trustedcadb", 12, &dsize);
+ if (!data) {
+ pr_info("Couldn't get trustedcadb list from firmware\n");
+ } else if (IS_ERR(data)) {
+ rc = PTR_ERR(data);
+ pr_err("Error reading trustedcadb from firmware: %d\n", rc);
+ } else {
+ extract_esl(trustedca, data, dsize, offset);
+
+ rc = parse_efi_signature_list("powerpc:trustedca", trustedca, dsize,
+ get_handler_for_ca_keys);
+ if (rc)
+ pr_err("Couldn't parse trustedcadb signatures: %d\n", rc);
+ kfree(data);
+ }
+
+ data = get_cert_list("moduledb", 9, &dsize);
+ if (!data) {
+ pr_info("Couldn't get moduledb list from firmware\n");
+ } else if (IS_ERR(data)) {
+ rc = PTR_ERR(data);
+ pr_err("Error reading moduledb from firmware: %d\n", rc);
+ } else {
+ extract_esl(moduledb, data, dsize, offset);
+
+ rc = parse_efi_signature_list("powerpc:moduledb", moduledb, dsize,
+ get_handler_for_code_signing_keys);
+ if (rc)
+ pr_err("Couldn't parse moduledb signatures: %d\n", rc);
+ kfree(data);
+ }
+
return rc;
}
late_initcall(load_powerpc_certs);
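
Both new hunks above follow the same pattern: fetch a named certificate list
from firmware, then pass every entry to a handler picked by key class (CA
certificates go to the .machine keyring, code-signing certificates to the
secondary trusted keyring). A Python sketch of that dispatch (names and the
read_cert_list callback are illustrative only):

def handler_for_ca_keys(sig_type):
    # CA certificates are destined for the .machine keyring.
    return "machine" if sig_type == "x509" else None

def handler_for_code_signing_keys(sig_type):
    # Code-signing certificates go to the secondary trusted keyring.
    return "secondary" if sig_type == "x509" else None

SOURCES = {
    "trustedcadb": handler_for_ca_keys,
    "moduledb": handler_for_code_signing_keys,
}

def load_certs(read_cert_list):
    # read_cert_list(name) stands in for get_cert_list(); it returns a list of
    # (sig_type, blob) pairs, or None when the firmware variable is absent.
    for name, pick_handler in SOURCES.items():
        entries = read_cert_list(name)
        if entries is None:
            print(f"Couldn't get {name} list from firmware")
            continue
        for sig_type, blob in entries:
            keyring = pick_handler(sig_type)
            if keyring is not None:
                print(f"{name}: {len(blob)}-byte cert -> {keyring} keyring")

load_certs(lambda name: [("x509", b"0\x82")] if name == "trustedcadb" else None)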
diff --git a/security/integrity/platform_certs/machine_keyring.c b/security/integrity/platform_certs/machine_keyring.c
index 7aaed7950b6e..a401640a63cd 100644
--- a/security/integrity/platform_certs/machine_keyring.c
+++ b/security/integrity/platform_certs/machine_keyring.c
@@ -8,8 +8,6 @@
#include <linux/efi.h>
#include "../integrity.h"
-static bool trust_mok;
-
static __init int machine_keyring_init(void)
{
int rc;
@@ -36,7 +34,8 @@ void __init add_to_machine_keyring(const char *source, const void *data, size_t
* If the restriction check does not pass and the platform keyring
* is configured, try to add it into that keyring instead.
*/
- if (rc && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
+ if (rc && efi_enabled(EFI_BOOT) &&
+ IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
rc = integrity_load_cert(INTEGRITY_KEYRING_PLATFORM, source,
data, len, perm);
@@ -62,12 +61,14 @@ static __init bool uefi_check_trust_mok_keys(void)
return false;
}
-bool __init trust_moklist(void)
+static bool __init trust_moklist(void)
{
static bool initialized;
+ static bool trust_mok;
if (!initialized) {
initialized = true;
+ trust_mok = false;
if (uefi_check_trust_mok_keys())
trust_mok = true;
@@ -75,3 +76,16 @@ bool __init trust_moklist(void)
return trust_mok;
}
+
+/*
+ * Provides a platform-specific check for trusting imputed keys before loading
+ * them onto the .machine keyring. UEFI systems enable this trust based on a
+ * variable; for other platforms, it is always enabled.
+ */
+bool __init imputed_trust_enabled(void)
+{
+ if (efi_enabled(EFI_BOOT))
+ return trust_moklist();
+
+ return true;
+}
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 41e9735006d0..8f33cd170e42 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -178,7 +178,7 @@ struct key *request_key_auth_new(struct key *target, const char *op,
if (!rka->callout_info)
goto error_free_rka;
rka->callout_len = callout_len;
- strlcpy(rka->op, op, sizeof(rka->op));
+ strscpy(rka->op, op, sizeof(rka->op));
/* see if the calling process is already servicing the key request of
* another process */
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index ebae964f7cc9..a9d40456a064 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -336,6 +336,7 @@ static int read_trusted_verity_root_digests(unsigned int fd)
rc = -ENOMEM;
goto err;
}
+ trd->len = len;
if (hex2bin(trd->data, d, len)) {
kfree(trd);
@@ -343,8 +344,6 @@ static int read_trusted_verity_root_digests(unsigned int fd)
goto err;
}
- trd->len = len;
-
list_add_tail(&trd->node, &dm_verity_loadpin_trusted_root_digests);
}
diff --git a/security/security.c b/security/security.c
index 2dfc7b9f6ed9..3b454e9442b1 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1139,6 +1139,20 @@ void security_bprm_committed_creds(struct linux_binprm *bprm)
}
/**
+ * security_fs_context_submount() - Initialise fc->security
+ * @fc: new filesystem context
+ * @reference: dentry reference for submount/remount
+ *
+ * Fill out the ->security field for a new fs_context.
+ *
+ * Return: Returns 0 on success or negative error code on failure.
+ */
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference)
+{
+ return call_int_hook(fs_context_submount, 0, fc, reference);
+}
+
+/**
* security_fs_context_dup() - Duplicate a fs_context LSM blob
* @fc: destination filesystem context
* @src_fc: source filesystem context
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2bdc48dd8670..73435b6a1604 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2745,6 +2745,27 @@ static int selinux_umount(struct vfsmount *mnt, int flags)
FILESYSTEM__UNMOUNT, NULL);
}
+static int selinux_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ const struct superblock_security_struct *sbsec;
+ struct selinux_mnt_opts *opts;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+
+ sbsec = selinux_superblock(reference);
+ if (sbsec->flags & FSCONTEXT_MNT)
+ opts->fscontext_sid = sbsec->sid;
+ if (sbsec->flags & CONTEXT_MNT)
+ opts->context_sid = sbsec->mntpoint_sid;
+ if (sbsec->flags & DEFCONTEXT_MNT)
+ opts->defcontext_sid = sbsec->def_sid;
+ fc->security = opts;
+ return 0;
+}
+
static int selinux_fs_context_dup(struct fs_context *fc,
struct fs_context *src_fc)
{
@@ -7182,6 +7203,7 @@ static struct security_hook_list selinux_hooks[] __ro_after_init = {
/*
* PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
*/
+ LSM_HOOK_INIT(fs_context_submount, selinux_fs_context_submount),
LSM_HOOK_INIT(fs_context_dup, selinux_fs_context_dup),
LSM_HOOK_INIT(fs_context_parse_param, selinux_fs_context_parse_param),
LSM_HOOK_INIT(sb_eat_lsm_opts, selinux_sb_eat_lsm_opts),
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index bad1f6b685fd..9dafb6ff110d 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1197,7 +1197,7 @@ static struct inode *sel_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = ret->i_ctime = current_time(ret);
+ ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
}
return ret;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 6e270cf3fd30..a8201cf22f20 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -615,6 +615,56 @@ out_opt_err:
}
/**
+ * smack_fs_context_submount - Initialise security data for a filesystem context
+ * @fc: The filesystem context.
+ * @reference: reference superblock
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ struct superblock_smack *sbsp;
+ struct smack_mnt_opts *ctx;
+ struct inode_smack *isp;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ fc->security = ctx;
+
+ sbsp = smack_superblock(reference);
+ isp = smack_inode(reference->s_root->d_inode);
+
+ if (sbsp->smk_default) {
+ ctx->fsdefault = kstrdup(sbsp->smk_default->smk_known, GFP_KERNEL);
+ if (!ctx->fsdefault)
+ return -ENOMEM;
+ }
+
+ if (sbsp->smk_floor) {
+ ctx->fsfloor = kstrdup(sbsp->smk_floor->smk_known, GFP_KERNEL);
+ if (!ctx->fsfloor)
+ return -ENOMEM;
+ }
+
+ if (sbsp->smk_hat) {
+ ctx->fshat = kstrdup(sbsp->smk_hat->smk_known, GFP_KERNEL);
+ if (!ctx->fshat)
+ return -ENOMEM;
+ }
+
+ if (isp->smk_flags & SMK_INODE_TRANSMUTE) {
+ if (sbsp->smk_root) {
+ ctx->fstransmute = kstrdup(sbsp->smk_root->smk_known, GFP_KERNEL);
+ if (!ctx->fstransmute)
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
* smack_fs_context_dup - Duplicate the security data on fs_context duplication
* @fc: The new filesystem context.
* @src_fc: The source filesystem context being duplicated.
@@ -4876,6 +4926,7 @@ static struct security_hook_list smack_hooks[] __ro_after_init = {
LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme),
LSM_HOOK_INIT(syslog, smack_syslog),
+ LSM_HOOK_INIT(fs_context_submount, smack_fs_context_submount),
LSM_HOOK_INIT(fs_context_dup, smack_fs_context_dup),
LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param),
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 9b1bcabd8414..97cee096a286 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -556,17 +556,14 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw);
static int reg_raw_update_once(struct hdac_device *codec, unsigned int reg,
unsigned int mask, unsigned int val)
{
- unsigned int orig;
- int err;
+ int err = 0;
if (!codec->regmap)
return reg_raw_update(codec, reg, mask, val);
mutex_lock(&codec->regmap_lock);
- regcache_cache_only(codec->regmap, true);
- err = regmap_read(codec->regmap, reg, &orig);
- regcache_cache_only(codec->regmap, false);
- if (err < 0)
+ /* Discard any updates to already initialised registers. */
+ if (!regcache_reg_cached(codec->regmap, reg))
err = regmap_update_bits(codec->regmap, reg, mask, val);
mutex_unlock(&codec->regmap_lock);
return err;
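
The hdac_regmap change above replaces the read-back of the original value with a
cache lookup: an "update once" now does nothing if the register is already
present in the regcache. A toy cache model of that write-once behaviour
(illustrative only, not the regmap API):

class RegCache:
    def __init__(self):
        self.regs = {}

    def reg_cached(self, reg):
        return reg in self.regs

    def update_bits(self, reg, mask, val):
        old = self.regs.get(reg, 0)
        self.regs[reg] = (old & ~mask) | (val & mask)

    def update_once(self, reg, mask, val):
        # Discard updates to already-initialised registers, as in
        # reg_raw_update_once() after this change.
        if not self.reg_cached(reg):
            self.update_bits(reg, mask, val)

cache = RegCache()
cache.update_once(0x20, 0xff, 0x5a)   # the first write is applied
cache.update_once(0x20, 0xff, 0x00)   # later "init" writes are ignored
assert cache.regs[0x20] == 0x5a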
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index b033bd290940..48444dda44de 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -152,8 +152,8 @@ static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i
void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { }
#endif /* SUPPORT_JOYSTICK */
-static int snd_card_ymfpci_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_card_ymfpci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -348,6 +348,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
return 0;
}
+static int snd_card_ymfpci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id));
+}
+
static struct pci_driver ymfpci_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ymfpci_ids,
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index a2fe3bd4f9a1..b304b3562c82 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -217,7 +217,7 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "82"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
}
},
{
@@ -251,6 +251,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Alienware"),
DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m17 R5 AMD"),
}
diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index 6ac501f008ec..8a879b6f4829 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -168,7 +168,7 @@ static int cs35l41_get_fs_mon_config_index(int freq)
static const DECLARE_TLV_DB_RANGE(dig_vol_tlv,
0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
1, 913, TLV_DB_MINMAX_ITEM(-10200, 1200));
-static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
+static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 50, 100, 0);
static const struct snd_kcontrol_new dre_ctrl =
SOC_DAPM_SINGLE("Switch", CS35L41_PWR_CTRL3, 20, 1, 0);
diff --git a/sound/soc/codecs/cs35l56-i2c.c b/sound/soc/codecs/cs35l56-i2c.c
index ed2a41943d97..40666e6698ba 100644
--- a/sound/soc/codecs/cs35l56-i2c.c
+++ b/sound/soc/codecs/cs35l56-i2c.c
@@ -62,10 +62,19 @@ static const struct i2c_device_id cs35l56_id_i2c[] = {
};
MODULE_DEVICE_TABLE(i2c, cs35l56_id_i2c);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cs35l56_asoc_acpi_match[] = {
+ { "CSC355C", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cs35l56_asoc_acpi_match);
+#endif
+
static struct i2c_driver cs35l56_i2c_driver = {
.driver = {
.name = "cs35l56",
.pm = &cs35l56_pm_ops_i2c_spi,
+ .acpi_match_table = ACPI_PTR(cs35l56_asoc_acpi_match),
},
.id_table = cs35l56_id_i2c,
.probe = cs35l56_i2c_probe,
diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c
index 996aab10500e..302f9c47407a 100644
--- a/sound/soc/codecs/cs35l56-spi.c
+++ b/sound/soc/codecs/cs35l56-spi.c
@@ -59,10 +59,19 @@ static const struct spi_device_id cs35l56_id_spi[] = {
};
MODULE_DEVICE_TABLE(spi, cs35l56_id_spi);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cs35l56_asoc_acpi_match[] = {
+ { "CSC355C", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cs35l56_asoc_acpi_match);
+#endif
+
static struct spi_driver cs35l56_spi_driver = {
.driver = {
.name = "cs35l56",
.pm = &cs35l56_pm_ops_i2c_spi,
+ .acpi_match_table = ACPI_PTR(cs35l56_asoc_acpi_match),
},
.id_table = cs35l56_id_spi,
.probe = cs35l56_spi_probe,
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index c03f9d3c9a13..fd06b9f9d496 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -5,7 +5,6 @@
// Copyright (C) 2023 Cirrus Logic, Inc. and
// Cirrus Logic International Semiconductor Ltd.
-#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -1354,26 +1353,22 @@ static int cs35l56_dsp_init(struct cs35l56_private *cs35l56)
return 0;
}
-static int cs35l56_acpi_get_name(struct cs35l56_private *cs35l56)
+static int cs35l56_get_firmware_uid(struct cs35l56_private *cs35l56)
{
- acpi_handle handle = ACPI_HANDLE(cs35l56->dev);
- const char *sub;
+ struct device *dev = cs35l56->dev;
+ const char *prop;
+ int ret;
- /* If there is no ACPI_HANDLE, there is no ACPI for this system, return 0 */
- if (!handle)
+ ret = device_property_read_string(dev, "cirrus,firmware-uid", &prop);
+ /* If bad sw node property, return 0 and fallback to legacy firmware path */
+ if (ret < 0)
return 0;
- sub = acpi_get_subsystem_id(handle);
- if (IS_ERR(sub)) {
- /* If bad ACPI, return 0 and fallback to legacy firmware path, otherwise fail */
- if (PTR_ERR(sub) == -ENODATA)
- return 0;
- else
- return PTR_ERR(sub);
- }
+ cs35l56->dsp.system_name = devm_kstrdup(dev, prop, GFP_KERNEL);
+ if (cs35l56->dsp.system_name == NULL)
+ return -ENOMEM;
- cs35l56->dsp.system_name = sub;
- dev_dbg(cs35l56->dev, "Subsystem ID: %s\n", cs35l56->dsp.system_name);
+ dev_dbg(dev, "Firmware UID: %s\n", cs35l56->dsp.system_name);
return 0;
}
@@ -1417,7 +1412,7 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
gpiod_set_value_cansleep(cs35l56->reset_gpio, 1);
}
- ret = cs35l56_acpi_get_name(cs35l56);
+ ret = cs35l56_get_firmware_uid(cs35l56);
if (ret != 0)
goto err;
@@ -1604,8 +1599,6 @@ void cs35l56_remove(struct cs35l56_private *cs35l56)
regcache_cache_only(cs35l56->regmap, true);
- kfree(cs35l56->dsp.system_name);
-
gpiod_set_value_cansleep(cs35l56->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(cs35l56->supplies), cs35l56->supplies);
}
diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c
index a88c6c28a394..ffb26e4a7e2f 100644
--- a/sound/soc/codecs/tas2781-comlib.c
+++ b/sound/soc/codecs/tas2781-comlib.c
@@ -57,16 +57,17 @@ static int tasdevice_change_chn_book(struct tasdevice_priv *tas_priv,
if (client->addr != tasdev->dev_addr) {
client->addr = tasdev->dev_addr;
- if (tasdev->cur_book == book) {
- ret = regmap_write(map,
- TASDEVICE_PAGE_SELECT, 0);
- if (ret < 0) {
- dev_err(tas_priv->dev, "%s, E=%d\n",
- __func__, ret);
- goto out;
- }
+		/* All tas2781s share the same regmap, so clear the page
+		 * inside the regmap when switching to another tas2781.
+		 * Register 0 in any page and any book inside the tas2781
+		 * is the same page-select register.
+ */
+ ret = regmap_write(map, TASDEVICE_PAGE_SELECT, 0);
+ if (ret < 0) {
+ dev_err(tas_priv->dev, "%s, E=%d\n",
+ __func__, ret);
+ goto out;
}
- goto out;
}
if (tasdev->cur_book != book) {
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 0c905bd0fab4..027416eb2f50 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -708,6 +708,9 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
struct snd_sof_pcm *spcm;
spcm = snd_sof_find_spcm_dai(component, rtd);
+ if (!spcm)
+ return -EINVAL;
+
time_info = spcm->stream[substream->stream].private;
/* delay calculation is not supported by current fw_reg ABI */
if (!time_info)
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
index bc48a4dabe5d..4798f9d18fe8 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_32.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
@@ -26,3 +26,6 @@
#ifndef __NR_setns
#define __NR_setns 346
#endif
+#ifndef __NR_seccomp
+#define __NR_seccomp 354
+#endif
diff --git a/tools/arch/x86/include/uapi/asm/unistd_64.h b/tools/arch/x86/include/uapi/asm/unistd_64.h
index f70d2cada256..d0f2043d7132 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_64.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_64.h
@@ -26,3 +26,6 @@
#ifndef __NR_getcpu
#define __NR_getcpu 309
#endif
+#ifndef __NR_seccomp
+#define __NR_seccomp 317
+#endif
diff --git a/tools/crypto/ccp/.gitignore b/tools/crypto/ccp/.gitignore
new file mode 100644
index 000000000000..bee8a64b79a9
--- /dev/null
+++ b/tools/crypto/ccp/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/tools/crypto/ccp/Makefile b/tools/crypto/ccp/Makefile
new file mode 100644
index 000000000000..ae4a66d1558a
--- /dev/null
+++ b/tools/crypto/ccp/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../../include/uapi -I../../../include
+
+TARGET = dbc_library.so
+
+all: $(TARGET)
+
+dbc_library.so: dbc.c
+ $(CC) $(CFLAGS) $(LDFLAGS) -shared -o $@ $<
+ chmod -x $@
+
+clean:
+ $(RM) $(TARGET)
diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c
new file mode 100644
index 000000000000..37e813175642
--- /dev/null
+++ b/tools/crypto/ccp/dbc.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Dynamic Boost Control sample library
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+/* if uapi header isn't installed, this might not yet exist */
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+#include <linux/psp-dbc.h>
+
+int get_nonce(int fd, void *nonce_out, void *signature)
+{
+ struct dbc_user_nonce tmp = {
+ .auth_needed = !!signature,
+ };
+ int ret;
+
+ assert(nonce_out);
+
+ if (signature)
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+ ret = ioctl(fd, DBCIOCNONCE, &tmp);
+ if (ret)
+ return ret;
+ memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce));
+
+ return 0;
+}
+
+int set_uid(int fd, __u8 *uid, __u8 *signature)
+{
+ struct dbc_user_setuid tmp;
+
+ assert(uid);
+ assert(signature);
+
+ memcpy(tmp.uid, uid, sizeof(tmp.uid));
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+ return ioctl(fd, DBCIOCUID, &tmp);
+}
+
+int process_param(int fd, int msg_index, __u8 *signature, int *data)
+{
+ struct dbc_user_param tmp = {
+ .msg_index = msg_index,
+ .param = *data,
+ };
+ int ret;
+
+ assert(signature);
+ assert(data);
+
+ memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+ ret = ioctl(fd, DBCIOCPARAM, &tmp);
+ if (ret)
+ return ret;
+
+ *data = tmp.param;
+ return 0;
+}
diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py
new file mode 100644
index 000000000000..3f6a825ffc9e
--- /dev/null
+++ b/tools/crypto/ccp/dbc.py
@@ -0,0 +1,64 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+
+import ctypes
+import os
+
+DBC_UID_SIZE = 16
+DBC_NONCE_SIZE = 16
+DBC_SIG_SIZE = 32
+
+PARAM_GET_FMAX_CAP = (0x3,)
+PARAM_SET_FMAX_CAP = (0x4,)
+PARAM_GET_PWR_CAP = (0x5,)
+PARAM_SET_PWR_CAP = (0x6,)
+PARAM_GET_GFX_MODE = (0x7,)
+PARAM_SET_GFX_MODE = (0x8,)
+PARAM_GET_CURR_TEMP = (0x9,)
+PARAM_GET_FMAX_MAX = (0xA,)
+PARAM_GET_FMAX_MIN = (0xB,)
+PARAM_GET_SOC_PWR_MAX = (0xC,)
+PARAM_GET_SOC_PWR_MIN = (0xD,)
+PARAM_GET_SOC_PWR_CUR = (0xE,)
+
+DEVICE_NODE = "/dev/dbc"
+
+lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL)
+
+
+def handle_error(code):
+ val = code * -1
+ raise OSError(val, os.strerror(val))
+
+
+def get_nonce(device, signature):
+ if not device:
+ raise ValueError("Device required")
+ buf = ctypes.create_string_buffer(DBC_NONCE_SIZE)
+ ret = lib.get_nonce(device.fileno(), ctypes.byref(buf), signature)
+ if ret:
+ handle_error(ret)
+ return buf.value
+
+
+def set_uid(device, new_uid, signature):
+ if not signature:
+ raise ValueError("Signature required")
+ if not new_uid:
+ raise ValueError("UID required")
+ ret = lib.set_uid(device.fileno(), new_uid, signature)
+ if ret:
+ handle_error(ret)
+ return True
+
+
+def process_param(device, message, signature, data=None):
+ if not signature:
+ raise ValueError("Signature required")
+    if not isinstance(message, tuple):
+ raise ValueError("Expected message tuple")
+ arg = ctypes.c_int(data if data else 0)
+ ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg))
+ if ret:
+ handle_error(ret)
+ return arg, signature
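
For completeness, a minimal caller of the wrapper above might look like the
following, assuming dbc_library.so has been built with the Makefile above, the
dbc driver exposes /dev/dbc, and signature.bin holds a 32-byte signature valid
for the current nonce (dbc_cli.py below is the full-featured version of this):

from dbc import get_nonce, process_param, PARAM_GET_FMAX_CAP, DEVICE_NODE

with open("signature.bin", "rb") as f:
    signature = f.read()

with open(DEVICE_NODE) as device:
    # An unauthenticated nonce request is always allowed.
    nonce = get_nonce(device, None)
    print("nonce:", nonce.hex())

    # Parameter access needs a signature for the current nonce.
    value, _ = process_param(device, PARAM_GET_FMAX_CAP, signature)
    print("fmax cap:", value.value)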
diff --git a/tools/crypto/ccp/dbc_cli.py b/tools/crypto/ccp/dbc_cli.py
new file mode 100755
index 000000000000..bf52233fd038
--- /dev/null
+++ b/tools/crypto/ccp/dbc_cli.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+import argparse
+import binascii
+import os
+import errno
+from dbc import *
+
+ERRORS = {
+ errno.EACCES: "Access is denied",
+ errno.E2BIG: "Excess data provided",
+ errno.EINVAL: "Bad parameters",
+ errno.EAGAIN: "Bad state",
+ errno.ENOENT: "Not implemented or message failure",
+ errno.EBUSY: "Busy",
+ errno.ENFILE: "Overflow",
+ errno.EPERM: "Signature invalid",
+}
+
+messages = {
+ "get-fmax-cap": PARAM_GET_FMAX_CAP,
+ "set-fmax-cap": PARAM_SET_FMAX_CAP,
+ "get-power-cap": PARAM_GET_PWR_CAP,
+ "set-power-cap": PARAM_SET_PWR_CAP,
+ "get-graphics-mode": PARAM_GET_GFX_MODE,
+ "set-graphics-mode": PARAM_SET_GFX_MODE,
+ "get-current-temp": PARAM_GET_CURR_TEMP,
+ "get-fmax-max": PARAM_GET_FMAX_MAX,
+ "get-fmax-min": PARAM_GET_FMAX_MIN,
+ "get-soc-power-max": PARAM_GET_SOC_PWR_MAX,
+ "get-soc-power-min": PARAM_GET_SOC_PWR_MIN,
+ "get-soc-power-cur": PARAM_GET_SOC_PWR_CUR,
+}
+
+
+def _pretty_buffer(ba):
+ return str(binascii.hexlify(ba, " "))
+
+
+def parse_args():
+ parser = argparse.ArgumentParser(
+        description="Dynamic Boost Control command line interface"
+ )
+ parser.add_argument(
+ "command",
+ choices=["get-nonce", "get-param", "set-param", "set-uid"],
+ help="Command to send",
+ )
+ parser.add_argument("--device", default="/dev/dbc", help="Device to operate")
+ parser.add_argument("--signature", help="File containing signature for command")
+ parser.add_argument("--message", choices=messages.keys(), help="Message index")
+ parser.add_argument("--data", help="Argument to pass to message")
+ parser.add_argument("--uid", help="File containing UID to pass")
+ return parser.parse_args()
+
+
+def pretty_error(code):
+ if code in ERRORS:
+ print(ERRORS[code])
+ else:
+ print("failed with return code %d" % code)
+
+
+if __name__ == "__main__":
+ args = parse_args()
+ data = 0
+ sig = None
+ uid = None
+ if not os.path.exists(args.device):
+ raise IOError("Missing device {device}".format(device=args.device))
+ if args.signature:
+ if not os.path.exists(args.signature):
+ raise ValueError("Invalid signature file %s" % args.signature)
+ with open(args.signature, "rb") as f:
+ sig = f.read()
+ if len(sig) != DBC_SIG_SIZE:
+ raise ValueError(
+ "Invalid signature length %d (expected %d)" % (len(sig), DBC_SIG_SIZE)
+ )
+ if args.uid:
+ if not os.path.exists(args.uid):
+ raise ValueError("Invalid uid file %s" % args.uid)
+ with open(args.uid, "rb") as f:
+ uid = f.read()
+ if len(uid) != DBC_UID_SIZE:
+ raise ValueError(
+ "Invalid UID length %d (expected %d)" % (len(uid), DBC_UID_SIZE)
+ )
+ if args.data:
+ try:
+ data = int(args.data, 10)
+ except ValueError:
+ data = int(args.data, 16)
+
+ with open(args.device) as d:
+ if args.command == "get-nonce":
+ try:
+ nonce = get_nonce(d, sig)
+ print("Nonce: %s" % _pretty_buffer(bytes(nonce)))
+ except OSError as e:
+ pretty_error(e.errno)
+ elif args.command == "set-uid":
+ try:
+ result = set_uid(d, uid, sig)
+ if result:
+ print("Set UID")
+ except OSError as e:
+ pretty_error(e.errno)
+ elif args.command == "get-param":
+ if not args.message or args.message.startswith("set"):
+ raise ValueError("Invalid message %s" % args.message)
+ try:
+ param, signature = process_param(d, messages[args.message], sig)
+ print(
+ "Parameter: {par}, response signature {sig}".format(
+ par=param,
+ sig=_pretty_buffer(bytes(signature)),
+ )
+ )
+ except OSError as e:
+ pretty_error(e.errno)
+ elif args.command == "set-param":
+ if not args.message or args.message.startswith("get"):
+ raise ValueError("Invalid message %s" % args.message)
+ try:
+ param, signature = process_param(d, messages[args.message], sig, data)
+ print(
+ "Parameter: {par}, response signature {sig}".format(
+ par=param,
+ sig=_pretty_buffer(bytes(signature)),
+ )
+ )
+ except OSError as e:
+ pretty_error(e.errno)
diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py
new file mode 100755
index 000000000000..998bb3e3cd04
--- /dev/null
+++ b/tools/crypto/ccp/test_dbc.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+import unittest
+import os
+import time
+import glob
+from dbc import *
+
+# Artificial delay between set commands
+SET_DELAY = 0.5
+
+
+class invalid_param(ctypes.Structure):
+ _fields_ = [
+ ("data", ctypes.c_uint8),
+ ]
+
+
+def system_is_secured() -> bool:
+    matches = glob.glob("/sys/bus/pci/drivers/ccp/**/fused_part")
+    if matches:
+        with open(matches[0], "r") as r:
+            return int(r.read()) == 1
+    return True
+
+
+class DynamicBoostControlTest(unittest.TestCase):
+ def __init__(self, data) -> None:
+ self.d = None
+ self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
+ self.uid = "1111111111111111"
+ super().__init__(data)
+
+ def setUp(self) -> None:
+ self.d = open(DEVICE_NODE)
+ return super().setUp()
+
+ def tearDown(self) -> None:
+ if self.d:
+ self.d.close()
+ return super().tearDown()
+
+
+class TestUnsupportedSystem(DynamicBoostControlTest):
+ def setUp(self) -> None:
+ if os.path.exists(DEVICE_NODE):
+ self.skipTest("system is supported")
+ with self.assertRaises(FileNotFoundError) as error:
+ super().setUp()
+ self.assertEqual(error.exception.errno, 2)
+
+ def test_unauthenticated_nonce(self) -> None:
+ """fetch unauthenticated nonce"""
+ with self.assertRaises(ValueError) as error:
+ get_nonce(self.d, None)
+
+
+class TestInvalidIoctls(DynamicBoostControlTest):
+ def __init__(self, data) -> None:
+ self.data = invalid_param()
+ self.data.data = 1
+ super().__init__(data)
+
+ def setUp(self) -> None:
+ if not os.path.exists(DEVICE_NODE):
+ self.skipTest("system is unsupported")
+ return super().setUp()
+
+ def test_invalid_nonce_ioctl(self) -> None:
+ """tries to call get_nonce ioctl with invalid data structures"""
+
+ # 0x1 (get nonce), and invalid data
+ INVALID1 = IOWR(ord("D"), 0x01, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID1, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+
+ def test_invalid_setuid_ioctl(self) -> None:
+ """tries to call set_uid ioctl with invalid data structures"""
+
+ # 0x2 (set uid), and invalid data
+ INVALID2 = IOW(ord("D"), 0x02, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID2, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+
+ def test_invalid_setuid_rw_ioctl(self) -> None:
+ """tries to call set_uid ioctl with invalid data structures"""
+
+ # 0x2 as RW (set uid), and invalid data
+ INVALID3 = IOWR(ord("D"), 0x02, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID3, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+
+ def test_invalid_param_ioctl(self) -> None:
+ """tries to call param ioctl with invalid data structures"""
+ # 0x3 (param), and invalid data
+ INVALID4 = IOWR(ord("D"), 0x03, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID4, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+
+ def test_invalid_call_ioctl(self) -> None:
+ """tries to call the DBC ioctl with invalid data structures"""
+ # 0x4, and invalid data
+ INVALID5 = IOWR(ord("D"), 0x04, invalid_param)
+ with self.assertRaises(OSError) as error:
+ fcntl.ioctl(self.d, INVALID5, self.data, True)
+ self.assertEqual(error.exception.errno, 22)
+
+
+class TestInvalidSignature(DynamicBoostControlTest):
+ def setUp(self) -> None:
+ if not os.path.exists(DEVICE_NODE):
+ self.skipTest("system is unsupported")
+ if not system_is_secured():
+ self.skipTest("system is unfused")
+ return super().setUp()
+
+ def test_unauthenticated_nonce(self) -> None:
+ """fetch unauthenticated nonce"""
+ get_nonce(self.d, None)
+
+ def test_multiple_unauthenticated_nonce(self) -> None:
+ """ensure state machine always returns nonce"""
+ for count in range(0, 2):
+ get_nonce(self.d, None)
+
+ def test_authenticated_nonce(self) -> None:
+ """fetch authenticated nonce"""
+ with self.assertRaises(OSError) as error:
+ get_nonce(self.d, self.signature)
+ self.assertEqual(error.exception.errno, 1)
+
+ def test_set_uid(self) -> None:
+ """set uid"""
+ with self.assertRaises(OSError) as error:
+ set_uid(self.d, self.uid, self.signature)
+ self.assertEqual(error.exception.errno, 1)
+
+ def test_get_param(self) -> None:
+ """fetch a parameter"""
+ with self.assertRaises(OSError) as error:
+ process_param(self.d, PARAM_GET_SOC_PWR_CUR, self.signature)
+ self.assertEqual(error.exception.errno, 1)
+
+ def test_set_param(self) -> None:
+ """set a parameter"""
+ with self.assertRaises(OSError) as error:
+ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, 1000)
+ self.assertEqual(error.exception.errno, 1)
+
+
+class TestUnFusedSystem(DynamicBoostControlTest):
+ def setup_identity(self) -> None:
+ """sets up the identity of the caller"""
+ # if already authenticated these may fail
+ try:
+ get_nonce(self.d, None)
+ except PermissionError:
+ pass
+ try:
+ set_uid(self.d, self.uid, self.signature)
+ except BlockingIOError:
+ pass
+ try:
+ get_nonce(self.d, self.signature)
+ except PermissionError:
+ pass
+
+ def setUp(self) -> None:
+ if not os.path.exists(DEVICE_NODE):
+ self.skipTest("system is unsupported")
+ if system_is_secured():
+ self.skipTest("system is fused")
+ super().setUp()
+ self.setup_identity()
+ time.sleep(SET_DELAY)
+
+ def test_get_valid_param(self) -> None:
+ """fetch all possible parameters"""
+ # SOC power
+ soc_power_max = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature)
+ soc_power_min = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature)
+ self.assertGreater(soc_power_max.parameter, soc_power_min.parameter)
+
+ # fmax
+ fmax_max = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature)
+ fmax_min = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature)
+ self.assertGreater(fmax_max.parameter, fmax_min.parameter)
+
+ # cap values
+ keys = {
+ "fmax-cap": PARAM_GET_FMAX_CAP,
+ "power-cap": PARAM_GET_PWR_CAP,
+ "current-temp": PARAM_GET_CURR_TEMP,
+ "soc-power-cur": PARAM_GET_SOC_PWR_CUR,
+ }
+ for k in keys:
+ result = process_param(self.d, keys[k], self.signature)
+ self.assertGreater(result.parameter, 0)
+
+ def test_get_invalid_param(self) -> None:
+ """fetch an invalid parameter"""
+ try:
+ set_uid(self.d, self.uid, self.signature)
+ except OSError:
+ pass
+ with self.assertRaises(OSError) as error:
+ process_param(self.d, (0xF,), self.signature)
+ self.assertEqual(error.exception.errno, 22)
+
+ def test_set_fmax(self) -> None:
+ """get/set fmax limit"""
+ # fetch current
+ original = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+
+ # set the fmax
+ target = original.parameter - 100
+ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target)
+ time.sleep(SET_DELAY)
+ new = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+ self.assertEqual(new.parameter, target)
+
+ # revert back to current
+ process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.parameter)
+ time.sleep(SET_DELAY)
+ cur = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
+ self.assertEqual(cur.parameter, original.parameter)
+
+ def test_set_power_cap(self) -> None:
+ """get/set power cap limit"""
+ # fetch current
+ original = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+
+ # set the power cap
+ target = original.parameter - 10
+ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target)
+ time.sleep(SET_DELAY)
+ new = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+ self.assertEqual(new.parameter, target)
+
+ # revert back to current
+ process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.parameter)
+ time.sleep(SET_DELAY)
+ cur = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
+ self.assertEqual(cur.parameter, original.parameter)
+
+ def test_set_3d_graphics_mode(self) -> None:
+ """set/get 3d graphics mode"""
+ # these aren't currently implemented but may be some day
+ # they are *expected* to fail
+ with self.assertRaises(OSError) as error:
+ process_param(self.d, PARAM_GET_GFX_MODE, self.signature)
+ self.assertEqual(error.exception.errno, 2)
+
+ time.sleep(SET_DELAY)
+
+ with self.assertRaises(OSError) as error:
+ process_param(self.d, PARAM_SET_GFX_MODE, self.signature, 1)
+ self.assertEqual(error.exception.errno, 2)
+
+
+if __name__ == "__main__":
+ unittest.main()
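The ioctl tests above build their request numbers by hand with IOWR(ord("D"), <nr>, <struct>) and expect errno 22 (EINVAL) whenever the size encoded in that number does not match what the driver registered. For reference, this is how such a number is composed on the C side with the kernel's _IOWR() macro; the payload struct below is purely illustrative (it is not the driver's real UAPI structure), the point is only that the struct size is baked into the request value:

#include <stdio.h>
#include <linux/ioctl.h>	/* _IOWR() */

/* hypothetical payload; a size mismatch is what makes the driver return -EINVAL */
struct dbc_example_payload {
	unsigned int data;
	unsigned char pad[4];
};

#define DBC_EXAMPLE_NONCE _IOWR('D', 0x1, struct dbc_example_payload)

int main(void)
{
	/* direction bits, type 'D', command 0x1 and sizeof(payload) all live in the number */
	printf("request = %#lx, size field = %zu\n",
	       (unsigned long)DBC_EXAMPLE_NONCE, sizeof(struct dbc_example_payload));
	return 0;
}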
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 9d36c8ce1fe7..1684216e826a 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -42,6 +42,18 @@
# define __always_inline inline __attribute__((always_inline))
#endif
+#ifndef __always_unused
+#define __always_unused __attribute__((__unused__))
+#endif
+
+#ifndef __noreturn
+#define __noreturn __attribute__((__noreturn__))
+#endif
+
+#ifndef unreachable
+#define unreachable() __builtin_unreachable()
+#endif
+
#ifndef noinline
#define noinline
#endif
@@ -190,4 +202,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)
+#ifndef OPTIMIZER_HIDE_VAR
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
+#endif
+
#endif /* _TOOLS_LINUX_COMPILER_H */
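OPTIMIZER_HIDE_VAR() works by routing the variable through an empty asm statement whose output and input are tied to the same register (the "0" constraint), so the compiler must assume the value may have changed and can no longer constant-fold or hoist it. A standalone illustration of the same construct (not kernel code, just the idiom):

#include <stdio.h>

#define OPTIMIZER_HIDE_VAR(var) \
	__asm__ ("" : "=r" (var) : "0" (var))

int main(void)
{
	int x = 21;

	/* without the barrier the compiler may fold the multiplication at build time */
	OPTIMIZER_HIDE_VAR(x);
	printf("%d\n", x * 2);
	return 0;
}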
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
index 64d67b080744..909b6eb500fe 100644
--- a/tools/include/nolibc/Makefile
+++ b/tools/include/nolibc/Makefile
@@ -27,6 +27,7 @@ nolibc_arch := $(patsubst arm64,aarch64,$(ARCH))
arch_file := arch-$(nolibc_arch).h
all_files := \
compiler.h \
+ crt.h \
ctype.h \
errno.h \
nolibc.h \
diff --git a/tools/include/nolibc/arch-aarch64.h b/tools/include/nolibc/arch-aarch64.h
index 11f294a406b7..6c33c46848e3 100644
--- a/tools/include/nolibc/arch-aarch64.h
+++ b/tools/include/nolibc/arch-aarch64.h
@@ -8,34 +8,7 @@
#define _NOLIBC_ARCH_AARCH64_H
#include "compiler.h"
-
-/* The struct returned by the newfstatat() syscall. Differs slightly from the
- * x86_64's stat one by field ordering, so be careful.
- */
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
-
- unsigned long st_rdev;
- unsigned long __pad1;
- long st_size;
- int st_blksize;
- int __pad2;
-
- long st_blocks;
- long st_atime;
- unsigned long st_atime_nsec;
- long st_mtime;
-
- unsigned long st_mtime_nsec;
- long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned int __unused[2];
-};
+#include "crt.h"
/* Syscalls for AARCH64 :
* - registers are 64-bit
@@ -56,8 +29,8 @@ struct sys_stat_struct {
({ \
register long _num __asm__ ("x8") = (num); \
register long _arg1 __asm__ ("x0"); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_num) \
@@ -70,8 +43,8 @@ struct sys_stat_struct {
({ \
register long _num __asm__ ("x8") = (num); \
register long _arg1 __asm__ ("x0") = (long)(arg1); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), \
@@ -86,8 +59,8 @@ struct sys_stat_struct {
register long _num __asm__ ("x8") = (num); \
register long _arg1 __asm__ ("x0") = (long)(arg1); \
register long _arg2 __asm__ ("x1") = (long)(arg2); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), "r"(_arg2), \
@@ -103,8 +76,8 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("x0") = (long)(arg1); \
register long _arg2 __asm__ ("x1") = (long)(arg2); \
register long _arg3 __asm__ ("x2") = (long)(arg3); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
@@ -121,8 +94,8 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("x1") = (long)(arg2); \
register long _arg3 __asm__ ("x2") = (long)(arg3); \
register long _arg4 __asm__ ("x3") = (long)(arg4); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r"(_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
@@ -140,8 +113,8 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("x2") = (long)(arg3); \
register long _arg4 __asm__ ("x3") = (long)(arg4); \
register long _arg5 __asm__ ("x4") = (long)(arg5); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r" (_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -160,8 +133,8 @@ struct sys_stat_struct {
register long _arg4 __asm__ ("x3") = (long)(arg4); \
register long _arg5 __asm__ ("x4") = (long)(arg5); \
register long _arg6 __asm__ ("x5") = (long)(arg6); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"svc #0\n" \
: "=r" (_arg1) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -171,33 +144,13 @@ struct sys_stat_struct {
_arg1; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
/* startup code */
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
-#ifdef _NOLIBC_STACKPROTECTOR
- "bl __stack_chk_init\n" /* initialize stack protector */
-#endif
- "ldr x0, [sp]\n" /* argc (x0) was in the stack */
- "add x1, sp, 8\n" /* argv (x1) = sp */
- "lsl x2, x0, 3\n" /* envp (x2) = 8*argc ... */
- "add x2, x2, 8\n" /* + 8 (skip null) */
- "add x2, x2, x1\n" /* + argv */
- "adrp x3, environ\n" /* x3 = &environ (high bits) */
- "str x2, [x3, #:lo12:environ]\n" /* store envp into environ */
- "mov x4, x2\n" /* search for auxv (follows NULL after last env) */
- "0:\n"
- "ldr x5, [x4], 8\n" /* x5 = *x4; x4 += 8 */
- "cbnz x5, 0b\n" /* and stop at NULL after last env */
- "adrp x3, _auxv\n" /* x3 = &_auxv (high bits) */
- "str x4, [x3, #:lo12:_auxv]\n" /* store x4 into _auxv */
- "and sp, x1, -16\n" /* sp must be 16-byte aligned in the callee */
- "bl main\n" /* main() returns the status code, we'll exit with it. */
- "mov x8, 93\n" /* NR_exit == 93 */
- "svc #0\n"
+ "mov x0, sp\n" /* save stack pointer to x0, as arg1 of _start_c */
+ "and sp, x0, -16\n" /* sp must be 16-byte aligned in the callee */
+ "bl _start_c\n" /* transfer to c runtime */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch-arm.h b/tools/include/nolibc/arch-arm.h
index ca4c66987497..cae4afa7c1c7 100644
--- a/tools/include/nolibc/arch-arm.h
+++ b/tools/include/nolibc/arch-arm.h
@@ -8,43 +8,7 @@
#define _NOLIBC_ARCH_ARM_H
#include "compiler.h"
-
-/* The struct returned by the stat() syscall, 32-bit only, the syscall returns
- * exactly 56 bytes (stops before the unused array). In big endian, the format
- * differs as devices are returned as short only.
- */
-struct sys_stat_struct {
-#if defined(__ARMEB__)
- unsigned short st_dev;
- unsigned short __pad1;
-#else
- unsigned long st_dev;
-#endif
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
-
-#if defined(__ARMEB__)
- unsigned short st_rdev;
- unsigned short __pad2;
-#else
- unsigned long st_rdev;
-#endif
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused[2];
-};
+#include "crt.h"
/* Syscalls for ARM in ARM or Thumb modes :
* - registers are 32-bit
@@ -90,8 +54,8 @@ struct sys_stat_struct {
({ \
register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
register long _arg1 __asm__ ("r0"); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -107,8 +71,8 @@ struct sys_stat_struct {
({ \
register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
register long _arg1 __asm__ ("r0") = (long)(arg1); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -125,8 +89,8 @@ struct sys_stat_struct {
register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
register long _arg1 __asm__ ("r0") = (long)(arg1); \
register long _arg2 __asm__ ("r1") = (long)(arg2); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -144,8 +108,8 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("r0") = (long)(arg1); \
register long _arg2 __asm__ ("r1") = (long)(arg2); \
register long _arg3 __asm__ ("r2") = (long)(arg3); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -164,8 +128,8 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("r1") = (long)(arg2); \
register long _arg3 __asm__ ("r2") = (long)(arg3); \
register long _arg4 __asm__ ("r3") = (long)(arg4); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -185,8 +149,8 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("r2") = (long)(arg3); \
register long _arg4 __asm__ ("r3") = (long)(arg4); \
register long _arg5 __asm__ ("r4") = (long)(arg5); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -207,8 +171,8 @@ struct sys_stat_struct {
register long _arg4 __asm__ ("r3") = (long)(arg4); \
register long _arg5 __asm__ ("r4") = (long)(arg5); \
register long _arg6 __asm__ ("r5") = (long)(arg6); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
_NOLIBC_THUMB_SET_R7 \
"svc #0\n" \
_NOLIBC_THUMB_RESTORE_R7 \
@@ -220,49 +184,14 @@ struct sys_stat_struct {
_arg1; \
})
-
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
/* startup code */
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
-#ifdef _NOLIBC_STACKPROTECTOR
- "bl __stack_chk_init\n" /* initialize stack protector */
-#endif
- "pop {%r0}\n" /* argc was in the stack */
- "mov %r1, %sp\n" /* argv = sp */
-
- "add %r2, %r0, $1\n" /* envp = (argc + 1) ... */
- "lsl %r2, %r2, $2\n" /* * 4 ... */
- "add %r2, %r2, %r1\n" /* + argv */
- "ldr %r3, 1f\n" /* r3 = &environ (see below) */
- "str %r2, [r3]\n" /* store envp into environ */
-
- "mov r4, r2\n" /* search for auxv (follows NULL after last env) */
- "0:\n"
- "mov r5, r4\n" /* r5 = r4 */
- "add r4, r4, #4\n" /* r4 += 4 */
- "ldr r5,[r5]\n" /* r5 = *r5 = *(r4-4) */
- "cmp r5, #0\n" /* and stop at NULL after last env */
- "bne 0b\n"
- "ldr %r3, 2f\n" /* r3 = &_auxv (low bits) */
- "str r4, [r3]\n" /* store r4 into _auxv */
-
- "mov %r3, $8\n" /* AAPCS : sp must be 8-byte aligned in the */
- "neg %r3, %r3\n" /* callee, and bl doesn't push (lr=pc) */
- "and %r3, %r3, %r1\n" /* so we do sp = r1(=sp) & r3(=-8); */
- "mov %sp, %r3\n"
-
- "bl main\n" /* main() returns the status code, we'll exit with it. */
- "movs r7, $1\n" /* NR_exit == 1 */
- "svc $0x00\n"
- ".align 2\n" /* below are the pointers to a few variables */
- "1:\n"
- ".word environ\n"
- "2:\n"
- ".word _auxv\n"
+ "mov %r0, sp\n" /* save stack pointer to %r0, as arg1 of _start_c */
+ "and ip, %r0, #-8\n" /* sp must be 8-byte aligned in the callee */
+ "mov sp, ip\n"
+ "bl _start_c\n" /* transfer to c runtime */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
index 3d672d925e9e..64415b9fac77 100644
--- a/tools/include/nolibc/arch-i386.h
+++ b/tools/include/nolibc/arch-i386.h
@@ -8,32 +8,7 @@
#define _NOLIBC_ARCH_I386_H
#include "compiler.h"
-
-/* The struct returned by the stat() syscall, 32-bit only, the syscall returns
- * exactly 56 bytes (stops before the unused array).
- */
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
-
- unsigned long st_rdev;
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused[2];
-};
+#include "crt.h"
/* Syscalls for i386 :
* - mostly similar to x86_64
@@ -57,8 +32,8 @@ struct sys_stat_struct {
({ \
long _ret; \
register long _num __asm__ ("eax") = (num); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "0"(_num) \
@@ -72,8 +47,8 @@ struct sys_stat_struct {
long _ret; \
register long _num __asm__ ("eax") = (num); \
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), \
@@ -89,8 +64,8 @@ struct sys_stat_struct {
register long _num __asm__ ("eax") = (num); \
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), \
@@ -107,8 +82,8 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("ebx") = (long)(arg1); \
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
register long _arg3 __asm__ ("edx") = (long)(arg3); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
@@ -126,8 +101,8 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("ecx") = (long)(arg2); \
register long _arg3 __asm__ ("edx") = (long)(arg3); \
register long _arg4 __asm__ ("esi") = (long)(arg4); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
@@ -146,8 +121,8 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("edx") = (long)(arg3); \
register long _arg4 __asm__ ("esi") = (long)(arg4); \
register long _arg5 __asm__ ("edi") = (long)(arg5); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"int $0x80\n" \
: "=a" (_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -180,9 +155,6 @@ struct sys_stat_struct {
_eax; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
/* startup code */
/*
* i386 System V ABI mandates:
@@ -190,33 +162,15 @@ const unsigned long *_auxv __attribute__((weak));
* 2) The deepest stack frame should be set to zero
*
*/
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
-#ifdef _NOLIBC_STACKPROTECTOR
- "call __stack_chk_init\n" /* initialize stack protector */
-#endif
- "pop %eax\n" /* argc (first arg, %eax) */
- "mov %esp, %ebx\n" /* argv[] (second arg, %ebx) */
- "lea 4(%ebx,%eax,4),%ecx\n" /* then a NULL then envp (third arg, %ecx) */
- "mov %ecx, environ\n" /* save environ */
- "xor %ebp, %ebp\n" /* zero the stack frame */
- "mov %ecx, %edx\n" /* search for auxv (follows NULL after last env) */
- "0:\n"
- "add $4, %edx\n" /* search for auxv using edx, it follows the */
- "cmp -4(%edx), %ebp\n" /* ... NULL after last env (ebp is zero here) */
- "jnz 0b\n"
- "mov %edx, _auxv\n" /* save it into _auxv */
- "and $-16, %esp\n" /* x86 ABI : esp must be 16-byte aligned before */
- "sub $4, %esp\n" /* the call instruction (args are aligned) */
- "push %ecx\n" /* push all registers on the stack so that we */
- "push %ebx\n" /* support both regparm and plain stack modes */
- "push %eax\n"
- "call main\n" /* main() returns the status code in %eax */
- "mov %eax, %ebx\n" /* retrieve exit code (32-bit int) */
- "movl $1, %eax\n" /* NR_exit == 1 */
- "int $0x80\n" /* exit now */
- "hlt\n" /* ensure it does not */
+ "xor %ebp, %ebp\n" /* zero the stack frame */
+ "mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
+ "and $-16, %esp\n" /* last pushed argument must be 16-byte aligned */
+ "push %eax\n" /* push arg1 on stack to support plain stack modes too */
+ "call _start_c\n" /* transfer to c runtime */
+ "hlt\n" /* ensure it does not return */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch-loongarch.h b/tools/include/nolibc/arch-loongarch.h
index ad3f266e7093..bf98f6220195 100644
--- a/tools/include/nolibc/arch-loongarch.h
+++ b/tools/include/nolibc/arch-loongarch.h
@@ -8,6 +8,7 @@
#define _NOLIBC_ARCH_LOONGARCH_H
#include "compiler.h"
+#include "crt.h"
/* Syscalls for LoongArch :
* - stack is 16-byte aligned
@@ -22,18 +23,19 @@
* On LoongArch, select() is not implemented so we have to use pselect6().
*/
#define __ARCH_WANT_SYS_PSELECT6
+#define _NOLIBC_SYSCALL_CLOBBERLIST \
+ "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
#define my_syscall0(num) \
({ \
register long _num __asm__ ("a7") = (num); \
register long _arg1 __asm__ ("a0"); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "=r"(_arg1) \
: "r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
@@ -43,12 +45,11 @@
register long _num __asm__ ("a7") = (num); \
register long _arg1 __asm__ ("a0") = (long)(arg1); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "+r"(_arg1) \
: "r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
@@ -59,13 +60,12 @@
register long _arg1 __asm__ ("a0") = (long)(arg1); \
register long _arg2 __asm__ ("a1") = (long)(arg2); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "+r"(_arg1) \
: "r"(_arg2), \
"r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
@@ -77,13 +77,12 @@
register long _arg2 __asm__ ("a1") = (long)(arg2); \
register long _arg3 __asm__ ("a2") = (long)(arg3); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), \
"r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
@@ -96,13 +95,12 @@
register long _arg3 __asm__ ("a2") = (long)(arg3); \
register long _arg4 __asm__ ("a3") = (long)(arg4); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), \
"r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
@@ -116,13 +114,12 @@
register long _arg4 __asm__ ("a3") = (long)(arg4); \
register long _arg5 __asm__ ("a4") = (long)(arg5); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
"r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
@@ -137,67 +134,29 @@
register long _arg5 __asm__ ("a4") = (long)(arg5); \
register long _arg6 __asm__ ("a5") = (long)(arg6); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"syscall 0\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
"r"(_num) \
- : "memory", "$t0", "$t1", "$t2", "$t3", \
- "$t4", "$t5", "$t6", "$t7", "$t8" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg1; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
#if __loongarch_grlen == 32
-#define LONGLOG "2"
-#define SZREG "4"
-#define REG_L "ld.w"
-#define LONG_S "st.w"
-#define LONG_ADD "add.w"
-#define LONG_ADDI "addi.w"
-#define LONG_SLL "slli.w"
#define LONG_BSTRINS "bstrins.w"
#else /* __loongarch_grlen == 64 */
-#define LONGLOG "3"
-#define SZREG "8"
-#define REG_L "ld.d"
-#define LONG_S "st.d"
-#define LONG_ADD "add.d"
-#define LONG_ADDI "addi.d"
-#define LONG_SLL "slli.d"
#define LONG_BSTRINS "bstrins.d"
#endif
/* startup code */
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
-#ifdef _NOLIBC_STACKPROTECTOR
- "bl __stack_chk_init\n" /* initialize stack protector */
-#endif
- REG_L " $a0, $sp, 0\n" /* argc (a0) was in the stack */
- LONG_ADDI " $a1, $sp, "SZREG"\n" /* argv (a1) = sp + SZREG */
- LONG_SLL " $a2, $a0, "LONGLOG"\n" /* envp (a2) = SZREG*argc ... */
- LONG_ADDI " $a2, $a2, "SZREG"\n" /* + SZREG (skip null) */
- LONG_ADD " $a2, $a2, $a1\n" /* + argv */
-
- "move $a3, $a2\n" /* iterate a3 over envp to find auxv (after NULL) */
- "0:\n" /* do { */
- REG_L " $a4, $a3, 0\n" /* a4 = *a3; */
- LONG_ADDI " $a3, $a3, "SZREG"\n" /* a3 += sizeof(void*); */
- "bne $a4, $zero, 0b\n" /* } while (a4); */
- "la.pcrel $a4, _auxv\n" /* a4 = &_auxv */
- LONG_S " $a3, $a4, 0\n" /* store a3 into _auxv */
-
- "la.pcrel $a3, environ\n" /* a3 = &environ */
- LONG_S " $a2, $a3, 0\n" /* store envp(a2) into environ */
- LONG_BSTRINS " $sp, $zero, 3, 0\n" /* sp must be 16-byte aligned */
- "bl main\n" /* main() returns the status code, we'll exit with it. */
- "li.w $a7, 93\n" /* NR_exit == 93 */
- "syscall 0\n"
+ "move $a0, $sp\n" /* save stack pointer to $a0, as arg1 of _start_c */
+ LONG_BSTRINS " $sp, $zero, 3, 0\n" /* $sp must be 16-byte aligned */
+ "bl _start_c\n" /* transfer to c runtime */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
index db24e0837a39..4ab6fa54beee 100644
--- a/tools/include/nolibc/arch-mips.h
+++ b/tools/include/nolibc/arch-mips.h
@@ -8,34 +8,7 @@
#define _NOLIBC_ARCH_MIPS_H
#include "compiler.h"
-
-/* The struct returned by the stat() syscall. 88 bytes are returned by the
- * syscall.
- */
-struct sys_stat_struct {
- unsigned int st_dev;
- long st_pad1[3];
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- long st_pad2[2];
- long st_size;
- long st_pad3;
-
- long st_atime;
- long st_atime_nsec;
- long st_mtime;
- long st_mtime_nsec;
-
- long st_ctime;
- long st_ctime_nsec;
- long st_blksize;
- long st_blocks;
- long st_pad4[14];
-};
+#include "crt.h"
/* Syscalls for MIPS ABI O32 :
* - WARNING! there's always a delayed slot!
@@ -57,19 +30,22 @@ struct sys_stat_struct {
* don't have to experience issues with register constraints.
*/
+#define _NOLIBC_SYSCALL_CLOBBERLIST \
+ "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9"
+
#define my_syscall0(num) \
({ \
register long _num __asm__ ("v0") = (num); \
register long _arg4 __asm__ ("a3"); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"addiu $sp, $sp, -32\n" \
"syscall\n" \
"addiu $sp, $sp, 32\n" \
: "=r"(_num), "=r"(_arg4) \
: "r"(_num) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg4 ? -_num : _num; \
})
@@ -79,16 +55,15 @@ struct sys_stat_struct {
register long _num __asm__ ("v0") = (num); \
register long _arg1 __asm__ ("a0") = (long)(arg1); \
register long _arg4 __asm__ ("a3"); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"addiu $sp, $sp, -32\n" \
"syscall\n" \
"addiu $sp, $sp, 32\n" \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg4 ? -_num : _num; \
})
@@ -99,16 +74,15 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("a0") = (long)(arg1); \
register long _arg2 __asm__ ("a1") = (long)(arg2); \
register long _arg4 __asm__ ("a3"); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"addiu $sp, $sp, -32\n" \
"syscall\n" \
"addiu $sp, $sp, 32\n" \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg4 ? -_num : _num; \
})
@@ -120,16 +94,15 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("a1") = (long)(arg2); \
register long _arg3 __asm__ ("a2") = (long)(arg3); \
register long _arg4 __asm__ ("a3"); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"addiu $sp, $sp, -32\n" \
"syscall\n" \
"addiu $sp, $sp, 32\n" \
: "=r"(_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg4 ? -_num : _num; \
})
@@ -141,16 +114,15 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("a1") = (long)(arg2); \
register long _arg3 __asm__ ("a2") = (long)(arg3); \
register long _arg4 __asm__ ("a3") = (long)(arg4); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"addiu $sp, $sp, -32\n" \
"syscall\n" \
"addiu $sp, $sp, 32\n" \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg4 ? -_num : _num; \
})
@@ -163,65 +135,58 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("a2") = (long)(arg3); \
register long _arg4 __asm__ ("a3") = (long)(arg4); \
register long _arg5 = (long)(arg5); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"addiu $sp, $sp, -32\n" \
"sw %7, 16($sp)\n" \
- "syscall\n " \
+ "syscall\n" \
"addiu $sp, $sp, 32\n" \
: "=r" (_num), "=r"(_arg4) \
: "0"(_num), \
"r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
); \
_arg4 ? -_num : _num; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ register long _arg5 = (long)(arg5); \
+ register long _arg6 = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "sw %7, 16($sp)\n" \
+ "sw %8, 20($sp)\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
+ ); \
+ _arg4 ? -_num : _num; \
+})
/* startup code, note that it's called __start on MIPS */
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector __start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector __start(void)
{
__asm__ volatile (
- /*".set nomips16\n"*/
".set push\n"
- ".set noreorder\n"
+ ".set noreorder\n"
".option pic0\n"
-#ifdef _NOLIBC_STACKPROTECTOR
- "jal __stack_chk_init\n" /* initialize stack protector */
- "nop\n" /* delayed slot */
-#endif
- /*".ent __start\n"*/
- /*"__start:\n"*/
- "lw $a0,($sp)\n" /* argc was in the stack */
- "addiu $a1, $sp, 4\n" /* argv = sp + 4 */
- "sll $a2, $a0, 2\n" /* a2 = argc * 4 */
- "add $a2, $a2, $a1\n" /* envp = argv + 4*argc ... */
- "addiu $a2, $a2, 4\n" /* ... + 4 */
- "lui $a3, %hi(environ)\n" /* load environ into a3 (hi) */
- "addiu $a3, %lo(environ)\n" /* load environ into a3 (lo) */
- "sw $a2,($a3)\n" /* store envp(a2) into environ */
-
- "move $t0, $a2\n" /* iterate t0 over envp, look for NULL */
- "0:" /* do { */
- "lw $a3, ($t0)\n" /* a3=*(t0); */
- "bne $a3, $0, 0b\n" /* } while (a3); */
- "addiu $t0, $t0, 4\n" /* delayed slot: t0+=4; */
- "lui $a3, %hi(_auxv)\n" /* load _auxv into a3 (hi) */
- "addiu $a3, %lo(_auxv)\n" /* load _auxv into a3 (lo) */
- "sw $t0, ($a3)\n" /* store t0 into _auxv */
-
- "li $t0, -8\n"
- "and $sp, $sp, $t0\n" /* sp must be 8-byte aligned */
- "addiu $sp,$sp,-16\n" /* the callee expects to save a0..a3 there! */
- "jal main\n" /* main() returns the status code, we'll exit with it. */
- "nop\n" /* delayed slot */
- "move $a0, $v0\n" /* retrieve 32-bit exit code from v0 */
- "li $v0, 4001\n" /* NR_exit == 4001 */
- "syscall\n"
- /*".end __start\n"*/
+ "move $a0, $sp\n" /* save stack pointer to $a0, as arg1 of _start_c */
+ "li $t0, -8\n"
+ "and $sp, $sp, $t0\n" /* $sp must be 8-byte aligned */
+ "addiu $sp, $sp, -16\n" /* the callee expects to save a0..a3 there */
+ "jal _start_c\n" /* transfer to c runtime */
+ " nop\n" /* delayed slot */
".set pop\n"
);
__builtin_unreachable();
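The new my_syscall6() closes the last gap in the MIPS O32 port: the fifth and sixth arguments cannot travel in registers, so the macro stores them at 16($sp) and 20($sp) inside the 32-byte argument area it reserves around the syscall. Six-argument syscalls such as pselect6() or mmap() therefore become reachable through the generic wrappers. A rough, standalone illustration of such a call (hosted C via glibc's syscall() so it compiles on any Linux system; nolibc itself would go through my_syscall6() instead):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };

	/* pselect6 takes six arguments: nfds, three fd sets, a timeout, a sigmask descriptor */
	long ret = syscall(SYS_pselect6, 0, NULL, NULL, NULL, &ts, NULL);

	printf("pselect6 returned %ld\n", ret);
	return 0;
}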
diff --git a/tools/include/nolibc/arch-powerpc.h b/tools/include/nolibc/arch-powerpc.h
new file mode 100644
index 000000000000..ac212e6185b2
--- /dev/null
+++ b/tools/include/nolibc/arch-powerpc.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * PowerPC specific definitions for NOLIBC
+ * Copyright (C) 2023 Zhangjin Wu <falcon@tinylab.org>
+ */
+
+#ifndef _NOLIBC_ARCH_POWERPC_H
+#define _NOLIBC_ARCH_POWERPC_H
+
+#include "compiler.h"
+#include "crt.h"
+
+/* Syscalls for PowerPC :
+ * - stack is 16-byte aligned
+ * - syscall number is passed in r0
+ * - arguments are in r3, r4, r5, r6, r7, r8, r9
+ * - the system call is performed by calling "sc"
+ * - syscall return comes in r3, and the summary overflow bit is checked
+ * to know if an error occurred, in which case errno is in r3.
+ * - the arguments are cast to long and assigned into the target
+ * registers which are then simply passed as registers to the asm code,
+ * so that we don't have to experience issues with register constraints.
+ */
+
+#define _NOLIBC_SYSCALL_CLOBBERLIST \
+ "memory", "cr0", "r12", "r11", "r10", "r9"
+
+#define my_syscall0(num) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num) \
+ : \
+ : _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5", "r4" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ register long _arg1 __asm__ ("r3") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num) \
+ : "0"(_arg1) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5", "r4" \
+ ); \
+ _ret; \
+})
+
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ register long _arg1 __asm__ ("r3") = (long)(arg1); \
+ register long _arg2 __asm__ ("r4") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num), "+r"(_arg2) \
+ : "0"(_arg1) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5" \
+ ); \
+ _ret; \
+})
+
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ register long _arg1 __asm__ ("r3") = (long)(arg1); \
+ register long _arg2 __asm__ ("r4") = (long)(arg2); \
+ register long _arg3 __asm__ ("r5") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3) \
+ : "0"(_arg1) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6" \
+ ); \
+ _ret; \
+})
+
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ register long _arg1 __asm__ ("r3") = (long)(arg1); \
+ register long _arg2 __asm__ ("r4") = (long)(arg2); \
+ register long _arg3 __asm__ ("r5") = (long)(arg3); \
+ register long _arg4 __asm__ ("r6") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
+ "+r"(_arg4) \
+ : "0"(_arg1) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7" \
+ ); \
+ _ret; \
+})
+
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ register long _arg1 __asm__ ("r3") = (long)(arg1); \
+ register long _arg2 __asm__ ("r4") = (long)(arg2); \
+ register long _arg3 __asm__ ("r5") = (long)(arg3); \
+ register long _arg4 __asm__ ("r6") = (long)(arg4); \
+ register long _arg5 __asm__ ("r7") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
+ "+r"(_arg4), "+r"(_arg5) \
+ : "0"(_arg1) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST, "r8" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _ret __asm__ ("r3"); \
+ register long _num __asm__ ("r0") = (num); \
+ register long _arg1 __asm__ ("r3") = (long)(arg1); \
+ register long _arg2 __asm__ ("r4") = (long)(arg2); \
+ register long _arg3 __asm__ ("r5") = (long)(arg3); \
+ register long _arg4 __asm__ ("r6") = (long)(arg4); \
+ register long _arg5 __asm__ ("r7") = (long)(arg5); \
+ register long _arg6 __asm__ ("r8") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ " sc\n" \
+ " bns+ 1f\n" \
+ " neg %0, %0\n" \
+ "1:\n" \
+ : "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
+ "+r"(_arg4), "+r"(_arg5), "+r"(_arg6) \
+ : "0"(_arg1) \
+ : _NOLIBC_SYSCALL_CLOBBERLIST \
+ ); \
+ _ret; \
+})
+
+#ifndef __powerpc64__
+/* FIXME: For 32-bit PowerPC, with newer gcc compilers (e.g. gcc 13.1.0),
+ * "omit-frame-pointer" fails with __attribute__((no_stack_protector)) but
+ * works with __attribute__((__optimize__("-fno-stack-protector")))
+ */
+#ifdef __no_stack_protector
+#undef __no_stack_protector
+#define __no_stack_protector __attribute__((__optimize__("-fno-stack-protector")))
+#endif
+#endif /* !__powerpc64__ */
+
+/* startup code */
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
+{
+#ifdef __powerpc64__
+#if _CALL_ELF == 2
+ /* with -mabi=elfv2, save TOC/GOT pointer to r2
+ * r12 is global entry pointer, we use it to compute TOC from r12
+ * https://www.llvm.org/devmtg/2014-04/PDFs/Talks/Euro-LLVM-2014-Weigand.pdf
+ * https://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.pdf
+ */
+ __asm__ volatile (
+ "addis 2, 12, .TOC. - _start@ha\n"
+ "addi 2, 2, .TOC. - _start@l\n"
+ );
+#endif /* _CALL_ELF == 2 */
+
+ __asm__ volatile (
+ "mr 3, 1\n" /* save stack pointer to r3, as arg1 of _start_c */
+ "clrrdi 1, 1, 4\n" /* align the stack to 16 bytes */
+ "li 0, 0\n" /* zero the frame pointer */
+ "stdu 1, -32(1)\n" /* the initial stack frame */
+ "bl _start_c\n" /* transfer to c runtime */
+ );
+#else
+ __asm__ volatile (
+ "mr 3, 1\n" /* save stack pointer to r3, as arg1 of _start_c */
+ "clrrwi 1, 1, 4\n" /* align the stack to 16 bytes */
+ "li 0, 0\n" /* zero the frame pointer */
+ "stwu 1, -16(1)\n" /* the initial stack frame */
+ "bl _start_c\n" /* transfer to c runtime */
+ );
+#endif
+ __builtin_unreachable();
+}
+
+#endif /* _NOLIBC_ARCH_POWERPC_H */
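As the comment block explains, PowerPC does not return a negated errno directly: the summary-overflow bit in cr0 flags the error and r3 carries the positive errno, which is why every macro follows "sc" with "bns+ 1f; neg %0, %0". After that fixup, callers see the same "negative value means -errno" convention as on every other architecture. A hedged sketch of a program observing this through the raw macro (build flags are indicative only; the nolibc selftests carry the canonical ones):

/* build roughly as: gcc -Os -static -nostdlib -include tools/include/nolibc/nolibc.h -o demo demo.c -lgcc */
int main(void)
{
	long ret = my_syscall1(__NR_close, -1);	/* invalid fd */

	/* on powerpc the "neg" in the macro already turned "SO set, r3 = EBADF" into -EBADF */
	printf("raw syscall return: %ld\n", ret);
	return 0;
}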
diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
index a2e8564e66d6..950cc2283fd7 100644
--- a/tools/include/nolibc/arch-riscv.h
+++ b/tools/include/nolibc/arch-riscv.h
@@ -8,41 +8,7 @@
#define _NOLIBC_ARCH_RISCV_H
#include "compiler.h"
-
-struct sys_stat_struct {
- unsigned long st_dev; /* Device. */
- unsigned long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long st_rdev; /* Device number, if device. */
- unsigned long __pad1;
- long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
- int __pad2;
- long st_blocks; /* Number 512-byte blocks allocated. */
- long st_atime; /* Time of last access. */
- unsigned long st_atime_nsec;
- long st_mtime; /* Time of last modification. */
- unsigned long st_mtime_nsec;
- long st_ctime; /* Time of last status change. */
- unsigned long st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-#if __riscv_xlen == 64
-#define PTRLOG "3"
-#define SZREG "8"
-#define REG_L "ld"
-#define REG_S "sd"
-#elif __riscv_xlen == 32
-#define PTRLOG "2"
-#define SZREG "4"
-#define REG_L "lw"
-#define REG_S "sw"
-#endif
+#include "crt.h"
/* Syscalls for RISCV :
* - stack is 16-byte aligned
@@ -63,7 +29,7 @@ struct sys_stat_struct {
register long _num __asm__ ("a7") = (num); \
register long _arg1 __asm__ ("a0"); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n\t" \
: "=r"(_arg1) \
: "r"(_num) \
@@ -77,7 +43,7 @@ struct sys_stat_struct {
register long _num __asm__ ("a7") = (num); \
register long _arg1 __asm__ ("a0") = (long)(arg1); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_num) \
@@ -92,7 +58,7 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("a0") = (long)(arg1); \
register long _arg2 __asm__ ("a1") = (long)(arg2); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), \
@@ -109,7 +75,7 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("a1") = (long)(arg2); \
register long _arg3 __asm__ ("a2") = (long)(arg3); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n\t" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), \
@@ -127,7 +93,7 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("a2") = (long)(arg3); \
register long _arg4 __asm__ ("a3") = (long)(arg4); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), \
@@ -146,7 +112,7 @@ struct sys_stat_struct {
register long _arg4 __asm__ ("a3") = (long)(arg4); \
register long _arg5 __asm__ ("a4") = (long)(arg5); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -166,7 +132,7 @@ struct sys_stat_struct {
register long _arg5 __asm__ ("a4") = (long)(arg5); \
register long _arg6 __asm__ ("a5") = (long)(arg6); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"ecall\n" \
: "+r"(_arg1) \
: "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
@@ -176,40 +142,17 @@ struct sys_stat_struct {
_arg1; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
/* startup code */
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
".option push\n"
".option norelax\n"
- "lla gp, __global_pointer$\n"
+ "lla gp, __global_pointer$\n"
".option pop\n"
-#ifdef _NOLIBC_STACKPROTECTOR
- "call __stack_chk_init\n" /* initialize stack protector */
-#endif
- REG_L" a0, 0(sp)\n" /* argc (a0) was in the stack */
- "add a1, sp, "SZREG"\n" /* argv (a1) = sp */
- "slli a2, a0, "PTRLOG"\n" /* envp (a2) = SZREG*argc ... */
- "add a2, a2, "SZREG"\n" /* + SZREG (skip null) */
- "add a2,a2,a1\n" /* + argv */
-
- "add a3, a2, zero\n" /* iterate a3 over envp to find auxv (after NULL) */
- "0:\n" /* do { */
- REG_L" a4, 0(a3)\n" /* a4 = *a3; */
- "add a3, a3, "SZREG"\n" /* a3 += sizeof(void*); */
- "bne a4, zero, 0b\n" /* } while (a4); */
- "lui a4, %hi(_auxv)\n" /* a4 = &_auxv (high bits) */
- REG_S" a3, %lo(_auxv)(a4)\n" /* store a3 into _auxv */
-
- "lui a3, %hi(environ)\n" /* a3 = &environ (high bits) */
- REG_S" a2,%lo(environ)(a3)\n"/* store envp(a2) into environ */
- "andi sp,a1,-16\n" /* sp must be 16-byte aligned */
- "call main\n" /* main() returns the status code, we'll exit with it. */
- "li a7, 93\n" /* NR_exit == 93 */
- "ecall\n"
+ "mv a0, sp\n" /* save stack pointer to a0, as arg1 of _start_c */
+ "andi sp, a0, -16\n" /* sp must be 16-byte aligned */
+ "call _start_c\n" /* transfer to c runtime */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
index 516dff5bff8b..5d60fd43f883 100644
--- a/tools/include/nolibc/arch-s390.h
+++ b/tools/include/nolibc/arch-s390.h
@@ -9,31 +9,7 @@
#include <asm/unistd.h>
#include "compiler.h"
-
-/* The struct returned by the stat() syscall, equivalent to stat64(). The
- * syscall returns 116 bytes and stops in the middle of __unused.
- */
-
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad1;
- unsigned long st_rdev;
- unsigned long st_size;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long st_blksize;
- long st_blocks;
- unsigned long __unused[3];
-};
+#include "crt.h"
/* Syscalls for s390:
* - registers are 64-bit
@@ -52,7 +28,7 @@ struct sys_stat_struct {
register long _num __asm__ ("1") = (num); \
register long _rc __asm__ ("2"); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "=d"(_rc) \
: "d"(_num) \
@@ -66,7 +42,7 @@ struct sys_stat_struct {
register long _num __asm__ ("1") = (num); \
register long _arg1 __asm__ ("2") = (long)(arg1); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "+d"(_arg1) \
: "d"(_num) \
@@ -81,7 +57,7 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("2") = (long)(arg1); \
register long _arg2 __asm__ ("3") = (long)(arg2); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "+d"(_arg1) \
: "d"(_arg2), "d"(_num) \
@@ -97,7 +73,7 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("3") = (long)(arg2); \
register long _arg3 __asm__ ("4") = (long)(arg3); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "+d"(_arg1) \
: "d"(_arg2), "d"(_arg3), "d"(_num) \
@@ -114,7 +90,7 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("4") = (long)(arg3); \
register long _arg4 __asm__ ("5") = (long)(arg4); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "+d"(_arg1) \
: "d"(_arg2), "d"(_arg3), "d"(_arg4), "d"(_num) \
@@ -132,7 +108,7 @@ struct sys_stat_struct {
register long _arg4 __asm__ ("5") = (long)(arg4); \
register long _arg5 __asm__ ("6") = (long)(arg5); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "+d"(_arg1) \
: "d"(_arg2), "d"(_arg3), "d"(_arg4), "d"(_arg5), \
@@ -152,7 +128,7 @@ struct sys_stat_struct {
register long _arg5 __asm__ ("6") = (long)(arg5); \
register long _arg6 __asm__ ("7") = (long)(arg6); \
\
- __asm__ volatile ( \
+ __asm__ volatile ( \
"svc 0\n" \
: "+d"(_arg1) \
: "d"(_arg2), "d"(_arg3), "d"(_arg4), "d"(_arg5), \
@@ -162,41 +138,14 @@ struct sys_stat_struct {
_arg1; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
/* startup code */
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
- "lg %r2,0(%r15)\n" /* argument count */
- "la %r3,8(%r15)\n" /* argument pointers */
-
- "xgr %r0,%r0\n" /* r0 will be our NULL value */
- /* search for envp */
- "lgr %r4,%r3\n" /* start at argv */
- "0:\n"
- "clg %r0,0(%r4)\n" /* entry zero? */
- "la %r4,8(%r4)\n" /* advance pointer */
- "jnz 0b\n" /* no -> test next pointer */
- /* yes -> r4 now contains start of envp */
- "larl %r1,environ\n"
- "stg %r4,0(%r1)\n"
-
- /* search for auxv */
- "lgr %r5,%r4\n" /* start at envp */
- "1:\n"
- "clg %r0,0(%r5)\n" /* entry zero? */
- "la %r5,8(%r5)\n" /* advance pointer */
- "jnz 1b\n" /* no -> test next pointer */
- "larl %r1,_auxv\n" /* yes -> store value in _auxv */
- "stg %r5,0(%r1)\n"
-
- "aghi %r15,-160\n" /* allocate new stackframe */
- "xc 0(8,%r15),0(%r15)\n" /* clear backchain */
- "brasl %r14,main\n" /* ret value of main is arg to exit */
- "lghi %r1,1\n" /* __NR_exit */
- "svc 0\n"
+ "lgr %r2, %r15\n" /* save stack pointer to %r2, as arg1 of _start_c */
+ "aghi %r15, -160\n" /* allocate new stackframe */
+ "xc 0(8,%r15), 0(%r15)\n" /* clear backchain */
+ "brasl %r14, _start_c\n" /* transfer to c runtime */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch-x86_64.h b/tools/include/nolibc/arch-x86_64.h
index 6fc4d8392742..e5ccb926c903 100644
--- a/tools/include/nolibc/arch-x86_64.h
+++ b/tools/include/nolibc/arch-x86_64.h
@@ -8,33 +8,7 @@
#define _NOLIBC_ARCH_X86_64_H
#include "compiler.h"
-
-/* The struct returned by the stat() syscall, equivalent to stat64(). The
- * syscall returns 116 bytes and stops in the middle of __unused.
- */
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
- unsigned int st_mode;
- unsigned int st_uid;
-
- unsigned int st_gid;
- unsigned int __pad0;
- unsigned long st_rdev;
- long st_size;
- long st_blksize;
-
- long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
-
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- long __unused[3];
-};
+#include "crt.h"
/* Syscalls for x86_64 :
* - registers are 64-bit
@@ -59,8 +33,8 @@ struct sys_stat_struct {
({ \
long _ret; \
register long _num __asm__ ("rax") = (num); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "0"(_num) \
@@ -74,8 +48,8 @@ struct sys_stat_struct {
long _ret; \
register long _num __asm__ ("rax") = (num); \
register long _arg1 __asm__ ("rdi") = (long)(arg1); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), \
@@ -91,8 +65,8 @@ struct sys_stat_struct {
register long _num __asm__ ("rax") = (num); \
register long _arg1 __asm__ ("rdi") = (long)(arg1); \
register long _arg2 __asm__ ("rsi") = (long)(arg2); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), \
@@ -109,8 +83,8 @@ struct sys_stat_struct {
register long _arg1 __asm__ ("rdi") = (long)(arg1); \
register long _arg2 __asm__ ("rsi") = (long)(arg2); \
register long _arg3 __asm__ ("rdx") = (long)(arg3); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), \
@@ -128,8 +102,8 @@ struct sys_stat_struct {
register long _arg2 __asm__ ("rsi") = (long)(arg2); \
register long _arg3 __asm__ ("rdx") = (long)(arg3); \
register long _arg4 __asm__ ("r10") = (long)(arg4); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
@@ -148,8 +122,8 @@ struct sys_stat_struct {
register long _arg3 __asm__ ("rdx") = (long)(arg3); \
register long _arg4 __asm__ ("r10") = (long)(arg4); \
register long _arg5 __asm__ ("r8") = (long)(arg5); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -169,8 +143,8 @@ struct sys_stat_struct {
register long _arg4 __asm__ ("r10") = (long)(arg4); \
register long _arg5 __asm__ ("r8") = (long)(arg5); \
register long _arg6 __asm__ ("r9") = (long)(arg6); \
- \
- __asm__ volatile ( \
+ \
+ __asm__ volatile ( \
"syscall\n" \
: "=a"(_ret) \
: "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
@@ -180,9 +154,6 @@ struct sys_stat_struct {
_ret; \
})
-char **environ __attribute__((weak));
-const unsigned long *_auxv __attribute__((weak));
-
/* startup code */
/*
* x86-64 System V ABI mandates:
@@ -190,29 +161,14 @@ const unsigned long *_auxv __attribute__((weak));
* 2) The deepest stack frame should be zero (the %rbp).
*
*/
-void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __no_stack_protector _start(void)
+void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_stack_protector _start(void)
{
__asm__ volatile (
-#ifdef _NOLIBC_STACKPROTECTOR
- "call __stack_chk_init\n" /* initialize stack protector */
-#endif
- "pop %rdi\n" /* argc (first arg, %rdi) */
- "mov %rsp, %rsi\n" /* argv[] (second arg, %rsi) */
- "lea 8(%rsi,%rdi,8),%rdx\n" /* then a NULL then envp (third arg, %rdx) */
- "mov %rdx, environ\n" /* save environ */
- "xor %ebp, %ebp\n" /* zero the stack frame */
- "mov %rdx, %rax\n" /* search for auxv (follows NULL after last env) */
- "0:\n"
- "add $8, %rax\n" /* search for auxv using rax, it follows the */
- "cmp -8(%rax), %rbp\n" /* ... NULL after last env (rbp is zero here) */
- "jnz 0b\n"
- "mov %rax, _auxv\n" /* save it into _auxv */
- "and $-16, %rsp\n" /* x86 ABI : esp must be 16-byte aligned before call */
- "call main\n" /* main() returns the status code, we'll exit with it. */
- "mov %eax, %edi\n" /* retrieve exit code (32 bit) */
- "mov $60, %eax\n" /* NR_exit == 60 */
- "syscall\n" /* really exit */
- "hlt\n" /* ensure it does not return */
+ "xor %ebp, %ebp\n" /* zero the stack frame */
+ "mov %rsp, %rdi\n" /* save stack pointer to %rdi, as arg1 of _start_c */
+ "and $-16, %rsp\n" /* %rsp must be 16-byte aligned before call */
+ "call _start_c\n" /* transfer to c runtime */
+ "hlt\n" /* ensure it does not return */
);
__builtin_unreachable();
}
diff --git a/tools/include/nolibc/arch.h b/tools/include/nolibc/arch.h
index 82b43935650f..e276fb0680af 100644
--- a/tools/include/nolibc/arch.h
+++ b/tools/include/nolibc/arch.h
@@ -25,6 +25,8 @@
#include "arch-aarch64.h"
#elif defined(__mips__) && defined(_ABIO32)
#include "arch-mips.h"
+#elif defined(__powerpc__)
+#include "arch-powerpc.h"
#elif defined(__riscv)
#include "arch-riscv.h"
#elif defined(__s390x__)
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
new file mode 100644
index 000000000000..a5f33fef1672
--- /dev/null
+++ b/tools/include/nolibc/crt.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * C Run Time support for NOLIBC
+ * Copyright (C) 2023 Zhangjin Wu <falcon@tinylab.org>
+ */
+
+#ifndef _NOLIBC_CRT_H
+#define _NOLIBC_CRT_H
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+static void __stack_chk_init(void);
+static void exit(int);
+
+void _start_c(long *sp)
+{
+ long argc;
+ char **argv;
+ char **envp;
+ const unsigned long *auxv;
+ /* silence potential warning: conflicting types for 'main' */
+ int _nolibc_main(int, char **, char **) __asm__ ("main");
+
+ /* initialize stack protector */
+ __stack_chk_init();
+
+ /*
+ * sp : argc <-- argument count, required by main()
+ * argv: argv[0] <-- argument vector, required by main()
+ * argv[1]
+ * ...
+ * argv[argc-1]
+ * null
+ * environ: environ[0] <-- environment variables, required by main() and getenv()
+ * environ[1]
+ * ...
+ * null
+ * _auxv: _auxv[0] <-- auxiliary vector, required by getauxval()
+ * _auxv[1]
+ * ...
+ * null
+ */
+
+ /* assign argc and argv */
+ argc = *sp;
+ argv = (void *)(sp + 1);
+
+ /* find environ */
+ environ = envp = argv + argc + 1;
+
+ /* find _auxv */
+ for (auxv = (void *)envp; *auxv++;)
+ ;
+ _auxv = auxv;
+
+ /* go to application */
+ exit(_nolibc_main(argc, argv, envp));
+}
+
+#endif /* _NOLIBC_CRT_H */
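crt.h now concentrates what every _start used to do in assembly: it takes the raw stack pointer, derives argc, argv, envp and the auxiliary vector from the layout documented above, publishes environ and _auxv, and finally calls exit(main(...)). A minimal nolibc-style program that relies on exactly this setup (build flags indicative; AT_PAGESZ is hard-coded as 6 here only to keep the sketch self-contained):

/* build roughly as: gcc -Os -static -nostdlib -include tools/include/nolibc/nolibc.h -o demo demo.c -lgcc */
int main(int argc, char **argv, char **envp)
{
	/* argc/argv/envp come straight from _start_c(); the environ-based helpers work too */
	printf("argc=%d argv[0]=%s\n", argc, argv[0]);
	printf("PATH=%s\n", getenv("PATH") ? getenv("PATH") : "(unset)");
	printf("AT_PAGESZ=%lu\n", getauxval(6 /* AT_PAGESZ */));
	(void)envp;
	return 0;
}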
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index 05a228a6ee78..1f8d821000ac 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -13,11 +13,10 @@
* Syscalls are split into 3 levels:
* - The lower level is the arch-specific syscall() definition, consisting in
* assembly code in compound expressions. These are called my_syscall0() to
- * my_syscall6() depending on the number of arguments. The MIPS
- * implementation is limited to 5 arguments. All input arguments are cast
- * to a long stored in a register. These expressions always return the
- * syscall's return value as a signed long value which is often either a
- * pointer or the negated errno value.
+ * my_syscall6() depending on the number of arguments. All input arguments
+ * are cast to a long stored in a register. These expressions always return
+ * the syscall's return value as a signed long value which is often either
+ * a pointer or the negated errno value.
*
* - The second level is mostly architecture-independent. It is made of
* static functions called sys_<name>() which rely on my_syscallN()
diff --git a/tools/include/nolibc/stackprotector.h b/tools/include/nolibc/stackprotector.h
index 88f7b2d098ff..13f1d0e60387 100644
--- a/tools/include/nolibc/stackprotector.h
+++ b/tools/include/nolibc/stackprotector.h
@@ -37,14 +37,15 @@ void __stack_chk_fail_local(void)
__attribute__((weak,section(".data.nolibc_stack_chk")))
uintptr_t __stack_chk_guard;
-__attribute__((weak,section(".text.nolibc_stack_chk"))) __no_stack_protector
-void __stack_chk_init(void)
+static __no_stack_protector void __stack_chk_init(void)
{
my_syscall3(__NR_getrandom, &__stack_chk_guard, sizeof(__stack_chk_guard), 0);
/* a bit more randomness in case getrandom() fails, ensure the guard is never 0 */
if (__stack_chk_guard != (uintptr_t) &__stack_chk_guard)
__stack_chk_guard ^= (uintptr_t) &__stack_chk_guard;
}
+#else /* !defined(_NOLIBC_STACKPROTECTOR) */
+static void __stack_chk_init(void) {}
#endif /* defined(_NOLIBC_STACKPROTECTOR) */
#endif /* _NOLIBC_STACKPROTECTOR_H */
diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
index 4b282435a59a..6665e272e213 100644
--- a/tools/include/nolibc/stdint.h
+++ b/tools/include/nolibc/stdint.h
@@ -15,7 +15,7 @@ typedef unsigned int uint32_t;
typedef signed int int32_t;
typedef unsigned long long uint64_t;
typedef signed long long int64_t;
-typedef unsigned long size_t;
+typedef __SIZE_TYPE__ size_t;
typedef signed long ssize_t;
typedef unsigned long uintptr_t;
typedef signed long intptr_t;
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 0eef91daf289..cae402c11e57 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -21,6 +21,11 @@
#define EOF (-1)
#endif
+/* Buffering mode used by setvbuf. */
+#define _IOFBF 0 /* Fully buffered. */
+#define _IOLBF 1 /* Line buffered. */
+#define _IONBF 2 /* No buffering. */
+
/* just define FILE as a non-empty type. The value of the pointer gives
* the FD: FILE=~fd for fd>=0 or NULL for fd<0. This way positive FILE
* are immediately identified as abnormal entries (i.e. possible copies
@@ -350,6 +355,28 @@ void perror(const char *msg)
fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
}
+static __attribute__((unused))
+int setvbuf(FILE *stream __attribute__((unused)),
+ char *buf __attribute__((unused)),
+ int mode,
+ size_t size __attribute__((unused)))
+{
+ /*
+ * nolibc does not support buffering so this is a nop. Just check mode
+ * is valid as required by the spec.
+ */
+ switch (mode) {
+ case _IOFBF:
+ case _IOLBF:
+ case _IONBF:
+ break;
+ default:
+ return EOF;
+ }
+
+ return 0;
+}
+
/* make sure to include all global symbols */
#include "nolibc.h"
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 902162f80337..bacfd35c5156 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -83,11 +83,10 @@ void free(void *ptr)
* declared as a char **, and must be terminated by a NULL (it is recommended
* to set this variable to the "envp" argument of main()). If the requested
* environment variable exists its value is returned otherwise NULL is
- * returned. getenv() is forcefully inlined so that the reference to "environ"
- * will be dropped if unused, even at -O0.
+ * returned.
*/
static __attribute__((unused))
-char *_getenv(const char *name, char **environ)
+char *getenv(const char *name)
{
int idx, i;
@@ -102,13 +101,6 @@ char *_getenv(const char *name, char **environ)
return NULL;
}
-static __inline__ __attribute__((unused,always_inline))
-char *getenv(const char *name)
-{
- extern char **environ;
- return _getenv(name, environ);
-}
-
static __attribute__((unused))
unsigned long getauxval(unsigned long type)
{
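With getenv() now reading the global environ directly, user code can mix direct iteration and lookups; a small sketch under the same assumptions as above:

int main(void)
{
	int i;

	/* environ is the weak global assigned by _start_c() in crt.h */
	for (i = 0; environ[i]; i++)
		printf("%s\n", environ[i]);

	/* getenv() walks that same array, no hidden _getenv() indirection */
	return getenv("PATH") ? 0 : 1;
}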
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index 856249a11890..fdb6bd6c0e2f 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -21,7 +21,6 @@
#include <linux/auxvec.h>
#include <linux/fcntl.h> /* for O_* and AT_* */
#include <linux/stat.h> /* for statx() */
-#include <linux/reboot.h> /* for LINUX_REBOOT_* */
#include <linux/prctl.h>
#include "arch.h"
@@ -29,6 +28,22 @@
#include "types.h"
+/* Syscall return helper: takes a syscall return value as argument and checks
+ * it for an error. This may only be used with signed returns (int or long), but
+ * not with pointers. An error is any value < 0. When an error is encountered,
+ * -ret is set into errno and -1 is returned. Otherwise the returned value is
+ * passed as-is with its type preserved.
+ */
+
+#define __sysret(arg) \
+({ \
+ __typeof__(arg) __sysret_arg = (arg); \
+ (__sysret_arg < 0) /* error ? */ \
+ ? (({ SET_ERRNO(-__sysret_arg); }), -1) /* ret -1 with errno = -arg */ \
+ : __sysret_arg; /* return original value */ \
+})
+
+
/* Functions in this file only describe syscalls. They're declared static so
* that the compiler usually decides to inline them while still being allowed
* to pass a pointer to one of their instances. Each syscall exists in two
@@ -78,10 +93,10 @@ int brk(void *addr)
static __attribute__((unused))
void *sbrk(intptr_t inc)
{
- void *ret;
-
/* first call to find current end */
- if ((ret = sys_brk(0)) && (sys_brk(ret + inc) == ret + inc))
+ void *ret = sys_brk(0);
+
+ if (ret && sys_brk(ret + inc) == ret + inc)
return ret + inc;
SET_ERRNO(ENOMEM);
@@ -102,13 +117,7 @@ int sys_chdir(const char *path)
static __attribute__((unused))
int chdir(const char *path)
{
- int ret = sys_chdir(path);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_chdir(path));
}
@@ -124,20 +133,14 @@ int sys_chmod(const char *path, mode_t mode)
#elif defined(__NR_chmod)
return my_syscall2(__NR_chmod, path, mode);
#else
-#error Neither __NR_fchmodat nor __NR_chmod defined, cannot implement sys_chmod()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int chmod(const char *path, mode_t mode)
{
- int ret = sys_chmod(path, mode);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_chmod(path, mode));
}
@@ -153,20 +156,14 @@ int sys_chown(const char *path, uid_t owner, gid_t group)
#elif defined(__NR_chown)
return my_syscall3(__NR_chown, path, owner, group);
#else
-#error Neither __NR_fchownat nor __NR_chown defined, cannot implement sys_chown()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int chown(const char *path, uid_t owner, gid_t group)
{
- int ret = sys_chown(path, owner, group);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_chown(path, owner, group));
}
@@ -183,13 +180,7 @@ int sys_chroot(const char *path)
static __attribute__((unused))
int chroot(const char *path)
{
- int ret = sys_chroot(path);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_chroot(path));
}
@@ -206,13 +197,7 @@ int sys_close(int fd)
static __attribute__((unused))
int close(int fd)
{
- int ret = sys_close(fd);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_close(fd));
}
@@ -229,13 +214,7 @@ int sys_dup(int fd)
static __attribute__((unused))
int dup(int fd)
{
- int ret = sys_dup(fd);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_dup(fd));
}
@@ -251,20 +230,14 @@ int sys_dup2(int old, int new)
#elif defined(__NR_dup2)
return my_syscall2(__NR_dup2, old, new);
#else
-#error Neither __NR_dup3 nor __NR_dup2 defined, cannot implement sys_dup2()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int dup2(int old, int new)
{
- int ret = sys_dup2(old, new);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_dup2(old, new));
}
@@ -282,13 +255,7 @@ int sys_dup3(int old, int new, int flags)
static __attribute__((unused))
int dup3(int old, int new, int flags)
{
- int ret = sys_dup3(old, new, flags);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_dup3(old, new, flags));
}
#endif
@@ -306,13 +273,7 @@ int sys_execve(const char *filename, char *const argv[], char *const envp[])
static __attribute__((unused))
int execve(const char *filename, char *const argv[], char *const envp[])
{
- int ret = sys_execve(filename, argv, envp);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_execve(filename, argv, envp));
}
@@ -351,7 +312,7 @@ pid_t sys_fork(void)
#elif defined(__NR_fork)
return my_syscall0(__NR_fork);
#else
-#error Neither __NR_clone nor __NR_fork defined, cannot implement sys_fork()
+ return -ENOSYS;
#endif
}
#endif
@@ -359,13 +320,7 @@ pid_t sys_fork(void)
static __attribute__((unused))
pid_t fork(void)
{
- pid_t ret = sys_fork();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_fork());
}
@@ -382,13 +337,7 @@ int sys_fsync(int fd)
static __attribute__((unused))
int fsync(int fd)
{
- int ret = sys_fsync(fd);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_fsync(fd));
}
@@ -405,13 +354,7 @@ int sys_getdents64(int fd, struct linux_dirent64 *dirp, int count)
static __attribute__((unused))
int getdents64(int fd, struct linux_dirent64 *dirp, int count)
{
- int ret = sys_getdents64(fd, dirp, count);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_getdents64(fd, dirp, count));
}
@@ -449,13 +392,7 @@ pid_t sys_getpgid(pid_t pid)
static __attribute__((unused))
pid_t getpgid(pid_t pid)
{
- pid_t ret = sys_getpgid(pid);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_getpgid(pid));
}
@@ -529,21 +466,13 @@ pid_t gettid(void)
static unsigned long getauxval(unsigned long key);
/*
- * long getpagesize(void);
+ * int getpagesize(void);
*/
static __attribute__((unused))
-long getpagesize(void)
+int getpagesize(void)
{
- long ret;
-
- ret = getauxval(AT_PAGESZ);
- if (!ret) {
- SET_ERRNO(ENOENT);
- return -1;
- }
-
- return ret;
+ return __sysret((int)getauxval(AT_PAGESZ) ?: -ENOENT);
}
@@ -554,19 +483,17 @@ long getpagesize(void)
static __attribute__((unused))
int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
{
+#ifdef __NR_gettimeofday
return my_syscall2(__NR_gettimeofday, tv, tz);
+#else
+ return -ENOSYS;
+#endif
}
static __attribute__((unused))
int gettimeofday(struct timeval *tv, struct timezone *tz)
{
- int ret = sys_gettimeofday(tv, tz);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_gettimeofday(tv, tz));
}
@@ -604,13 +531,7 @@ int sys_ioctl(int fd, unsigned long req, void *value)
static __attribute__((unused))
int ioctl(int fd, unsigned long req, void *value)
{
- int ret = sys_ioctl(fd, req, value);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_ioctl(fd, req, value));
}
/*
@@ -626,13 +547,7 @@ int sys_kill(pid_t pid, int signal)
static __attribute__((unused))
int kill(pid_t pid, int signal)
{
- int ret = sys_kill(pid, signal);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_kill(pid, signal));
}
@@ -648,20 +563,14 @@ int sys_link(const char *old, const char *new)
#elif defined(__NR_link)
return my_syscall2(__NR_link, old, new);
#else
-#error Neither __NR_linkat nor __NR_link defined, cannot implement sys_link()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int link(const char *old, const char *new)
{
- int ret = sys_link(old, new);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_link(old, new));
}
@@ -672,19 +581,17 @@ int link(const char *old, const char *new)
static __attribute__((unused))
off_t sys_lseek(int fd, off_t offset, int whence)
{
+#ifdef __NR_lseek
return my_syscall3(__NR_lseek, fd, offset, whence);
+#else
+ return -ENOSYS;
+#endif
}
static __attribute__((unused))
off_t lseek(int fd, off_t offset, int whence)
{
- off_t ret = sys_lseek(fd, offset, whence);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_lseek(fd, offset, whence));
}
@@ -700,20 +607,36 @@ int sys_mkdir(const char *path, mode_t mode)
#elif defined(__NR_mkdir)
return my_syscall2(__NR_mkdir, path, mode);
#else
-#error Neither __NR_mkdirat nor __NR_mkdir defined, cannot implement sys_mkdir()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int mkdir(const char *path, mode_t mode)
{
- int ret = sys_mkdir(path, mode);
+ return __sysret(sys_mkdir(path, mode));
+}
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+/*
+ * int rmdir(const char *path);
+ */
+
+static __attribute__((unused))
+int sys_rmdir(const char *path)
+{
+#ifdef __NR_rmdir
+ return my_syscall1(__NR_rmdir, path);
+#elif defined(__NR_unlinkat)
+ return my_syscall3(__NR_unlinkat, AT_FDCWD, path, AT_REMOVEDIR);
+#else
+ return -ENOSYS;
+#endif
+}
+
+static __attribute__((unused))
+int rmdir(const char *path)
+{
+ return __sysret(sys_rmdir(path));
}
@@ -729,42 +652,21 @@ long sys_mknod(const char *path, mode_t mode, dev_t dev)
#elif defined(__NR_mknod)
return my_syscall3(__NR_mknod, path, mode, dev);
#else
-#error Neither __NR_mknodat nor __NR_mknod defined, cannot implement sys_mknod()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int mknod(const char *path, mode_t mode, dev_t dev)
{
- int ret = sys_mknod(path, mode, dev);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_mknod(path, mode, dev));
}
-#ifndef MAP_SHARED
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
-#endif
-
-#ifndef MAP_FAILED
-#define MAP_FAILED ((void *)-1)
-#endif
-
#ifndef sys_mmap
static __attribute__((unused))
void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
off_t offset)
{
-#ifndef my_syscall6
- /* Function not implemented. */
- return (void *)-ENOSYS;
-#else
-
int n;
#if defined(__NR_mmap2)
@@ -775,10 +677,14 @@ void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
#endif
return (void *)my_syscall6(n, addr, length, prot, flags, fd, offset);
-#endif
}
#endif
+/* Note that on Linux, MAP_FAILED is -1 so we can use the generic __sysret()
+ * which returns -1 upon error and still satisfies userland code that checks for
+ * MAP_FAILED.
+ */
+
static __attribute__((unused))
void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
{
@@ -800,13 +706,7 @@ int sys_munmap(void *addr, size_t length)
static __attribute__((unused))
int munmap(void *addr, size_t length)
{
- int ret = sys_munmap(addr, length);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_munmap(addr, length));
}
/*
@@ -826,13 +726,7 @@ int mount(const char *src, const char *tgt,
const char *fst, unsigned long flags,
const void *data)
{
- int ret = sys_mount(src, tgt, fst, flags, data);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_mount(src, tgt, fst, flags, data));
}
@@ -848,7 +742,7 @@ int sys_open(const char *path, int flags, mode_t mode)
#elif defined(__NR_open)
return my_syscall3(__NR_open, path, flags, mode);
#else
-#error Neither __NR_openat nor __NR_open defined, cannot implement sys_open()
+ return -ENOSYS;
#endif
}
@@ -856,7 +750,6 @@ static __attribute__((unused))
int open(const char *path, int flags, ...)
{
mode_t mode = 0;
- int ret;
if (flags & O_CREAT) {
va_list args;
@@ -866,13 +759,31 @@ int open(const char *path, int flags, ...)
va_end(args);
}
- ret = sys_open(path, flags, mode);
+ return __sysret(sys_open(path, flags, mode));
+}
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+
+/*
+ * int pipe2(int pipefd[2], int flags);
+ * int pipe(int pipefd[2]);
+ */
+
+static __attribute__((unused))
+int sys_pipe2(int pipefd[2], int flags)
+{
+ return my_syscall2(__NR_pipe2, pipefd, flags);
+}
+
+static __attribute__((unused))
+int pipe2(int pipefd[2], int flags)
+{
+ return __sysret(sys_pipe2(pipefd, flags));
+}
+
+static __attribute__((unused))
+int pipe(int pipefd[2])
+{
+ return pipe2(pipefd, 0);
}
@@ -892,13 +803,7 @@ static __attribute__((unused))
int prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- int ret = sys_prctl(option, arg2, arg3, arg4, arg5);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_prctl(option, arg2, arg3, arg4, arg5));
}
@@ -915,13 +820,7 @@ int sys_pivot_root(const char *new, const char *old)
static __attribute__((unused))
int pivot_root(const char *new, const char *old)
{
- int ret = sys_pivot_root(new, old);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_pivot_root(new, old));
}
@@ -943,20 +842,14 @@ int sys_poll(struct pollfd *fds, int nfds, int timeout)
#elif defined(__NR_poll)
return my_syscall3(__NR_poll, fds, nfds, timeout);
#else
-#error Neither __NR_ppoll nor __NR_poll defined, cannot implement sys_poll()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int poll(struct pollfd *fds, int nfds, int timeout)
{
- int ret = sys_poll(fds, nfds, timeout);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_poll(fds, nfds, timeout));
}
@@ -973,13 +866,7 @@ ssize_t sys_read(int fd, void *buf, size_t count)
static __attribute__((unused))
ssize_t read(int fd, void *buf, size_t count)
{
- ssize_t ret = sys_read(fd, buf, count);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_read(fd, buf, count));
}
@@ -997,13 +884,7 @@ ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg)
static __attribute__((unused))
int reboot(int cmd)
{
- int ret = sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0));
}
@@ -1020,13 +901,7 @@ int sys_sched_yield(void)
static __attribute__((unused))
int sched_yield(void)
{
- int ret = sys_sched_yield();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_sched_yield());
}
@@ -1059,20 +934,14 @@ int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeva
#endif
return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
#else
-#error None of __NR_select, __NR_pselect6, nor __NR__newselect defined, cannot implement sys_select()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
{
- int ret = sys_select(nfds, rfds, wfds, efds, timeout);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_select(nfds, rfds, wfds, efds, timeout));
}
@@ -1089,13 +958,7 @@ int sys_setpgid(pid_t pid, pid_t pgid)
static __attribute__((unused))
int setpgid(pid_t pid, pid_t pgid)
{
- int ret = sys_setpgid(pid, pgid);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_setpgid(pid, pgid));
}
@@ -1112,55 +975,41 @@ pid_t sys_setsid(void)
static __attribute__((unused))
pid_t setsid(void)
{
- pid_t ret = sys_setsid();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_setsid());
}
-#if defined(__NR_statx)
/*
* int statx(int fd, const char *path, int flags, unsigned int mask, struct statx *buf);
+ * int stat(const char *path, struct stat *buf);
*/
static __attribute__((unused))
int sys_statx(int fd, const char *path, int flags, unsigned int mask, struct statx *buf)
{
+#ifdef __NR_statx
return my_syscall5(__NR_statx, fd, path, flags, mask, buf);
+#else
+ return -ENOSYS;
+#endif
}
static __attribute__((unused))
int statx(int fd, const char *path, int flags, unsigned int mask, struct statx *buf)
{
- int ret = sys_statx(fd, path, flags, mask, buf);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_statx(fd, path, flags, mask, buf));
}
-#endif
-/*
- * int stat(const char *path, struct stat *buf);
- * Warning: the struct stat's layout is arch-dependent.
- */
-#if defined(__NR_statx) && !defined(__NR_newfstatat) && !defined(__NR_stat)
-/*
- * Maybe we can just use statx() when available for all architectures?
- */
static __attribute__((unused))
-int sys_stat(const char *path, struct stat *buf)
+int stat(const char *path, struct stat *buf)
{
struct statx statx;
long ret;
- ret = sys_statx(AT_FDCWD, path, AT_NO_AUTOMOUNT, STATX_BASIC_STATS, &statx);
+ ret = __sysret(sys_statx(AT_FDCWD, path, AT_NO_AUTOMOUNT, STATX_BASIC_STATS, &statx));
+ if (ret == -1)
+ return ret;
+
buf->st_dev = ((statx.stx_dev_minor & 0xff)
| (statx.stx_dev_major << 8)
| ((statx.stx_dev_minor & ~0xff) << 12));
@@ -1181,53 +1030,8 @@ int sys_stat(const char *path, struct stat *buf)
buf->st_mtim.tv_nsec = statx.stx_mtime.tv_nsec;
buf->st_ctim.tv_sec = statx.stx_ctime.tv_sec;
buf->st_ctim.tv_nsec = statx.stx_ctime.tv_nsec;
- return ret;
-}
-#else
-static __attribute__((unused))
-int sys_stat(const char *path, struct stat *buf)
-{
- struct sys_stat_struct stat;
- long ret;
-
-#ifdef __NR_newfstatat
- /* only solution for arm64 */
- ret = my_syscall4(__NR_newfstatat, AT_FDCWD, path, &stat, 0);
-#elif defined(__NR_stat)
- ret = my_syscall2(__NR_stat, path, &stat);
-#else
-#error Neither __NR_newfstatat nor __NR_stat defined, cannot implement sys_stat()
-#endif
- buf->st_dev = stat.st_dev;
- buf->st_ino = stat.st_ino;
- buf->st_mode = stat.st_mode;
- buf->st_nlink = stat.st_nlink;
- buf->st_uid = stat.st_uid;
- buf->st_gid = stat.st_gid;
- buf->st_rdev = stat.st_rdev;
- buf->st_size = stat.st_size;
- buf->st_blksize = stat.st_blksize;
- buf->st_blocks = stat.st_blocks;
- buf->st_atim.tv_sec = stat.st_atime;
- buf->st_atim.tv_nsec = stat.st_atime_nsec;
- buf->st_mtim.tv_sec = stat.st_mtime;
- buf->st_mtim.tv_nsec = stat.st_mtime_nsec;
- buf->st_ctim.tv_sec = stat.st_ctime;
- buf->st_ctim.tv_nsec = stat.st_ctime_nsec;
- return ret;
-}
-#endif
-static __attribute__((unused))
-int stat(const char *path, struct stat *buf)
-{
- int ret = sys_stat(path, buf);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return 0;
}
@@ -1243,20 +1047,14 @@ int sys_symlink(const char *old, const char *new)
#elif defined(__NR_symlink)
return my_syscall2(__NR_symlink, old, new);
#else
-#error Neither __NR_symlinkat nor __NR_symlink defined, cannot implement sys_symlink()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int symlink(const char *old, const char *new)
{
- int ret = sys_symlink(old, new);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_symlink(old, new));
}
@@ -1290,13 +1088,7 @@ int sys_umount2(const char *path, int flags)
static __attribute__((unused))
int umount2(const char *path, int flags)
{
- int ret = sys_umount2(path, flags);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_umount2(path, flags));
}
@@ -1312,20 +1104,14 @@ int sys_unlink(const char *path)
#elif defined(__NR_unlink)
return my_syscall1(__NR_unlink, path);
#else
-#error Neither __NR_unlinkat nor __NR_unlink defined, cannot implement sys_unlink()
+ return -ENOSYS;
#endif
}
static __attribute__((unused))
int unlink(const char *path)
{
- int ret = sys_unlink(path);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_unlink(path));
}
@@ -1338,44 +1124,30 @@ int unlink(const char *path)
static __attribute__((unused))
pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage)
{
+#ifdef __NR_wait4
return my_syscall4(__NR_wait4, pid, status, options, rusage);
+#else
+ return -ENOSYS;
+#endif
}
static __attribute__((unused))
pid_t wait(int *status)
{
- pid_t ret = sys_wait4(-1, status, 0, NULL);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_wait4(-1, status, 0, NULL));
}
static __attribute__((unused))
pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage)
{
- pid_t ret = sys_wait4(pid, status, options, rusage);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_wait4(pid, status, options, rusage));
}
static __attribute__((unused))
pid_t waitpid(pid_t pid, int *status, int options)
{
- pid_t ret = sys_wait4(pid, status, options, NULL);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_wait4(pid, status, options, NULL));
}
@@ -1392,13 +1164,7 @@ ssize_t sys_write(int fd, const void *buf, size_t count)
static __attribute__((unused))
ssize_t write(int fd, const void *buf, size_t count)
{
- ssize_t ret = sys_write(fd, buf, count);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_write(fd, buf, count));
}
@@ -1415,13 +1181,7 @@ int sys_memfd_create(const char *name, unsigned int flags)
static __attribute__((unused))
int memfd_create(const char *name, unsigned int flags)
{
- ssize_t ret = sys_memfd_create(name, flags);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
+ return __sysret(sys_memfd_create(name, flags));
}
/* make sure to include all global symbols */
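The net effect of the __sysret() conversion, as seen from user code: every wrapper returns -1 and sets errno on failure, and syscalls missing on an architecture now fail at run time with ENOSYS instead of breaking the build. A short sketch (same build assumptions as above):

int main(void)
{
	int fds[2];

	if (chmod("/nonexistent", 0644) == -1)
		printf("chmod: errno=%d\n", errno);  /* ENOENT, or ENOSYS if unsupported */

	/* the new pipe()/pipe2() wrappers follow the same convention */
	if (pipe(fds) == 0) {
		write(fds[1], "x", 1);
		close(fds[0]);
		close(fds[1]);
	}
	return 0;
}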
diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
index f96e28bff4ba..8cfc4c860fa4 100644
--- a/tools/include/nolibc/types.h
+++ b/tools/include/nolibc/types.h
@@ -8,13 +8,15 @@
#define _NOLIBC_TYPES_H
#include "std.h"
-#include <linux/time.h>
+#include <linux/mman.h>
+#include <linux/reboot.h> /* for LINUX_REBOOT_* */
#include <linux/stat.h>
+#include <linux/time.h>
/* Only the generic macros and types may be defined here. The arch-specific
- * ones such as the O_RDONLY and related macros used by fcntl() and open(), or
- * the layout of sys_stat_struct must not be defined here.
+ * ones such as the O_RDONLY and related macros used by fcntl() and open()
+ * must not be defined here.
*/
/* stat flags (WARNING, octal here). We need to check for an existing
@@ -81,11 +83,25 @@
#define MAXPATHLEN (PATH_MAX)
#endif
+/* flags for mmap */
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
/* whence values for lseek() */
#define SEEK_SET 0
#define SEEK_CUR 1
#define SEEK_END 2
+/* flags for reboot */
+#define RB_AUTOBOOT LINUX_REBOOT_CMD_RESTART
+#define RB_HALT_SYSTEM LINUX_REBOOT_CMD_HALT
+#define RB_ENABLE_CAD LINUX_REBOOT_CMD_CAD_ON
+#define RB_DISABLE_CAD LINUX_REBOOT_CMD_CAD_OFF
+#define RB_POWER_OFF LINUX_REBOOT_CMD_POWER_OFF
+#define RB_SW_SUSPEND LINUX_REBOOT_CMD_SW_SUSPEND
+#define RB_KEXEC LINUX_REBOOT_CMD_KEXEC
+
/* Macros used on waitpid()'s return status */
#define WEXITSTATUS(status) (((status) & 0xff00) >> 8)
#define WIFEXITED(status) (((status) & 0x7f) == 0)
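The RB_* aliases let small init or test programs use the conventional reboot names; a hedged sketch (needs CAP_SYS_BOOT, e.g. when running as init in a test VM):

int main(void)
{
	sync();                   /* flush pending writes first */
	reboot(RB_POWER_OFF);     /* expands to sys_reboot() with the magic numbers */
	return 1;                 /* only reached if the reboot call failed */
}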
diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
index 0e832e10a0b2..e38f3660c051 100644
--- a/tools/include/nolibc/unistd.h
+++ b/tools/include/nolibc/unistd.h
@@ -56,18 +56,9 @@ int tcsetpgrp(int fd, pid_t pid)
return ioctl(fd, TIOCSPGRP, &pid);
}
-#define _syscall(N, ...) \
-({ \
- long _ret = my_syscall##N(__VA_ARGS__); \
- if (_ret < 0) { \
- SET_ERRNO(-_ret); \
- _ret = -1; \
- } \
- _ret; \
-})
-
-#define _syscall_narg(...) __syscall_narg(__VA_ARGS__, 6, 5, 4, 3, 2, 1, 0)
#define __syscall_narg(_0, _1, _2, _3, _4, _5, _6, N, ...) N
+#define _syscall_narg(...) __syscall_narg(__VA_ARGS__, 6, 5, 4, 3, 2, 1, 0)
+#define _syscall(N, ...) __sysret(my_syscall##N(__VA_ARGS__))
#define _syscall_n(N, ...) _syscall(N, __VA_ARGS__)
#define syscall(...) _syscall_n(_syscall_narg(__VA_ARGS__), ##__VA_ARGS__)
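The reworked generic syscall() macro counts its arguments to pick my_syscallN() and then applies __sysret(); for example (gettid and kill numbers exist on all supported architectures):

int main(void)
{
	long tid = syscall(__NR_gettid);               /* expands to my_syscall0() */
	long ret = syscall(__NR_kill, (pid_t)tid, 0);  /* expands to my_syscall2() */

	return (tid > 0 && ret == 0) ? 0 : 1;
}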
diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py
index ffaa8038aa8c..564ecf07cd2c 100755
--- a/tools/net/ynl/cli.py
+++ b/tools/net/ynl/cli.py
@@ -6,7 +6,7 @@ import json
import pprint
import time
-from lib import YnlFamily
+from lib import YnlFamily, Netlink
def main():
@@ -19,6 +19,14 @@ def main():
parser.add_argument('--dump', dest='dump', type=str)
parser.add_argument('--sleep', dest='sleep', type=int)
parser.add_argument('--subscribe', dest='ntf', type=str)
+ parser.add_argument('--replace', dest='flags', action='append_const',
+ const=Netlink.NLM_F_REPLACE)
+ parser.add_argument('--excl', dest='flags', action='append_const',
+ const=Netlink.NLM_F_EXCL)
+ parser.add_argument('--create', dest='flags', action='append_const',
+ const=Netlink.NLM_F_CREATE)
+ parser.add_argument('--append', dest='flags', action='append_const',
+ const=Netlink.NLM_F_APPEND)
args = parser.parse_args()
if args.no_schema:
@@ -37,7 +45,7 @@ def main():
time.sleep(args.sleep)
if args.do:
- reply = ynl.do(args.do, attrs)
+ reply = ynl.do(args.do, attrs, args.flags)
pprint.PrettyPrinter().pprint(reply)
if args.dump:
reply = ynl.dump(args.dump, attrs)
diff --git a/tools/net/ynl/lib/__init__.py b/tools/net/ynl/lib/__init__.py
index 4b3797fe784b..f7eaa07783e7 100644
--- a/tools/net/ynl/lib/__init__.py
+++ b/tools/net/ynl/lib/__init__.py
@@ -2,7 +2,7 @@
from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
SpecFamily, SpecOperation
-from .ynl import YnlFamily
+from .ynl import YnlFamily, Netlink
__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
- "SpecFamily", "SpecOperation", "YnlFamily"]
+ "SpecFamily", "SpecOperation", "YnlFamily", "Netlink"]
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
index 0ff0d18666b2..37bcb4d8b37b 100644
--- a/tools/net/ynl/lib/nlspec.py
+++ b/tools/net/ynl/lib/nlspec.py
@@ -322,6 +322,26 @@ class SpecOperation(SpecElement):
self.attr_set = self.family.attr_sets[attr_set_name]
+class SpecMcastGroup(SpecElement):
+ """Netlink Multicast Group
+
+ Information about a multicast group.
+
+ Value is only used for classic netlink families that use the
+ netlink-raw schema. Genetlink families use dynamic ID allocation
+ where the ids of multicast groups get resolved at runtime. Value
+ will be None for genetlink families.
+
+ Attributes:
+ name name of the multicast group
+ value integer id of this multicast group for netlink-raw or None
+ yaml raw spec as loaded from the spec file
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+ self.value = self.yaml.get('value')
+
+
class SpecFamily(SpecElement):
""" Netlink Family Spec class.
@@ -343,6 +363,7 @@ class SpecFamily(SpecElement):
ntfs dict of all async events
consts dict of all constants/enums
fixed_header string, optional name of family default fixed header struct
+ mcast_groups dict of all multicast groups (index by name)
"""
def __init__(self, spec_path, schema_path=None, exclude_ops=None):
with open(spec_path, "r") as stream:
@@ -384,6 +405,7 @@ class SpecFamily(SpecElement):
self.ops = collections.OrderedDict()
self.ntfs = collections.OrderedDict()
self.consts = collections.OrderedDict()
+ self.mcast_groups = collections.OrderedDict()
last_exception = None
while len(self._resolution_list) > 0:
@@ -416,6 +438,9 @@ class SpecFamily(SpecElement):
def new_operation(self, elem, req_val, rsp_val):
return SpecOperation(self, elem, req_val, rsp_val)
+ def new_mcast_group(self, elem):
+ return SpecMcastGroup(self, elem)
+
def add_unresolved(self, elem):
self._resolution_list.append(elem)
@@ -512,3 +537,9 @@ class SpecFamily(SpecElement):
self.ops[op.name] = op
elif op.is_async:
self.ntfs[op.name] = op
+
+ mcgs = self.yaml.get('mcast-groups')
+ if mcgs:
+ for elem in mcgs['list']:
+ mcg = self.new_mcast_group(elem)
+ self.mcast_groups[elem['name']] = mcg
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
index fa4f1c28efc5..13c4b019a881 100644
--- a/tools/net/ynl/lib/ynl.py
+++ b/tools/net/ynl/lib/ynl.py
@@ -25,6 +25,7 @@ class Netlink:
NETLINK_ADD_MEMBERSHIP = 1
NETLINK_CAP_ACK = 10
NETLINK_EXT_ACK = 11
+ NETLINK_GET_STRICT_CHK = 12
# Netlink message
NLMSG_ERROR = 2
@@ -34,6 +35,10 @@ class Netlink:
NLM_F_ACK = 4
NLM_F_ROOT = 0x100
NLM_F_MATCH = 0x200
+
+ NLM_F_REPLACE = 0x100
+ NLM_F_EXCL = 0x200
+ NLM_F_CREATE = 0x400
NLM_F_APPEND = 0x800
NLM_F_CAPPED = 0x100
@@ -228,6 +233,9 @@ class NlMsg:
desc += f" ({spec['doc']})"
self.extack['miss-type'] = desc
+ def cmd(self):
+ return self.nl_type
+
def __repr__(self):
msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}\n"
if self.error:
@@ -293,7 +301,7 @@ def _genl_load_families():
gm = GenlMsg(nl_msg)
fam = dict()
- for attr in gm.raw_attrs:
+ for attr in NlAttrs(gm.raw):
if attr.type == Netlink.CTRL_ATTR_FAMILY_ID:
fam['id'] = attr.as_scalar('u16')
elif attr.type == Netlink.CTRL_ATTR_FAMILY_NAME:
@@ -317,23 +325,13 @@ def _genl_load_families():
class GenlMsg:
- def __init__(self, nl_msg, fixed_header_members=[]):
+ def __init__(self, nl_msg):
self.nl = nl_msg
+ self.genl_cmd, self.genl_version, _ = struct.unpack_from("BBH", nl_msg.raw, 0)
+ self.raw = nl_msg.raw[4:]
- self.hdr = nl_msg.raw[0:4]
- offset = 4
-
- self.genl_cmd, self.genl_version, _ = struct.unpack("BBH", self.hdr)
-
- self.fixed_header_attrs = dict()
- for m in fixed_header_members:
- format = NlAttr.get_format(m.type, m.byte_order)
- decoded = format.unpack_from(nl_msg.raw, offset)
- offset += format.size
- self.fixed_header_attrs[m.name] = decoded[0]
-
- self.raw = nl_msg.raw[offset:]
- self.raw_attrs = NlAttrs(self.raw)
+ def cmd(self):
+ return self.genl_cmd
def __repr__(self):
msg = repr(self.nl)
@@ -343,9 +341,41 @@ class GenlMsg:
return msg
-class GenlFamily:
- def __init__(self, family_name):
+class NetlinkProtocol:
+ def __init__(self, family_name, proto_num):
self.family_name = family_name
+ self.proto_num = proto_num
+
+ def _message(self, nl_type, nl_flags, seq=None):
+ if seq is None:
+ seq = random.randint(1, 1024)
+ nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
+ return nlmsg
+
+ def message(self, flags, command, version, seq=None):
+ return self._message(command, flags, seq)
+
+ def _decode(self, nl_msg):
+ return nl_msg
+
+ def decode(self, ynl, nl_msg):
+ msg = self._decode(nl_msg)
+ fixed_header_size = 0
+ if ynl:
+ op = ynl.rsp_by_value[msg.cmd()]
+ fixed_header_size = ynl._fixed_header_size(op)
+ msg.raw_attrs = NlAttrs(msg.raw[fixed_header_size:])
+ return msg
+
+ def get_mcast_id(self, mcast_name, mcast_groups):
+ if mcast_name not in mcast_groups:
+ raise Exception(f'Multicast group "{mcast_name}" not present in the spec')
+ return mcast_groups[mcast_name].value
+
+
+class GenlProtocol(NetlinkProtocol):
+ def __init__(self, family_name):
+ super().__init__(family_name, Netlink.NETLINK_GENERIC)
global genl_family_name_to_id
if genl_family_name_to_id is None:
@@ -354,6 +384,19 @@ class GenlFamily:
self.genl_family = genl_family_name_to_id[family_name]
self.family_id = genl_family_name_to_id[family_name]['id']
+ def message(self, flags, command, version, seq=None):
+ nlmsg = self._message(self.family_id, flags, seq)
+ genlmsg = struct.pack("BBH", command, version, 0)
+ return nlmsg + genlmsg
+
+ def _decode(self, nl_msg):
+ return GenlMsg(nl_msg)
+
+ def get_mcast_id(self, mcast_name, mcast_groups):
+ if mcast_name not in self.genl_family['mcast']:
+ raise Exception(f'Multicast group "{mcast_name}" not present in the family')
+ return self.genl_family['mcast'][mcast_name]
+
#
# YNL implementation details.
@@ -366,9 +409,19 @@ class YnlFamily(SpecFamily):
self.include_raw = False
- self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC)
+ try:
+ if self.proto == "netlink-raw":
+ self.nlproto = NetlinkProtocol(self.yaml['name'],
+ self.yaml['protonum'])
+ else:
+ self.nlproto = GenlProtocol(self.yaml['name'])
+ except KeyError:
+ raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
+
+ self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, self.nlproto.proto_num)
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_GET_STRICT_CHK, 1)
self.async_msg_ids = set()
self.async_msg_queue = []
@@ -381,18 +434,12 @@ class YnlFamily(SpecFamily):
bound_f = functools.partial(self._op, op_name)
setattr(self, op.ident_name, bound_f)
- try:
- self.family = GenlFamily(self.yaml['name'])
- except KeyError:
- raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
def ntf_subscribe(self, mcast_name):
- if mcast_name not in self.family.genl_family['mcast']:
- raise Exception(f'Multicast group "{mcast_name}" not present in the family')
-
+ mcast_id = self.nlproto.get_mcast_id(mcast_name, self.mcast_groups)
self.sock.bind((0, 0))
self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
- self.family.genl_family['mcast'][mcast_name])
+ mcast_id)
def _add_attr(self, space, name, value):
try:
@@ -454,6 +501,17 @@ class YnlFamily(SpecFamily):
decoded = NlAttr.formatted_string(decoded, attr_spec.display_hint)
return decoded
+ def _decode_array_nest(self, attr, attr_spec):
+ decoded = []
+ offset = 0
+ while offset < len(attr.raw):
+ item = NlAttr(attr.raw, offset)
+ offset += item.full_len
+
+ subattrs = self._decode(NlAttrs(item.raw), attr_spec['nested-attributes'])
+ decoded.append({ item.type: subattrs })
+ return decoded
+
def _decode(self, attrs, space):
attr_space = self.attr_sets[space]
rsp = dict()
@@ -473,6 +531,8 @@ class YnlFamily(SpecFamily):
decoded = True
elif attr_spec["type"] in NlAttr.type_formats:
decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order)
+ elif attr_spec["type"] == 'array-nest':
+ decoded = self._decode_array_nest(attr, attr_spec)
else:
raise Exception(f'Unknown {attr_spec["type"]} with name {attr_spec["name"]}')
@@ -514,25 +574,53 @@ class YnlFamily(SpecFamily):
return None
- def _decode_extack(self, request, attr_space, extack):
+ def _decode_extack(self, request, op, extack):
if 'bad-attr-offs' not in extack:
return
- genl_req = GenlMsg(NlMsg(request, 0, attr_space=attr_space))
- path = self._decode_extack_path(genl_req.raw_attrs, attr_space,
- 20, extack['bad-attr-offs'])
+ msg = self.nlproto.decode(self, NlMsg(request, 0, op.attr_set))
+ offset = 20 + self._fixed_header_size(op)
+ path = self._decode_extack_path(msg.raw_attrs, op.attr_set, offset,
+ extack['bad-attr-offs'])
if path:
del extack['bad-attr-offs']
extack['bad-attr'] = path
- def handle_ntf(self, nl_msg, genl_msg):
+ def _fixed_header_size(self, op):
+ if op.fixed_header:
+ fixed_header_members = self.consts[op.fixed_header].members
+ size = 0
+ for m in fixed_header_members:
+ format = NlAttr.get_format(m.type, m.byte_order)
+ size += format.size
+ return size
+ else:
+ return 0
+
+ def _decode_fixed_header(self, msg, name):
+ fixed_header_members = self.consts[name].members
+ fixed_header_attrs = dict()
+ offset = 0
+ for m in fixed_header_members:
+ format = NlAttr.get_format(m.type, m.byte_order)
+ [ value ] = format.unpack_from(msg.raw, offset)
+ offset += format.size
+ if m.enum:
+ value = self._decode_enum(value, m)
+ fixed_header_attrs[m.name] = value
+ return fixed_header_attrs
+
+ def handle_ntf(self, decoded):
msg = dict()
if self.include_raw:
- msg['nlmsg'] = nl_msg
- msg['genlmsg'] = genl_msg
- op = self.rsp_by_value[genl_msg.genl_cmd]
+ msg['raw'] = decoded
+ op = self.rsp_by_value[decoded.cmd()]
+ attrs = self._decode(decoded.raw_attrs, op.attr_set.name)
+ if op.fixed_header:
+ attrs.update(self._decode_fixed_header(decoded, op.fixed_header))
+
msg['name'] = op['name']
- msg['msg'] = self._decode(genl_msg.raw_attrs, op.attr_set.name)
+ msg['msg'] = attrs
self.async_msg_queue.append(msg)
def check_ntf(self):
@@ -552,12 +640,12 @@ class YnlFamily(SpecFamily):
print("Netlink done while checking for ntf!?")
continue
- gm = GenlMsg(nl_msg)
- if gm.genl_cmd not in self.async_msg_ids:
- print("Unexpected msg id done while checking for ntf", gm)
+ decoded = self.nlproto.decode(self, nl_msg)
+ if decoded.cmd() not in self.async_msg_ids:
+ print("Unexpected msg id done while checking for ntf", decoded)
continue
- self.handle_ntf(nl_msg, gm)
+ self.handle_ntf(decoded)
def operation_do_attributes(self, name):
"""
@@ -570,15 +658,17 @@ class YnlFamily(SpecFamily):
return op['do']['request']['attributes'].copy()
- def _op(self, method, vals, dump=False):
+ def _op(self, method, vals, flags, dump=False):
op = self.ops[method]
nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK
+ for flag in flags or []:
+ nl_flags |= flag
if dump:
nl_flags |= Netlink.NLM_F_DUMP
req_seq = random.randint(1024, 65535)
- msg = _genl_msg(self.family.family_id, nl_flags, op.req_value, 1, req_seq)
+ msg = self.nlproto.message(nl_flags, op.req_value, 1, req_seq)
fixed_header_members = []
if op.fixed_header:
fixed_header_members = self.consts[op.fixed_header].members
@@ -599,7 +689,7 @@ class YnlFamily(SpecFamily):
nms = NlMsgs(reply, attr_space=op.attr_set)
for nl_msg in nms:
if nl_msg.extack:
- self._decode_extack(msg, op.attr_set, nl_msg.extack)
+ self._decode_extack(msg, op, nl_msg.extack)
if nl_msg.error:
raise NlError(nl_msg)
@@ -610,18 +700,20 @@ class YnlFamily(SpecFamily):
done = True
break
- gm = GenlMsg(nl_msg, fixed_header_members)
+ decoded = self.nlproto.decode(self, nl_msg)
+
# Check if this is a reply to our request
- if nl_msg.nl_seq != req_seq or gm.genl_cmd != op.rsp_value:
- if gm.genl_cmd in self.async_msg_ids:
- self.handle_ntf(nl_msg, gm)
+ if nl_msg.nl_seq != req_seq or decoded.cmd() != op.rsp_value:
+ if decoded.cmd() in self.async_msg_ids:
+ self.handle_ntf(decoded)
continue
else:
- print('Unexpected message: ' + repr(gm))
+ print('Unexpected message: ' + repr(decoded))
continue
- rsp_msg = self._decode(gm.raw_attrs, op.attr_set.name)
- rsp_msg.update(gm.fixed_header_attrs)
+ rsp_msg = self._decode(decoded.raw_attrs, op.attr_set.name)
+ if op.fixed_header:
+ rsp_msg.update(self._decode_fixed_header(decoded, op.fixed_header))
rsp.append(rsp_msg)
if not rsp:
@@ -630,8 +722,8 @@ class YnlFamily(SpecFamily):
return rsp[0]
return rsp
- def do(self, method, vals):
- return self._op(method, vals)
+ def do(self, method, vals, flags):
+ return self._op(method, vals, flags)
def dump(self, method, vals):
- return self._op(method, vals, dump=True)
+ return self._op(method, vals, [], dump=True)
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index 0f158dc8139b..07bbc449329e 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -1,5 +1,6 @@
perf-y += sched-messaging.o
perf-y += sched-pipe.o
+perf-y += sched-seccomp-notify.o
perf-y += syscall.o
perf-y += mem-functions.o
perf-y += futex-hash.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 0d2b65976212..a0625c77bea3 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -21,6 +21,7 @@ extern struct timeval bench__start, bench__end, bench__runtime;
int bench_numa(int argc, const char **argv);
int bench_sched_messaging(int argc, const char **argv);
int bench_sched_pipe(int argc, const char **argv);
+int bench_sched_seccomp_notify(int argc, const char **argv);
int bench_syscall_basic(int argc, const char **argv);
int bench_syscall_getpgid(int argc, const char **argv);
int bench_syscall_fork(int argc, const char **argv);
diff --git a/tools/perf/bench/sched-seccomp-notify.c b/tools/perf/bench/sched-seccomp-notify.c
new file mode 100644
index 000000000000..b04ebcde4036
--- /dev/null
+++ b/tools/perf/bench/sched-seccomp-notify.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <subcmd/parse-options.h>
+#include "bench.h"
+
+#include <uapi/linux/filter.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <linux/unistd.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <linux/time64.h>
+#include <linux/seccomp.h>
+#include <sys/prctl.h>
+
+#include <unistd.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sys/wait.h>
+#include <string.h>
+#include <errno.h>
+#include <err.h>
+#include <inttypes.h>
+
+#define LOOPS_DEFAULT 1000000UL
+static uint64_t loops = LOOPS_DEFAULT;
+static bool sync_mode;
+
+static const struct option options[] = {
+ OPT_U64('l', "loop", &loops, "Specify number of loops"),
+ OPT_BOOLEAN('s', "sync-mode", &sync_mode,
+ "Enable the synchronious mode for seccomp notifications"),
+ OPT_END()
+};
+
+static const char * const bench_seccomp_usage[] = {
+ "perf bench sched secccomp-notify <options>",
+ NULL
+};
+
+static int seccomp(unsigned int op, unsigned int flags, void *args)
+{
+ return syscall(__NR_seccomp, op, flags, args);
+}
+
+static int user_notif_syscall(int nr, unsigned int flags)
+{
+ struct sock_filter filter[] = {
+ BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
+ offsetof(struct seccomp_data, nr)),
+ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1),
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF),
+ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
+ };
+
+ struct sock_fprog prog = {
+ .len = (unsigned short)ARRAY_SIZE(filter),
+ .filter = filter,
+ };
+
+ return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
+}
+
+#define USER_NOTIF_MAGIC INT_MAX
+static void user_notification_sync_loop(int listener)
+{
+ struct seccomp_notif_resp resp;
+ struct seccomp_notif req;
+ uint64_t nr;
+
+ for (nr = 0; nr < loops; nr++) {
+ memset(&req, 0, sizeof(req));
+ if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req))
+ err(EXIT_FAILURE, "SECCOMP_IOCTL_NOTIF_RECV failed");
+
+ if (req.data.nr != __NR_gettid)
+ errx(EXIT_FAILURE, "unexpected syscall: %d", req.data.nr);
+
+ resp.id = req.id;
+ resp.error = 0;
+ resp.val = USER_NOTIF_MAGIC;
+ resp.flags = 0;
+ if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp))
+ err(EXIT_FAILURE, "SECCOMP_IOCTL_NOTIF_SEND failed");
+ }
+}
+
+#ifndef SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+#endif
+int bench_sched_seccomp_notify(int argc, const char **argv)
+{
+ struct timeval start, stop, diff;
+ unsigned long long result_usec = 0;
+ int status, listener;
+ pid_t pid;
+ long ret;
+
+ argc = parse_options(argc, argv, options, bench_seccomp_usage, 0);
+
+ gettimeofday(&start, NULL);
+
+ prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ listener = user_notif_syscall(__NR_gettid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ if (listener < 0)
+ err(EXIT_FAILURE, "can't create a notification descriptor");
+
+ pid = fork();
+ if (pid < 0)
+ err(EXIT_FAILURE, "fork");
+ if (pid == 0) {
+ if (prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0))
+ err(EXIT_FAILURE, "can't set the parent death signal");
+ while (1) {
+ ret = syscall(__NR_gettid);
+ if (ret == USER_NOTIF_MAGIC)
+ continue;
+ break;
+ }
+ _exit(1);
+ }
+
+ if (sync_mode) {
+ if (ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
+ SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP, 0))
+ err(EXIT_FAILURE,
+ "can't set SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP");
+ }
+ user_notification_sync_loop(listener);
+
+ kill(pid, SIGKILL);
+ if (waitpid(pid, &status, 0) != pid)
+ err(EXIT_FAILURE, "waitpid(%d) failed", pid);
+ if (!WIFSIGNALED(status) || WTERMSIG(status) != SIGKILL)
+ errx(EXIT_FAILURE, "unexpected exit code: %d", status);
+
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Executed %" PRIu64 " system calls\n\n",
+ loops);
+
+ result_usec = diff.tv_sec * USEC_PER_SEC;
+ result_usec += diff.tv_usec;
+
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ (unsigned long) diff.tv_sec,
+ (unsigned long) (diff.tv_usec / USEC_PER_MSEC));
+
+ printf(" %14lf usecs/op\n",
+ (double)result_usec / (double)loops);
+ printf(" %14d ops/sec\n",
+ (int)((double)loops /
+ ((double)result_usec / (double)USEC_PER_SEC)));
+ break;
+
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n",
+ (unsigned long) diff.tv_sec,
+ (unsigned long) (diff.tv_usec / USEC_PER_MSEC));
+ break;
+
+ default:
+ /* reaching here would be a disaster */
+ fprintf(stderr, "Unknown format:%d\n", bench_format);
+ exit(1);
+ break;
+ }
+
+ return 0;
+}
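In this benchmark the parent acts as the seccomp user-notification supervisor while the forked child loops on gettid(), which the BPF filter redirects to the listener; passing -s sets SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP on the listener so notifications are handled synchronously. A typical invocation would be something like "perf bench sched seccomp-notify -l 1000000 -s".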
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index db435b791a09..5033e8bab276 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -47,6 +47,7 @@ static struct bench numa_benchmarks[] = {
static struct bench sched_benchmarks[] = {
{ "messaging", "Benchmark for scheduling and IPC", bench_sched_messaging },
{ "pipe", "Benchmark for pipe() between two processes", bench_sched_pipe },
+ { "seccomp-notify", "Benchmark for seccomp user notify", bench_sched_seccomp_notify},
{ "all", "Run all scheduler benchmarks", NULL },
{ NULL, NULL, NULL }
};
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 59bfa05dec5d..dc531805a570 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -53,7 +53,7 @@ DESTDIR ?=
VERSION:= $(shell ./utils/version-gen.sh)
LIB_MAJ= 0.0.1
-LIB_MIN= 0
+LIB_MIN= 1
PACKAGE = cpupower
PACKAGE_BUGREPORT = linux-pm@vger.kernel.org
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index 3f7d0c0c5067..7a2ef691b20e 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -14,6 +14,13 @@
#include "cpupower.h"
#include "cpupower_intern.h"
+int is_valid_path(const char *path)
+{
+ if (access(path, F_OK) == -1)
+ return 0;
+ return 1;
+}
+
unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
{
ssize_t numread;
diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h
index ac1112b956ec..5fdb8620d41b 100644
--- a/tools/power/cpupower/lib/cpupower_intern.h
+++ b/tools/power/cpupower/lib/cpupower_intern.h
@@ -7,5 +7,6 @@
#define SYSFS_PATH_MAX 255
+int is_valid_path(const char *path);
unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
unsigned int cpupower_write_sysfs(const char *path, char *buf, size_t buflen);
diff --git a/tools/power/cpupower/utils/cpuidle-set.c b/tools/power/cpupower/utils/cpuidle-set.c
index 46158928f9ad..a551d1d4ac51 100644
--- a/tools/power/cpupower/utils/cpuidle-set.c
+++ b/tools/power/cpupower/utils/cpuidle-set.c
@@ -41,14 +41,6 @@ int cmd_idle_set(int argc, char **argv)
cont = 0;
break;
case 'd':
- if (param) {
- param = -1;
- cont = 0;
- break;
- }
- param = ret;
- idlestate = atoi(optarg);
- break;
case 'e':
if (param) {
param = -1;
@@ -56,7 +48,13 @@ int cmd_idle_set(int argc, char **argv)
break;
}
param = ret;
- idlestate = atoi(optarg);
+ strtol(optarg, &endptr, 10);
+ if (*endptr != '\0') {
+ printf(_("Bad value: %s, Integer expected\n"), optarg);
+ exit(EXIT_FAILURE);
+ } else {
+ idlestate = atoi(optarg);
+ }
break;
case 'D':
if (param) {
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index 180d5ba877e6..0677b58374ab 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,6 +18,9 @@
static struct option set_opts[] = {
{"perf-bias", required_argument, NULL, 'b'},
+ {"epp", required_argument, NULL, 'e'},
+ {"amd-pstate-mode", required_argument, NULL, 'm'},
+ {"turbo-boost", required_argument, NULL, 't'},
{ },
};
@@ -37,11 +40,15 @@ int cmd_set(int argc, char **argv)
union {
struct {
int perf_bias:1;
+ int epp:1;
+ int mode:1;
+ int turbo_boost:1;
};
int params;
} params;
- int perf_bias = 0;
+ int perf_bias = 0, turbo_boost = 1;
int ret = 0;
+ char epp[30], mode[20];
ret = uname(&uts);
if (!ret && (!strcmp(uts.machine, "ppc64le") ||
@@ -55,7 +62,7 @@ int cmd_set(int argc, char **argv)
params.params = 0;
/* parameter parsing */
- while ((ret = getopt_long(argc, argv, "b:",
+ while ((ret = getopt_long(argc, argv, "b:e:m:",
set_opts, NULL)) != -1) {
switch (ret) {
case 'b':
@@ -69,6 +76,38 @@ int cmd_set(int argc, char **argv)
}
params.perf_bias = 1;
break;
+ case 'e':
+ if (params.epp)
+ print_wrong_arg_exit();
+ if (sscanf(optarg, "%29s", epp) != 1) {
+ print_wrong_arg_exit();
+ return -EINVAL;
+ }
+ params.epp = 1;
+ break;
+ case 'm':
+ if (cpupower_cpu_info.vendor != X86_VENDOR_AMD)
+ print_wrong_arg_exit();
+ if (params.mode)
+ print_wrong_arg_exit();
+ if (sscanf(optarg, "%19s", mode) != 1) {
+ print_wrong_arg_exit();
+ return -EINVAL;
+ }
+ params.mode = 1;
+ break;
+ case 't':
+ if (params.turbo_boost)
+ print_wrong_arg_exit();
+ turbo_boost = atoi(optarg);
+ if (turbo_boost < 0 || turbo_boost > 1) {
+ printf("--turbo-boost param out of range [0-1]\n");
+ print_wrong_arg_exit();
+ }
+ params.turbo_boost = 1;
+ break;
+
+
default:
print_wrong_arg_exit();
}
@@ -77,6 +116,18 @@ int cmd_set(int argc, char **argv)
if (!params.params)
print_wrong_arg_exit();
+ if (params.mode) {
+ ret = cpupower_set_amd_pstate_mode(mode);
+ if (ret)
+ fprintf(stderr, "Error setting mode\n");
+ }
+
+ if (params.turbo_boost) {
+ ret = cpupower_set_turbo_boost(turbo_boost);
+ if (ret)
+ fprintf(stderr, "Error setting turbo-boost\n");
+ }
+
/* Default is: set all CPUs */
if (bitmask_isallclear(cpus_chosen))
bitmask_setall(cpus_chosen);
@@ -102,6 +153,16 @@ int cmd_set(int argc, char **argv)
break;
}
}
+
+ if (params.epp) {
+ ret = cpupower_set_epp(cpu, epp);
+ if (ret) {
+ fprintf(stderr,
+ "Error setting epp value on CPU %d\n", cpu);
+ break;
+ }
+ }
+
}
return ret;
}
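With these options wired to the sysfs helpers added in helpers/misc.c further below, a typical invocation would be something like "cpupower set --epp performance --turbo-boost 0", or "cpupower set --amd-pstate-mode active" on AMD P-state systems; the accepted epp and mode strings depend on the running cpufreq driver rather than on cpupower itself.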
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 96e4bede078b..95749b8ee475 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -116,6 +116,10 @@ extern int cpupower_intel_set_perf_bias(unsigned int cpu, unsigned int val);
extern int cpupower_intel_get_perf_bias(unsigned int cpu);
extern unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu);
+extern int cpupower_set_epp(unsigned int cpu, char *epp);
+extern int cpupower_set_amd_pstate_mode(char *mode);
+extern int cpupower_set_turbo_boost(int turbo_boost);
+
/* Read/Write msr ****************************/
/* PCI stuff ****************************/
@@ -173,6 +177,13 @@ static inline int cpupower_intel_get_perf_bias(unsigned int cpu)
static inline unsigned long long msr_intel_get_turbo_ratio(unsigned int cpu)
{ return 0; };
+static inline int cpupower_set_epp(unsigned int cpu, char *epp)
+{ return -1; };
+static inline int cpupower_set_amd_pstate_mode(char *mode)
+{ return -1; };
+static inline int cpupower_set_turbo_boost(int turbo_boost)
+{ return -1; };
+
/* Read/Write msr ****************************/
static inline int cpufreq_has_boost_support(unsigned int cpu, int *support,
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
index 9547b29254a7..76e461ff4f74 100644
--- a/tools/power/cpupower/utils/helpers/misc.c
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -87,6 +87,61 @@ int cpupower_intel_set_perf_bias(unsigned int cpu, unsigned int val)
return 0;
}
+int cpupower_set_epp(unsigned int cpu, char *epp)
+{
+ char path[SYSFS_PATH_MAX];
+ char linebuf[30] = {};
+
+ snprintf(path, sizeof(path),
+ PATH_TO_CPU "cpu%u/cpufreq/energy_performance_preference", cpu);
+
+ if (!is_valid_path(path))
+ return -1;
+
+ snprintf(linebuf, sizeof(linebuf), "%s", epp);
+
+ if (cpupower_write_sysfs(path, linebuf, 30) <= 0)
+ return -1;
+
+ return 0;
+}
+
+int cpupower_set_amd_pstate_mode(char *mode)
+{
+ char path[SYSFS_PATH_MAX];
+ char linebuf[20] = {};
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "amd_pstate/status");
+
+ if (!is_valid_path(path))
+ return -1;
+
+ snprintf(linebuf, sizeof(linebuf), "%s\n", mode);
+
+ if (cpupower_write_sysfs(path, linebuf, 20) <= 0)
+ return -1;
+
+ return 0;
+}
+
+int cpupower_set_turbo_boost(int turbo_boost)
+{
+ char path[SYSFS_PATH_MAX];
+ char linebuf[2] = {};
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpufreq/boost");
+
+ if (!is_valid_path(path))
+ return -1;
+
+ snprintf(linebuf, sizeof(linebuf), "%d", turbo_boost);
+
+ if (cpupower_write_sysfs(path, linebuf, 2) <= 0)
+ return -1;
+
+ return 0;
+}
+
bool cpupower_amd_pstate_enabled(void)
{
char *driver = cpufreq_get_driver(0);
@@ -95,7 +150,7 @@ bool cpupower_amd_pstate_enabled(void)
if (!driver)
return ret;
- if (!strcmp(driver, "amd-pstate"))
+ if (!strncmp(driver, "amd", 3))
ret = true;
cpufreq_put_driver(driver);
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8a36ba5df9f9..9a10512e3407 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -5447,7 +5447,7 @@ unsigned int intel_model_duplicates(unsigned int model)
case INTEL_FAM6_LAKEFIELD:
case INTEL_FAM6_ALDERLAKE:
case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_ALDERLAKE_N:
+ case INTEL_FAM6_ATOM_GRACEMONT:
case INTEL_FAM6_RAPTORLAKE:
case INTEL_FAM6_RAPTORLAKE_P:
case INTEL_FAM6_RAPTORLAKE_S:
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index 0393940c706a..873f3e06ccad 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -33,5 +33,7 @@ CONFIG_DAMON_PADDR=y
CONFIG_DEBUG_FS=y
CONFIG_DAMON_DBGFS=y
+CONFIG_REGMAP_BUILD=y
+
CONFIG_SECURITY=y
CONFIG_SECURITY_APPARMOR=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 3905c43369c3..bc74088c458a 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -55,8 +55,12 @@ class KunitExecRequest(KunitParseRequest):
build_dir: str
timeout: int
filter_glob: str
+ filter: str
+ filter_action: Optional[str]
kernel_args: Optional[List[str]]
run_isolated: Optional[str]
+ list_tests: bool
+ list_tests_attr: bool
@dataclass
class KunitRequest(KunitExecRequest, KunitBuildRequest):
@@ -102,19 +106,41 @@ def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
args = ['kunit.action=list']
+
+ if request.kernel_args:
+ args.extend(request.kernel_args)
+
+ output = linux.run_kernel(args=args,
+ timeout=request.timeout,
+ filter_glob=request.filter_glob,
+ filter=request.filter,
+ filter_action=request.filter_action,
+ build_dir=request.build_dir)
+ lines = kunit_parser.extract_tap_lines(output)
+ # Hack! Drop the dummy TAP version header that the executor prints out.
+ lines.pop()
+
+ # Filter out any extraneous non-test output that might have gotten mixed in.
+ return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
+
+def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]:
+ args = ['kunit.action=list_attr']
+
if request.kernel_args:
args.extend(request.kernel_args)
output = linux.run_kernel(args=args,
timeout=request.timeout,
filter_glob=request.filter_glob,
+ filter=request.filter,
+ filter_action=request.filter_action,
build_dir=request.build_dir)
lines = kunit_parser.extract_tap_lines(output)
# Hack! Drop the dummy TAP version header that the executor prints out.
lines.pop()
# Filter out any extraneous non-test output that might have gotten mixed in.
- return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
+ return lines
def _suites_from_test_list(tests: List[str]) -> List[str]:
"""Extracts all the suites from an ordered list of tests."""
@@ -128,10 +154,18 @@ def _suites_from_test_list(tests: List[str]) -> List[str]:
suites.append(suite)
return suites
-
-
def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
filter_globs = [request.filter_glob]
+ if request.list_tests:
+ output = _list_tests(linux, request)
+ for line in output:
+ print(line.rstrip())
+ return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
+ if request.list_tests_attr:
+ attr_output = _list_tests_attr(linux, request)
+ for line in attr_output:
+ print(line.rstrip())
+ return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
if request.run_isolated:
tests = _list_tests(linux, request)
if request.run_isolated == 'test':
@@ -155,6 +189,8 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
args=request.kernel_args,
timeout=request.timeout,
filter_glob=filter_glob,
+ filter=request.filter,
+ filter_action=request.filter_action,
build_dir=request.build_dir)
_, test_result = parse_tests(request, metadata, run_result)
@@ -341,6 +377,16 @@ def add_exec_opts(parser: argparse.ArgumentParser) -> None:
nargs='?',
default='',
metavar='filter_glob')
+ parser.add_argument('--filter',
+ help='Filter KUnit tests with attributes, '
+ 'e.g. module=example or speed>slow',
+ type=str,
+ default='')
+ parser.add_argument('--filter_action',
+ help='If set to skip, filtered tests will be skipped, '
+ 'e.g. --filter_action=skip. Otherwise they will not run.',
+ type=str,
+ choices=['skip'])
parser.add_argument('--kernel_args',
help='Kernel command-line parameters. May be repeated',
action='append', metavar='')
@@ -350,6 +396,12 @@ def add_exec_opts(parser: argparse.ArgumentParser) -> None:
'what ran before it.',
type=str,
choices=['suite', 'test'])
+ parser.add_argument('--list_tests', help='If set, list all tests that will be '
+ 'run.',
+ action='store_true')
+ parser.add_argument('--list_tests_attr', help='If set, list all tests and test '
+ 'attributes.',
+ action='store_true')
def add_parse_opts(parser: argparse.ArgumentParser) -> None:
parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
@@ -398,8 +450,12 @@ def run_handler(cli_args: argparse.Namespace) -> None:
json=cli_args.json,
timeout=cli_args.timeout,
filter_glob=cli_args.filter_glob,
+ filter=cli_args.filter,
+ filter_action=cli_args.filter_action,
kernel_args=cli_args.kernel_args,
- run_isolated=cli_args.run_isolated)
+ run_isolated=cli_args.run_isolated,
+ list_tests=cli_args.list_tests,
+ list_tests_attr=cli_args.list_tests_attr)
result = run_tests(linux, request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
@@ -441,8 +497,12 @@ def exec_handler(cli_args: argparse.Namespace) -> None:
json=cli_args.json,
timeout=cli_args.timeout,
filter_glob=cli_args.filter_glob,
+ filter=cli_args.filter,
+ filter_action=cli_args.filter_action,
kernel_args=cli_args.kernel_args,
- run_isolated=cli_args.run_isolated)
+ run_isolated=cli_args.run_isolated,
+ list_tests=cli_args.list_tests,
+ list_tests_attr=cli_args.list_tests_attr)
result = exec_tests(linux, exec_request)
stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (result.elapsed_time))
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 7f648802caf6..0b6488efed47 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -330,11 +330,15 @@ class LinuxSourceTree:
return False
return self.validate_config(build_dir)
- def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', timeout: Optional[int]=None) -> Iterator[str]:
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
if not args:
args = []
if filter_glob:
- args.append('kunit.filter_glob='+filter_glob)
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+ args.append('kunit.filter="' + filter + '"')
+ if filter_action:
+ args.append('kunit.filter_action=' + filter_action)
args.append('kunit.enable=1')
process = self._ops.start(args, build_dir)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index fbc094f0567e..79d8832c862a 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -212,6 +212,7 @@ KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
KTAP_END = re.compile(r'\s*(List of all partitions:|'
'Kernel panic - not syncing: VFS:|reboot: System halted)')
+EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')
def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
"""Extracts KTAP lines from the kernel output."""
@@ -242,6 +243,8 @@ def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
# remove the prefix, if any.
line = line[prefix_len:]
yield line_num, line
+ elif EXECUTOR_ERROR.search(line):
+ yield line_num, line
return LineStream(lines=isolate_ktap_output(kernel_output))
KTAP_VERSIONS = [1]
@@ -447,7 +450,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
Log of diagnostic lines
"""
log = [] # type: List[str]
- non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START]
+ non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START]
while lines and not any(re.match(lines.peek())
for re in non_diagnostic_lines):
log.append(lines.pop())
@@ -713,6 +716,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
"""
test = Test()
test.log.extend(log)
+
+ # Parse any errors prior to parsing tests
+ err_log = parse_diagnostic(lines)
+ test.log.extend(err_log)
+
if not is_subtest:
# If parsing the main/top-level test, parse KTAP version line and
# test plan
@@ -774,6 +782,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
# Don't override a bad status if this test had one reported.
# Assumption: no subtests means CRASHED is from Test.__init__()
if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+ print_log(test.log)
test.status = TestStatus.NO_TESTS
test.add_error('0 tests run!')
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index be35999bb84f..b28c1510be2e 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -597,7 +597,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir='.kunit', filter_glob='', timeout=300)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
@@ -605,7 +605,7 @@ class KUnitMainTest(unittest.TestCase):
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir='.kunit', filter_glob='', timeout=300)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_exec_passes_args_fail(self):
@@ -629,7 +629,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run'])
self.assertEqual(e.exception.code, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir='.kunit', filter_glob='', timeout=300)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
def test_exec_raw_output(self):
@@ -670,13 +670,13 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
kunit.main(['run', '--raw_output', 'filter_glob'])
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
+ args=None, build_dir='.kunit', filter_glob='filter_glob', filter='', filter_action=None, timeout=300)
def test_exec_timeout(self):
timeout = 3453
kunit.main(['exec', '--timeout', str(timeout)])
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
@@ -684,7 +684,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run', '--timeout', str(timeout)])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
+ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_builddir(self):
@@ -692,7 +692,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run', '--build_dir=.kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir=build_dir, filter_glob='', timeout=300)
+ args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_config_builddir(self):
@@ -710,7 +710,7 @@ class KUnitMainTest(unittest.TestCase):
build_dir = '.kunit'
kunit.main(['exec', '--build_dir', build_dir])
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=None, build_dir=build_dir, filter_glob='', timeout=300)
+ args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_kunitconfig(self):
@@ -786,7 +786,7 @@ class KUnitMainTest(unittest.TestCase):
kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300)
+ args=['a=1','b=2'], build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_list_tests(self):
@@ -794,13 +794,11 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
got = kunit._list_tests(self.linux_source_mock,
- kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'suite'))
-
+ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
self.assertEqual(got, want)
# Should respect the user's filter glob when listing tests.
self.linux_source_mock.run_kernel.assert_called_once_with(
- args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', timeout=300)
-
+ args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', filter='', filter_action=None, timeout=300)
@mock.patch.object(kunit, '_list_tests')
def test_run_isolated_by_suite(self, mock_tests):
@@ -809,10 +807,10 @@ class KUnitMainTest(unittest.TestCase):
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
- kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', None, 'suite'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
self.linux_source_mock.run_kernel.assert_has_calls([
- mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300),
- mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300),
])
@mock.patch.object(kunit, '_list_tests')
@@ -822,13 +820,12 @@ class KUnitMainTest(unittest.TestCase):
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
- kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'test'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
self.linux_source_mock.run_kernel.assert_has_calls([
- mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300),
- mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300),
- mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300),
+ mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', filter='', filter_action=None, timeout=300),
])
-
if __name__ == '__main__':
unittest.main()
diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py
index 67d04064f785..d3ff27024755 100644
--- a/tools/testing/kunit/qemu_configs/arm64.py
+++ b/tools/testing/kunit/qemu_configs/arm64.py
@@ -9,4 +9,4 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
qemu_arch='aarch64',
kernel_path='arch/arm64/boot/Image.gz',
kernel_command_line='console=ttyAMA0',
- extra_qemu_params=['-machine', 'virt', '-cpu', 'cortex-a57'])
+ extra_qemu_params=['-machine', 'virt', '-cpu', 'max,pauth-impdef=on'])
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 5c60a7cea732..42806add0114 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -13,12 +13,14 @@ TARGETS += core
TARGETS += cpufreq
TARGETS += cpu-hotplug
TARGETS += damon
+TARGETS += dmabuf-heaps
TARGETS += drivers/dma-buf
TARGETS += drivers/s390x/uvdevice
TARGETS += drivers/net/bonding
TARGETS += drivers/net/team
TARGETS += efivarfs
TARGETS += exec
+TARGETS += fchmodat2
TARGETS += filesystems
TARGETS += filesystems/binderfs
TARGETS += filesystems/epoll
@@ -57,6 +59,7 @@ TARGETS += net/mptcp
TARGETS += net/openvswitch
TARGETS += netfilter
TARGETS += nsfs
+TARGETS += perf_events
TARGETS += pidfd
TARGETS += pid_namespace
TARGETS += powerpc
@@ -89,7 +92,9 @@ endif
TARGETS += tmpfs
TARGETS += tpm2
TARGETS += tty
+TARGETS += uevents
TARGETS += user
+TARGETS += user_events
TARGETS += vDSO
TARGETS += mm
TARGETS += x86
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile
index ace8b67fb22d..28b93cab8c0d 100644
--- a/tools/testing/selftests/arm64/Makefile
+++ b/tools/testing/selftests/arm64/Makefile
@@ -19,6 +19,8 @@ CFLAGS += -I$(top_srcdir)/tools/testing/selftests/
CFLAGS += $(KHDR_INCLUDES)
+CFLAGS += -I$(top_srcdir)/tools/include
+
export CFLAGS
export top_srcdir
diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
index d4ad813fed10..e3d262831d91 100644
--- a/tools/testing/selftests/arm64/abi/hwcap.c
+++ b/tools/testing/selftests/arm64/abi/hwcap.c
@@ -19,19 +19,38 @@
#include "../../kselftest.h"
-#define TESTS_PER_HWCAP 2
+#define TESTS_PER_HWCAP 3
/*
- * Function expected to generate SIGILL when the feature is not
- * supported and return when it is supported. If SIGILL is generated
- * then the handler must be able to skip over the instruction safely.
+ * Function expected to generate exception when the feature is not
+ * supported and return when it is supported. If the specific exception
+ * is generated then the handler must be able to skip over the
+ * instruction safely.
*
* Note that it is expected that for many architecture extensions
* there are no specific traps due to no architecture state being
* added so we may not fault if running on a kernel which doesn't know
* to add the hwcap.
*/
-typedef void (*sigill_fn)(void);
+typedef void (*sig_fn)(void);
+
+static void aes_sigill(void)
+{
+ /* AESE V0.16B, V0.16B */
+ asm volatile(".inst 0x4e284800" : : : );
+}
+
+static void atomics_sigill(void)
+{
+ /* STADD W0, [SP] */
+ asm volatile(".inst 0xb82003ff" : : : );
+}
+
+static void crc32_sigill(void)
+{
+ /* CRC32W W0, W0, W1 */
+ asm volatile(".inst 0x1ac14800" : : : );
+}
static void cssc_sigill(void)
{
@@ -39,6 +58,29 @@ static void cssc_sigill(void)
asm volatile(".inst 0xdac01c00" : : : "x0");
}
+static void fp_sigill(void)
+{
+ asm volatile("fmov s0, #1");
+}
+
+static void ilrcpc_sigill(void)
+{
+ /* LDAPUR W0, [SP, #8] */
+ asm volatile(".inst 0x994083e0" : : : );
+}
+
+static void jscvt_sigill(void)
+{
+ /* FJCVTZS W0, D0 */
+ asm volatile(".inst 0x1e7e0000" : : : );
+}
+
+static void lrcpc_sigill(void)
+{
+ /* LDAPR W0, [SP, #0] */
+ asm volatile(".inst 0xb8bfc3e0" : : : );
+}
+
static void mops_sigill(void)
{
char dst[1], src[1];
@@ -53,11 +95,35 @@ static void mops_sigill(void)
: "cc", "memory");
}
+static void pmull_sigill(void)
+{
+ /* PMULL V0.1Q, V0.1D, V0.1D */
+ asm volatile(".inst 0x0ee0e000" : : : );
+}
+
static void rng_sigill(void)
{
asm volatile("mrs x0, S3_3_C2_C4_0" : : : "x0");
}
+static void sha1_sigill(void)
+{
+ /* SHA1H S0, S0 */
+ asm volatile(".inst 0x5e280800" : : : );
+}
+
+static void sha2_sigill(void)
+{
+ /* SHA256H Q0, Q0, V0.4S */
+ asm volatile(".inst 0x5e004000" : : : );
+}
+
+static void sha512_sigill(void)
+{
+ /* SHA512H Q0, Q0, V0.2D */
+ asm volatile(".inst 0xce608000" : : : );
+}
+
static void sme_sigill(void)
{
/* RDSVL x0, #0 */
@@ -208,15 +274,46 @@ static void svebf16_sigill(void)
asm volatile(".inst 0x658aa000" : : : "z0");
}
+static void hbc_sigill(void)
+{
+ /* BC.EQ +4 */
+ asm volatile("cmp xzr, xzr\n"
+ ".inst 0x54000030" : : : "cc");
+}
+
+static void uscat_sigbus(void)
+{
+ /* unaligned atomic access */
+ asm volatile("ADD x1, sp, #2" : : : );
+ /* STADD W0, [X1] */
+ asm volatile(".inst 0xb820003f" : : : );
+}
+
static const struct hwcap_data {
const char *name;
unsigned long at_hwcap;
unsigned long hwcap_bit;
const char *cpuinfo;
- sigill_fn sigill_fn;
+ sig_fn sigill_fn;
bool sigill_reliable;
+ sig_fn sigbus_fn;
+ bool sigbus_reliable;
} hwcaps[] = {
{
+ .name = "AES",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_AES,
+ .cpuinfo = "aes",
+ .sigill_fn = aes_sigill,
+ },
+ {
+ .name = "CRC32",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_CRC32,
+ .cpuinfo = "crc32",
+ .sigill_fn = crc32_sigill,
+ },
+ {
.name = "CSSC",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_CSSC,
@@ -224,6 +321,50 @@ static const struct hwcap_data {
.sigill_fn = cssc_sigill,
},
{
+ .name = "FP",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_FP,
+ .cpuinfo = "fp",
+ .sigill_fn = fp_sigill,
+ },
+ {
+ .name = "JSCVT",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_JSCVT,
+ .cpuinfo = "jscvt",
+ .sigill_fn = jscvt_sigill,
+ },
+ {
+ .name = "LRCPC",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_LRCPC,
+ .cpuinfo = "lrcpc",
+ .sigill_fn = lrcpc_sigill,
+ },
+ {
+ .name = "LRCPC2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_ILRCPC,
+ .cpuinfo = "ilrcpc",
+ .sigill_fn = ilrcpc_sigill,
+ },
+ {
+ .name = "LSE",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_ATOMICS,
+ .cpuinfo = "atomics",
+ .sigill_fn = atomics_sigill,
+ },
+ {
+ .name = "LSE2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_USCAT,
+ .cpuinfo = "uscat",
+ .sigill_fn = atomics_sigill,
+ .sigbus_fn = uscat_sigbus,
+ .sigbus_reliable = true,
+ },
+ {
.name = "MOPS",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_MOPS,
@@ -232,6 +373,13 @@ static const struct hwcap_data {
.sigill_reliable = true,
},
{
+ .name = "PMULL",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_PMULL,
+ .cpuinfo = "pmull",
+ .sigill_fn = pmull_sigill,
+ },
+ {
.name = "RNG",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_RNG,
@@ -245,6 +393,27 @@ static const struct hwcap_data {
.cpuinfo = "rprfm",
},
{
+ .name = "SHA1",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SHA1,
+ .cpuinfo = "sha1",
+ .sigill_fn = sha1_sigill,
+ },
+ {
+ .name = "SHA2",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SHA2,
+ .cpuinfo = "sha2",
+ .sigill_fn = sha2_sigill,
+ },
+ {
+ .name = "SHA512",
+ .at_hwcap = AT_HWCAP,
+ .hwcap_bit = HWCAP_SHA512,
+ .cpuinfo = "sha512",
+ .sigill_fn = sha512_sigill,
+ },
+ {
.name = "SME",
.at_hwcap = AT_HWCAP2,
.hwcap_bit = HWCAP2_SME,
@@ -386,20 +555,32 @@ static const struct hwcap_data {
.hwcap_bit = HWCAP2_SVE_EBF16,
.cpuinfo = "sveebf16",
},
+ {
+ .name = "HBC",
+ .at_hwcap = AT_HWCAP2,
+ .hwcap_bit = HWCAP2_HBC,
+ .cpuinfo = "hbc",
+ .sigill_fn = hbc_sigill,
+ .sigill_reliable = true,
+ },
};
-static bool seen_sigill;
-
-static void handle_sigill(int sig, siginfo_t *info, void *context)
-{
- ucontext_t *uc = context;
-
- seen_sigill = true;
-
- /* Skip over the offending instruction */
- uc->uc_mcontext.pc += 4;
+typedef void (*sighandler_fn)(int, siginfo_t *, void *);
+
+#define DEF_SIGHANDLER_FUNC(SIG, NUM) \
+static bool seen_##SIG; \
+static void handle_##SIG(int sig, siginfo_t *info, void *context) \
+{ \
+ ucontext_t *uc = context; \
+ \
+ seen_##SIG = true; \
+ /* Skip over the offending instruction */ \
+ uc->uc_mcontext.pc += 4; \
}
+DEF_SIGHANDLER_FUNC(sigill, SIGILL);
+DEF_SIGHANDLER_FUNC(sigbus, SIGBUS);
+
bool cpuinfo_present(const char *name)
{
FILE *f;
@@ -442,24 +623,77 @@ bool cpuinfo_present(const char *name)
return false;
}
-int main(void)
+static int install_sigaction(int signum, sighandler_fn handler)
{
- const struct hwcap_data *hwcap;
- int i, ret;
- bool have_cpuinfo, have_hwcap;
+ int ret;
struct sigaction sa;
- ksft_print_header();
- ksft_set_plan(ARRAY_SIZE(hwcaps) * TESTS_PER_HWCAP);
-
memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handle_sigill;
+ sa.sa_sigaction = handler;
sa.sa_flags = SA_RESTART | SA_SIGINFO;
sigemptyset(&sa.sa_mask);
- ret = sigaction(SIGILL, &sa, NULL);
+ ret = sigaction(signum, &sa, NULL);
if (ret < 0)
- ksft_exit_fail_msg("Failed to install SIGILL handler: %s (%d)\n",
+ ksft_exit_fail_msg("Failed to install SIGNAL handler: %s (%d)\n",
+ strerror(errno), errno);
+
+ return ret;
+}
+
+static void uninstall_sigaction(int signum)
+{
+ if (sigaction(signum, NULL, NULL) < 0)
+ ksft_exit_fail_msg("Failed to uninstall SIGNAL handler: %s (%d)\n",
strerror(errno), errno);
+}
+
+#define DEF_INST_RAISE_SIG(SIG, NUM) \
+static bool inst_raise_##SIG(const struct hwcap_data *hwcap, \
+ bool have_hwcap) \
+{ \
+ if (!hwcap->SIG##_fn) { \
+ ksft_test_result_skip(#SIG"_%s\n", hwcap->name); \
+ /* assume that it would raise exception in default */ \
+ return true; \
+ } \
+ \
+ install_sigaction(NUM, handle_##SIG); \
+ \
+ seen_##SIG = false; \
+ hwcap->SIG##_fn(); \
+ \
+ if (have_hwcap) { \
+ /* Should be able to use the extension */ \
+ ksft_test_result(!seen_##SIG, \
+ #SIG"_%s\n", hwcap->name); \
+ } else if (hwcap->SIG##_reliable) { \
+ /* Guaranteed a SIGNAL */ \
+ ksft_test_result(seen_##SIG, \
+ #SIG"_%s\n", hwcap->name); \
+ } else { \
+ /* Missing SIGNAL might be fine */ \
+ ksft_print_msg(#SIG"_%sreported for %s\n", \
+ seen_##SIG ? "" : "not ", \
+ hwcap->name); \
+ ksft_test_result_skip(#SIG"_%s\n", \
+ hwcap->name); \
+ } \
+ \
+ uninstall_sigaction(NUM); \
+ return seen_##SIG; \
+}
+
+DEF_INST_RAISE_SIG(sigill, SIGILL);
+DEF_INST_RAISE_SIG(sigbus, SIGBUS);
+
+int main(void)
+{
+ int i;
+ const struct hwcap_data *hwcap;
+ bool have_cpuinfo, have_hwcap, raise_sigill;
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(hwcaps) * TESTS_PER_HWCAP);
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
hwcap = &hwcaps[i];
@@ -473,30 +707,15 @@ int main(void)
ksft_test_result(have_hwcap == have_cpuinfo,
"cpuinfo_match_%s\n", hwcap->name);
- if (hwcap->sigill_fn) {
- seen_sigill = false;
- hwcap->sigill_fn();
-
- if (have_hwcap) {
- /* Should be able to use the extension */
- ksft_test_result(!seen_sigill, "sigill_%s\n",
- hwcap->name);
- } else if (hwcap->sigill_reliable) {
- /* Guaranteed a SIGILL */
- ksft_test_result(seen_sigill, "sigill_%s\n",
- hwcap->name);
- } else {
- /* Missing SIGILL might be fine */
- ksft_print_msg("SIGILL %sreported for %s\n",
- seen_sigill ? "" : "not ",
- hwcap->name);
- ksft_test_result_skip("sigill_%s\n",
- hwcap->name);
- }
- } else {
- ksft_test_result_skip("sigill_%s\n",
- hwcap->name);
- }
+ /*
+ * Testing for SIGBUS only makes sense after making sure
+ * that the instruction does not cause a SIGILL signal.
+ */
+ raise_sigill = inst_raise_sigill(hwcap, have_hwcap);
+ if (!raise_sigill)
+ inst_raise_sigbus(hwcap, have_hwcap);
+ else
+ ksft_test_result_skip("sigbus_%s\n", hwcap->name);
}
ksft_print_cnts();
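The DEF_SIGHANDLER_FUNC()/DEF_INST_RAISE_SIG() macros above generalise the old SIGILL-only probing so the same flow also covers SIGBUS. Stripped of the kselftest plumbing, the underlying probe-and-skip idea looks roughly like this (a hedged, arm64-only sketch; it advances uc_mcontext.pc by one 4-byte instruction exactly as the handler above does):

```c
/*
 * Sketch: install a SIGILL handler that skips the faulting 4-byte
 * instruction, execute a candidate instruction, and report whether it
 * trapped. arm64 only; error handling trimmed for brevity.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static volatile bool seen_sigill;

static void handle_sigill(int sig, siginfo_t *info, void *context)
{
	ucontext_t *uc = context;

	seen_sigill = true;
	uc->uc_mcontext.pc += 4;	/* skip the offending instruction */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handle_sigill;
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);

	/* CRC32W W0, W0, W1 - traps if FEAT_CRC32 is not implemented */
	asm volatile(".inst 0x1ac14800" : : : );

	printf("crc32 %ssupported\n", seen_sigill ? "not " : "");
	return 0;
}
```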
diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c
index 18cc123e2347..d704511a0955 100644
--- a/tools/testing/selftests/arm64/abi/syscall-abi.c
+++ b/tools/testing/selftests/arm64/abi/syscall-abi.c
@@ -20,12 +20,20 @@
#include "syscall-abi.h"
+/*
+ * The kernel defines a much larger SVE_VQ_MAX than is expressible in
+ * the architecture; this creates a *lot* of overhead filling the
+ * buffers (especially ZA) on emulated platforms, so use the actual
+ * architectural maximum instead.
+ */
+#define ARCH_SVE_VQ_MAX 16
+
static int default_sme_vl;
static int sve_vl_count;
-static unsigned int sve_vls[SVE_VQ_MAX];
+static unsigned int sve_vls[ARCH_SVE_VQ_MAX];
static int sme_vl_count;
-static unsigned int sme_vls[SVE_VQ_MAX];
+static unsigned int sme_vls[ARCH_SVE_VQ_MAX];
extern void do_syscall(int sve_vl, int sme_vl);
@@ -130,9 +138,9 @@ static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
#define SVE_Z_SHARED_BYTES (128 / 8)
-static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)];
-uint8_t z_in[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
-uint8_t z_out[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
+static uint8_t z_zero[__SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t z_in[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t z_out[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
@@ -190,8 +198,8 @@ static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
return errors;
}
-uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];
-uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];
+uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
@@ -222,8 +230,8 @@ static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
return errors;
}
-uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)];
-uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)];
+uint8_t ffr_in[__SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t ffr_out[__SVE_PREG_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
@@ -300,8 +308,8 @@ static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
return errors;
}
-uint8_t za_in[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
-uint8_t za_out[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
+uint8_t za_in[ZA_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX)];
+uint8_t za_out[ZA_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX)];
static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
uint64_t svcr)
@@ -470,9 +478,9 @@ void sve_count_vls(void)
return;
/*
- * Enumerate up to SVE_VQ_MAX vector lengths
+ * Enumerate up to ARCH_SVE_VQ_MAX vector lengths
*/
- for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
+ for (vq = ARCH_SVE_VQ_MAX; vq > 0; vq /= 2) {
vl = prctl(PR_SVE_SET_VL, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
@@ -496,9 +504,9 @@ void sme_count_vls(void)
return;
/*
- * Enumerate up to SVE_VQ_MAX vector lengths
+ * Enumerate up to ARCH_SVE_VQ_MAX vector lengths
*/
- for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
+ for (vq = ARCH_SVE_VQ_MAX; vq > 0; vq /= 2) {
vl = prctl(PR_SME_SET_VL, vq * 16);
if (vl == -1)
ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
diff --git a/tools/testing/selftests/arm64/bti/Makefile b/tools/testing/selftests/arm64/bti/Makefile
index ccdac414ad94..05e4ee523a53 100644
--- a/tools/testing/selftests/arm64/bti/Makefile
+++ b/tools/testing/selftests/arm64/bti/Makefile
@@ -2,8 +2,6 @@
TEST_GEN_PROGS := btitest nobtitest
-PROGS := $(patsubst %,gen/%,$(TEST_GEN_PROGS))
-
# These tests are built as freestanding binaries since otherwise BTI
# support in ld.so is required which is not currently widespread; when
# it is available it will still be useful to test this separately as the
@@ -18,44 +16,41 @@ CFLAGS_COMMON = -ffreestanding -Wall -Wextra $(CFLAGS)
BTI_CC_COMMAND = $(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -c -o $@ $<
NOBTI_CC_COMMAND = $(CC) $(CFLAGS_NOBTI) $(CFLAGS_COMMON) -c -o $@ $<
-%-bti.o: %.c
+$(OUTPUT)/%-bti.o: %.c
$(BTI_CC_COMMAND)
-%-bti.o: %.S
+$(OUTPUT)/%-bti.o: %.S
$(BTI_CC_COMMAND)
-%-nobti.o: %.c
+$(OUTPUT)/%-nobti.o: %.c
$(NOBTI_CC_COMMAND)
-%-nobti.o: %.S
+$(OUTPUT)/%-nobti.o: %.S
$(NOBTI_CC_COMMAND)
BTI_OBJS = \
- test-bti.o \
- signal-bti.o \
- start-bti.o \
- syscall-bti.o \
- system-bti.o \
- teststubs-bti.o \
- trampoline-bti.o
-gen/btitest: $(BTI_OBJS)
+ $(OUTPUT)/test-bti.o \
+ $(OUTPUT)/signal-bti.o \
+ $(OUTPUT)/start-bti.o \
+ $(OUTPUT)/syscall-bti.o \
+ $(OUTPUT)/system-bti.o \
+ $(OUTPUT)/teststubs-bti.o \
+ $(OUTPUT)/trampoline-bti.o
+$(OUTPUT)/btitest: $(BTI_OBJS)
$(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^
NOBTI_OBJS = \
- test-nobti.o \
- signal-nobti.o \
- start-nobti.o \
- syscall-nobti.o \
- system-nobti.o \
- teststubs-nobti.o \
- trampoline-nobti.o
-gen/nobtitest: $(NOBTI_OBJS)
+ $(OUTPUT)/test-nobti.o \
+ $(OUTPUT)/signal-nobti.o \
+ $(OUTPUT)/start-nobti.o \
+ $(OUTPUT)/syscall-nobti.o \
+ $(OUTPUT)/system-nobti.o \
+ $(OUTPUT)/teststubs-nobti.o \
+ $(OUTPUT)/trampoline-nobti.o
+$(OUTPUT)/nobtitest: $(NOBTI_OBJS)
$(CC) $(CFLAGS_BTI) $(CFLAGS_COMMON) -nostdlib -static -o $@ $^
# Including KSFT lib.mk here will also mangle the TEST_GEN_PROGS list
# to account for any OUTPUT target-dirs optionally provided by
# the toplevel makefile
include ../../lib.mk
-
-$(TEST_GEN_PROGS): $(PROGS)
- cp $(PROGS) $(OUTPUT)/
diff --git a/tools/testing/selftests/arm64/bti/compiler.h b/tools/testing/selftests/arm64/bti/compiler.h
deleted file mode 100644
index ebb6204f447a..000000000000
--- a/tools/testing/selftests/arm64/bti/compiler.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 Arm Limited
- * Original author: Dave Martin <Dave.Martin@arm.com>
- */
-
-#ifndef COMPILER_H
-#define COMPILER_H
-
-#define __always_unused __attribute__((__unused__))
-#define __noreturn __attribute__((__noreturn__))
-#define __unreachable() __builtin_unreachable()
-
-/* curse(e) has value e, but the compiler cannot assume so */
-#define curse(e) ({ \
- __typeof__(e) __curse_e = (e); \
- asm ("" : "+r" (__curse_e)); \
- __curse_e; \
-})
-
-#endif /* ! COMPILER_H */
diff --git a/tools/testing/selftests/arm64/bti/gen/.gitignore b/tools/testing/selftests/arm64/bti/gen/.gitignore
deleted file mode 100644
index 73869fabada4..000000000000
--- a/tools/testing/selftests/arm64/bti/gen/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-btitest
-nobtitest
diff --git a/tools/testing/selftests/arm64/bti/system.c b/tools/testing/selftests/arm64/bti/system.c
index 6385d8d4973b..93d772b00bfe 100644
--- a/tools/testing/selftests/arm64/bti/system.c
+++ b/tools/testing/selftests/arm64/bti/system.c
@@ -8,12 +8,10 @@
#include <asm/unistd.h>
-#include "compiler.h"
-
void __noreturn exit(int n)
{
syscall(__NR_exit, n);
- __unreachable();
+ unreachable();
}
ssize_t write(int fd, const void *buf, size_t size)
diff --git a/tools/testing/selftests/arm64/bti/system.h b/tools/testing/selftests/arm64/bti/system.h
index aca118589705..2e9ee1284a0c 100644
--- a/tools/testing/selftests/arm64/bti/system.h
+++ b/tools/testing/selftests/arm64/bti/system.h
@@ -14,12 +14,12 @@ typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
#include <linux/errno.h>
+#include <linux/compiler.h>
+
#include <asm/hwcap.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
-#include "compiler.h"
-
long syscall(int nr, ...);
void __noreturn exit(int n);
diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c
index 2cd8dcee5aec..28a8e8a28a84 100644
--- a/tools/testing/selftests/arm64/bti/test.c
+++ b/tools/testing/selftests/arm64/bti/test.c
@@ -17,7 +17,6 @@
typedef struct ucontext ucontext_t;
#include "btitest.h"
-#include "compiler.h"
#include "signal.h"
#define EXPECTED_TESTS 18
diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c
index 9bcfcdc34ee9..5f648b97a06f 100644
--- a/tools/testing/selftests/arm64/fp/vec-syscfg.c
+++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c
@@ -6,6 +6,7 @@
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
+#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
@@ -39,9 +40,11 @@ struct vec_data {
int max_vl;
};
+#define VEC_SVE 0
+#define VEC_SME 1
static struct vec_data vec_data[] = {
- {
+ [VEC_SVE] = {
.name = "SVE",
.hwcap_type = AT_HWCAP,
.hwcap = HWCAP_SVE,
@@ -51,7 +54,7 @@ static struct vec_data vec_data[] = {
.prctl_set = PR_SVE_SET_VL,
.default_vl_file = "/proc/sys/abi/sve_default_vector_length",
},
- {
+ [VEC_SME] = {
.name = "SME",
.hwcap_type = AT_HWCAP2,
.hwcap = HWCAP2_SME,
@@ -551,7 +554,8 @@ static void prctl_set_onexec(struct vec_data *data)
/* For each VQ verify that setting via prctl() does the right thing */
static void prctl_set_all_vqs(struct vec_data *data)
{
- int ret, vq, vl, new_vl;
+ int ret, vq, vl, new_vl, i;
+ int orig_vls[ARRAY_SIZE(vec_data)];
int errors = 0;
if (!data->min_vl || !data->max_vl) {
@@ -560,6 +564,9 @@ static void prctl_set_all_vqs(struct vec_data *data)
return;
}
+ for (i = 0; i < ARRAY_SIZE(vec_data); i++)
+ orig_vls[i] = vec_data[i].rdvl();
+
for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) {
vl = sve_vl_from_vq(vq);
@@ -582,6 +589,22 @@ static void prctl_set_all_vqs(struct vec_data *data)
errors++;
}
+ /* Did any other VLs change? */
+ for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
+ if (&vec_data[i] == data)
+ continue;
+
+ if (!(getauxval(vec_data[i].hwcap_type) & vec_data[i].hwcap))
+ continue;
+
+ if (vec_data[i].rdvl() != orig_vls[i]) {
+ ksft_print_msg("%s VL changed from %d to %d\n",
+ vec_data[i].name, orig_vls[i],
+ vec_data[i].rdvl());
+ errors++;
+ }
+ }
+
/* Was that the VL we asked for? */
if (new_vl == vl)
continue;
@@ -644,18 +667,107 @@ static const test_type tests[] = {
prctl_set_all_vqs,
};
+static inline void smstart(void)
+{
+ asm volatile("msr S0_3_C4_C7_3, xzr");
+}
+
+static inline void smstart_sm(void)
+{
+ asm volatile("msr S0_3_C4_C3_3, xzr");
+}
+
+static inline void smstop(void)
+{
+ asm volatile("msr S0_3_C4_C6_3, xzr");
+}
+
+
+/*
+ * Verify we can change the SVE vector length while SME is active and
+ * continue to use SME afterwards.
+ */
+static void change_sve_with_za(void)
+{
+ struct vec_data *sve_data = &vec_data[VEC_SVE];
+ bool pass = true;
+ int ret, i;
+
+ if (sve_data->min_vl == sve_data->max_vl) {
+ ksft_print_msg("Only one SVE VL supported, can't change\n");
+ ksft_test_result_skip("change_sve_while_sme\n");
+ return;
+ }
+
+ /* Ensure we will trigger a change when we set the maximum */
+ ret = prctl(sve_data->prctl_set, sve_data->min_vl);
+ if (ret != sve_data->min_vl) {
+ ksft_print_msg("Failed to set SVE VL %d: %d\n",
+ sve_data->min_vl, ret);
+ pass = false;
+ }
+
+ /* Enable SM and ZA */
+ smstart();
+
+ /* Trigger another VL change */
+ ret = prctl(sve_data->prctl_set, sve_data->max_vl);
+ if (ret != sve_data->max_vl) {
+ ksft_print_msg("Failed to set SVE VL %d: %d\n",
+ sve_data->max_vl, ret);
+ pass = false;
+ }
+
+ /*
+ * Spin for a bit with SM enabled to try to trigger another
+ * save/restore. We can't use syscalls without exiting
+ * streaming mode.
+ */
+ for (i = 0; i < 100000000; i++)
+ smstart_sm();
+
+ /*
+ * TODO: Verify that ZA was preserved over the VL change and
+ * spin.
+ */
+
+ /* Clean up after ourselves */
+ smstop();
+ ret = prctl(sve_data->prctl_set, sve_data->default_vl);
+ if (ret != sve_data->default_vl) {
+ ksft_print_msg("Failed to restore SVE VL %d: %d\n",
+ sve_data->default_vl, ret);
+ pass = false;
+ }
+
+ ksft_test_result(pass, "change_sve_with_za\n");
+}
+
+typedef void (*test_all_type)(void);
+
+static const struct {
+ const char *name;
+ test_all_type test;
+} all_types_tests[] = {
+ { "change_sve_with_za", change_sve_with_za },
+};
+
int main(void)
{
+ bool all_supported = true;
int i, j;
ksft_print_header();
- ksft_set_plan(ARRAY_SIZE(tests) * ARRAY_SIZE(vec_data));
+ ksft_set_plan(ARRAY_SIZE(tests) * ARRAY_SIZE(vec_data) +
+ ARRAY_SIZE(all_types_tests));
for (i = 0; i < ARRAY_SIZE(vec_data); i++) {
struct vec_data *data = &vec_data[i];
unsigned long supported;
supported = getauxval(data->hwcap_type) & data->hwcap;
+ if (!supported)
+ all_supported = false;
for (j = 0; j < ARRAY_SIZE(tests); j++) {
if (supported)
@@ -666,5 +778,12 @@ int main(void)
}
}
+ for (i = 0; i < ARRAY_SIZE(all_types_tests); i++) {
+ if (all_supported)
+ all_types_tests[i].test();
+ else
+ ksft_test_result_skip("%s\n", all_types_tests[i].name);
+ }
+
ksft_exit_pass();
}
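The new change_sve_with_za() test drives the vector length through prctl(PR_SVE_SET_VL), which returns the newly configured VL (or -1 on error) just like the existing per-VL loops. A minimal standalone sketch of that call, assuming an SVE-capable kernel and the prctl constants being reachable via <sys/prctl.h>:

```c
/* Sketch: set the SVE vector length via prctl() and read back the result.
 * Assumes an SVE-capable kernel; the requested VL is illustrative only.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>

int main(void)
{
	int vl;

	if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) {
		printf("SVE not supported\n");
		return 0;
	}

	/* Ask for the minimum VL (VQ = 1, i.e. 128 bits = 16 bytes) */
	vl = prctl(PR_SVE_SET_VL, 16);
	if (vl == -1)
		perror("PR_SVE_SET_VL");
	else
		printf("SVE VL now %d bytes\n", vl & PR_SVE_VL_LEN_MASK);
	return 0;
}
```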
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.h b/tools/testing/selftests/arm64/signal/test_signals_utils.h
index 222093f51b67..762c8fe9c54a 100644
--- a/tools/testing/selftests/arm64/signal/test_signals_utils.h
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.h
@@ -8,6 +8,8 @@
#include <stdio.h>
#include <string.h>
+#include <linux/compiler.h>
+
#include "test_signals.h"
int test_init(struct tdescr *td);
@@ -60,13 +62,25 @@ static __always_inline bool get_current_context(struct tdescr *td,
size_t dest_sz)
{
static volatile bool seen_already;
+ int i;
+ char *uc = (char *)dest_uc;
assert(td && dest_uc);
/* it's a genuine invocation..reinit */
seen_already = 0;
td->live_uc_valid = 0;
td->live_sz = dest_sz;
- memset(dest_uc, 0x00, td->live_sz);
+
+ /*
+ * This is a memset() but we don't want the compiler to
+ * optimise it into either instructions or a library call
+ * which might be incompatible with streaming mode.
+ */
+ for (i = 0; i < td->live_sz; i++) {
+ uc[i] = 0;
+ OPTIMIZER_HIDE_VAR(uc[0]);
+ }
+
td->live_uc = dest_uc;
/*
* Grab ucontext_t triggering a SIGTRAP.
@@ -104,6 +118,17 @@ static __always_inline bool get_current_context(struct tdescr *td,
: "memory");
/*
+ * If we were grabbing a streaming mode context then we may
+ * have entered streaming mode behind the system's back and
+ * libc or compiler generated code might decide to do
+ * something invalid in streaming mode, or potentially even
+ * clobber the state of ZA. Issue an SMSTOP to exit both now
+ * that we have grabbed the state.
+ */
+ if (td->feats_supported & FEAT_SME)
+ asm volatile("msr S0_3_C4_C6_3, xzr");
+
+ /*
* If we get here with seen_already==1 it implies the td->live_uc
* context has been used to get back here....this probably means
* a test has failed to cause a SEGV...anyway live_uc does not
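The hand-rolled clearing loop added above exists because a plain memset() may be compiled into SIMD code or a libc call that is not safe in streaming mode; OPTIMIZER_HIDE_VAR() is an empty asm that stops the compiler from recognising the loop as a memset. A rough standalone equivalent, with a simplified stand-in for the linux/compiler.h macro:

```c
/* Sketch: byte-wise clear that the compiler cannot fold back into memset(). */
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for linux/compiler.h's OPTIMIZER_HIDE_VAR() */
#define OPTIMIZER_HIDE_VAR(var)	asm volatile("" : "+r"(var))

static void careful_bzero(char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		buf[i] = 0;
		OPTIMIZER_HIDE_VAR(buf[0]);	/* defeat memset() pattern matching */
	}
}

int main(void)
{
	char scratch[64];

	careful_bzero(scratch, sizeof(scratch));
	printf("first byte: %d\n", scratch[0]);
	return 0;
}
```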
diff --git a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
index e1eb4d5c027a..2e384d731618 100644
--- a/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
+++ b/tools/testing/selftests/arm64/signal/testcases/zt_regs.c
@@ -65,6 +65,7 @@ int zt_regs_run(struct tdescr *td, siginfo_t *si, ucontext_t *uc)
if (memcmp(zeros, (char *)zt + ZT_SIG_REGS_OFFSET,
ZT_SIG_REGS_SIZE(zt->nregs)) != 0) {
fprintf(stderr, "ZT data invalid\n");
+ free(zeros);
return 1;
}
diff --git a/tools/testing/selftests/cachestat/Makefile b/tools/testing/selftests/cachestat/Makefile
index fca73aaa7d14..778b54ebb036 100644
--- a/tools/testing/selftests/cachestat/Makefile
+++ b/tools/testing/selftests/cachestat/Makefile
@@ -3,6 +3,6 @@ TEST_GEN_PROGS := test_cachestat
CFLAGS += $(KHDR_INCLUDES)
CFLAGS += -Wall
-CFLAGS += -lrt
+LDLIBS += -lrt
include ../lib.mk
diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
index 54d09b820ed4..4804c7dc7b31 100644
--- a/tools/testing/selftests/cachestat/test_cachestat.c
+++ b/tools/testing/selftests/cachestat/test_cachestat.c
@@ -4,10 +4,12 @@
#include <stdio.h>
#include <stdbool.h>
#include <linux/kernel.h>
+#include <linux/magic.h>
#include <linux/mman.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/syscall.h>
+#include <sys/vfs.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
@@ -15,11 +17,12 @@
#include "../kselftest.h"
+#define NR_TESTS 9
+
static const char * const dev_files[] = {
"/dev/zero", "/dev/null", "/dev/urandom",
"/proc/version", "/proc"
};
-static const int cachestat_nr = 451;
void print_cachestat(struct cachestat *cs)
{
@@ -91,19 +94,33 @@ out:
}
/*
+ * fsync() is implemented via noop_fsync() on tmpfs. This makes the fsync()
+ * test fail below, so we need to check whether the test file lives on a tmpfs.
+ */
+static bool is_on_tmpfs(int fd)
+{
+ struct statfs statfs_buf;
+
+ if (fstatfs(fd, &statfs_buf))
+ return false;
+
+ return statfs_buf.f_type == TMPFS_MAGIC;
+}
+
+/*
* Open/create the file at filename, (optionally) write random data to it
* (exactly num_pages), then test the cachestat syscall on this file.
*
* If test_fsync == true, fsync the file, then check the number of dirty
* pages.
*/
-bool test_cachestat(const char *filename, bool write_random, bool create,
- bool test_fsync, unsigned long num_pages, int open_flags,
- mode_t open_mode)
+static int test_cachestat(const char *filename, bool write_random, bool create,
+ bool test_fsync, unsigned long num_pages,
+ int open_flags, mode_t open_mode)
{
size_t PS = sysconf(_SC_PAGESIZE);
int filesize = num_pages * PS;
- bool ret = true;
+ int ret = KSFT_PASS;
long syscall_ret;
struct cachestat cs;
struct cachestat_range cs_range = { 0, filesize };
@@ -112,7 +129,7 @@ bool test_cachestat(const char *filename, bool write_random, bool create,
if (fd == -1) {
ksft_print_msg("Unable to create/open file.\n");
- ret = false;
+ ret = KSFT_FAIL;
goto out;
} else {
ksft_print_msg("Create/open %s\n", filename);
@@ -121,18 +138,18 @@ bool test_cachestat(const char *filename, bool write_random, bool create,
if (write_random) {
if (!write_exactly(fd, filesize)) {
ksft_print_msg("Unable to access urandom.\n");
- ret = false;
+ ret = KSFT_FAIL;
goto out1;
}
}
- syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
+ syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
ksft_print_msg("Cachestat call returned %ld\n", syscall_ret);
if (syscall_ret) {
ksft_print_msg("Cachestat returned non-zero.\n");
- ret = false;
+ ret = KSFT_FAIL;
goto out1;
} else {
@@ -142,17 +159,19 @@ bool test_cachestat(const char *filename, bool write_random, bool create,
if (cs.nr_cache + cs.nr_evicted != num_pages) {
ksft_print_msg(
"Total number of cached and evicted pages is off.\n");
- ret = false;
+ ret = KSFT_FAIL;
}
}
}
if (test_fsync) {
- if (fsync(fd)) {
+ if (is_on_tmpfs(fd)) {
+ ret = KSFT_SKIP;
+ } else if (fsync(fd)) {
ksft_print_msg("fsync fails.\n");
- ret = false;
+ ret = KSFT_FAIL;
} else {
- syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
+ syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
ksft_print_msg("Cachestat call (after fsync) returned %ld\n",
syscall_ret);
@@ -161,13 +180,13 @@ bool test_cachestat(const char *filename, bool write_random, bool create,
print_cachestat(&cs);
if (cs.nr_dirty) {
- ret = false;
+ ret = KSFT_FAIL;
ksft_print_msg(
"Number of dirty should be zero after fsync.\n");
}
} else {
ksft_print_msg("Cachestat (after fsync) returned non-zero.\n");
- ret = false;
+ ret = KSFT_FAIL;
goto out1;
}
}
@@ -213,7 +232,7 @@ bool test_cachestat_shmem(void)
goto close_fd;
}
- syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
+ syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0);
if (syscall_ret) {
ksft_print_msg("Cachestat returned non-zero.\n");
@@ -236,13 +255,29 @@ out:
int main(void)
{
- int ret = 0;
+ int ret;
+
+ ksft_print_header();
+
+ ret = syscall(__NR_cachestat, -1, NULL, NULL, 0);
+ if (ret == -1 && errno == ENOSYS)
+ ksft_exit_skip("cachestat syscall not available\n");
+
+ ksft_set_plan(NR_TESTS);
+
+ if (ret == -1 && errno == EBADF) {
+ ksft_test_result_pass("bad file descriptor recognized\n");
+ ret = 0;
+ } else {
+ ksft_test_result_fail("bad file descriptor ignored\n");
+ ret = 1;
+ }
for (int i = 0; i < 5; i++) {
const char *dev_filename = dev_files[i];
if (test_cachestat(dev_filename, false, false, false,
- 4, O_RDONLY, 0400))
+ 4, O_RDONLY, 0400) == KSFT_PASS)
ksft_test_result_pass("cachestat works with %s\n", dev_filename);
else {
ksft_test_result_fail("cachestat fails with %s\n", dev_filename);
@@ -251,13 +286,27 @@ int main(void)
}
if (test_cachestat("tmpfilecachestat", true, true,
- true, 4, O_CREAT | O_RDWR, 0400 | 0600))
+ false, 4, O_CREAT | O_RDWR, 0600) == KSFT_PASS)
ksft_test_result_pass("cachestat works with a normal file\n");
else {
ksft_test_result_fail("cachestat fails with normal file\n");
ret = 1;
}
+ switch (test_cachestat("tmpfilecachestat", true, true,
+ true, 4, O_CREAT | O_RDWR, 0600)) {
+ case KSFT_FAIL:
+ ksft_test_result_fail("cachestat fsync fails with normal file\n");
+ ret = KSFT_FAIL;
+ break;
+ case KSFT_PASS:
+ ksft_test_result_pass("cachestat fsync works with a normal file\n");
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("tmpfilecachestat is on tmpfs\n");
+ break;
+ }
+
if (test_cachestat_shmem())
ksft_test_result_pass("cachestat works with a shmem file\n");
else {
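The test now reaches the syscall through __NR_cachestat rather than the hard-coded 451, and skips the fsync check on tmpfs where fsync() is a no-op. A minimal sketch of calling cachestat(2) directly (assumes headers new enough to define __NR_cachestat and the struct cachestat UAPI in <linux/mman.h>; the default path is only a placeholder):

```c
/* Sketch: query the page cache state of a file via cachestat(2).
 * A range length of 0 means "from off to the end of the file".
 */
#include <fcntl.h>
#include <linux/mman.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct cachestat_range range = { 0, 0 };
	struct cachestat cs;
	int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);

	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0)) {
		perror("cachestat");
		return 1;
	}
	printf("cached %llu, dirty %llu, evicted %llu pages\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_evicted);
	close(fd);
	return 0;
}
```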
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index 1b2cec9d18a4..ed2e50bb1e76 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -75,11 +75,11 @@ static int test_kmem_basic(const char *root)
sleep(1);
slab1 = cg_read_key_long(cg, "memory.stat", "slab ");
- if (slab1 <= 0)
+ if (slab1 < 0)
goto cleanup;
current = cg_read_long(cg, "memory.current");
- if (current <= 0)
+ if (current < 0)
goto cleanup;
if (slab1 < slab0 / 2 && current < slab0 / 2)
diff --git a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
index 7b2d421f09cf..4917dbb35a44 100755
--- a/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
+++ b/tools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh
@@ -22,14 +22,12 @@ server_ip4=192.168.1.254
echo 180 >/proc/sys/kernel/panic
# build namespaces
-ip link add dev link1_1 type veth peer name link1_2
-
ip netns add "server"
-ip link set dev link1_2 netns server up name eth0
+ip netns add "client"
+ip -n client link add eth0 type veth peer name eth0 netns server
+ip netns exec server ip link set dev eth0 up
ip netns exec server ip addr add ${server_ip4}/24 dev eth0
-ip netns add "client"
-ip link set dev link1_1 netns client down name eth0
ip netns exec client ip link add dev bond0 down type bond mode 1 \
miimon 100 all_slaves_active 1
ip netns exec client ip link set dev eth0 down master bond0
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore b/tools/testing/selftests/fchmodat2/.gitignore
index 24e27957efcc..82a4846cbc4b 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/.gitignore
+++ b/tools/testing/selftests/fchmodat2/.gitignore
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-srcu.c
+/*_test
diff --git a/tools/testing/selftests/fchmodat2/Makefile b/tools/testing/selftests/fchmodat2/Makefile
new file mode 100644
index 000000000000..20839f8e43f2
--- /dev/null
+++ b/tools/testing/selftests/fchmodat2/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES)
+TEST_GEN_PROGS := fchmodat2_test
+
+include ../lib.mk
diff --git a/tools/testing/selftests/fchmodat2/fchmodat2_test.c b/tools/testing/selftests/fchmodat2/fchmodat2_test.c
new file mode 100644
index 000000000000..e0319417124d
--- /dev/null
+++ b/tools/testing/selftests/fchmodat2/fchmodat2_test.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <syscall.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+int sys_fchmodat2(int dfd, const char *filename, mode_t mode, int flags)
+{
+ int ret = syscall(__NR_fchmodat2, dfd, filename, mode, flags);
+
+ return ret >= 0 ? ret : -errno;
+}
+
+int setup_testdir(void)
+{
+ int dfd, ret;
+ char dirname[] = "/tmp/ksft-fchmodat2.XXXXXX";
+
+ /* Make the top-level directory. */
+ if (!mkdtemp(dirname))
+ ksft_exit_fail_msg("%s: failed to create tmpdir\n", __func__);
+
+ dfd = open(dirname, O_PATH | O_DIRECTORY);
+ if (dfd < 0)
+ ksft_exit_fail_msg("%s: failed to open tmpdir\n", __func__);
+
+ ret = openat(dfd, "regfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);
+ if (ret < 0)
+ ksft_exit_fail_msg("%s: failed to create file in tmpdir\n",
+ __func__);
+ close(ret);
+
+ ret = symlinkat("regfile", dfd, "symlink");
+ if (ret < 0)
+ ksft_exit_fail_msg("%s: failed to create symlink in tmpdir\n",
+ __func__);
+
+ return dfd;
+}
+
+int expect_mode(int dfd, const char *filename, mode_t expect_mode)
+{
+ struct stat st;
+ int ret = fstatat(dfd, filename, &st, AT_SYMLINK_NOFOLLOW);
+
+ if (ret)
+ ksft_exit_fail_msg("%s: %s: fstatat failed\n",
+ __func__, filename);
+
+ return (st.st_mode == expect_mode);
+}
+
+void test_regfile(void)
+{
+ int dfd, ret;
+
+ dfd = setup_testdir();
+
+ ret = sys_fchmodat2(dfd, "regfile", 0640, 0);
+
+ if (ret < 0)
+ ksft_exit_fail_msg("%s: fchmodat2(noflag) failed\n", __func__);
+
+ if (!expect_mode(dfd, "regfile", 0100640))
+ ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2\n",
+ __func__);
+
+ ret = sys_fchmodat2(dfd, "regfile", 0600, AT_SYMLINK_NOFOLLOW);
+
+ if (ret < 0)
+ ksft_exit_fail_msg("%s: fchmodat2(AT_SYMLINK_NOFOLLOW) failed\n",
+ __func__);
+
+ if (!expect_mode(dfd, "regfile", 0100600))
+ ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2 with nofollow\n",
+ __func__);
+
+ ksft_test_result_pass("fchmodat2(regfile)\n");
+}
+
+void test_symlink(void)
+{
+ int dfd, ret;
+
+ dfd = setup_testdir();
+
+ ret = sys_fchmodat2(dfd, "symlink", 0640, 0);
+
+ if (ret < 0)
+ ksft_exit_fail_msg("%s: fchmodat2(noflag) failed\n", __func__);
+
+ if (!expect_mode(dfd, "regfile", 0100640))
+ ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2\n",
+ __func__);
+
+ if (!expect_mode(dfd, "symlink", 0120777))
+ ksft_exit_fail_msg("%s: wrong symlink mode bits after fchmodat2\n",
+ __func__);
+
+ ret = sys_fchmodat2(dfd, "symlink", 0600, AT_SYMLINK_NOFOLLOW);
+
+ /*
+ * On certain filesystems (e.g. xfs or btrfs), the chmod operation on a
+ * symlink fails. So we only check the symlink mode bits when the call
+ * succeeded, and mark the test as skipped when it failed.
+ *
+ * https://sourceware.org/legacy-ml/libc-alpha/2020-02/msg00467.html
+ */
+ if (ret == 0 && !expect_mode(dfd, "symlink", 0120600))
+ ksft_exit_fail_msg("%s: wrong symlink mode bits after fchmodat2 with nofollow\n",
+ __func__);
+
+ if (!expect_mode(dfd, "regfile", 0100640))
+ ksft_exit_fail_msg("%s: wrong file mode bits after fchmodat2 with nofollow\n",
+ __func__);
+
+ if (ret != 0)
+ ksft_test_result_skip("fchmodat2(symlink)\n");
+ else
+ ksft_test_result_pass("fchmodat2(symlink)\n");
+}
+
+#define NUM_TESTS 2
+
+int main(int argc, char **argv)
+{
+ ksft_print_header();
+ ksft_set_plan(NUM_TESTS);
+
+ test_regfile();
+ test_symlink();
+
+ if (ksft_get_fail_cnt() + ksft_get_error_cnt() > 0)
+ ksft_exit_fail();
+ else
+ ksft_exit_pass();
+}
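Since libc does not yet wrap fchmodat2(), the test calls it via syscall(2); the same applies outside the selftest. A hedged minimal sketch (the file name is a placeholder, and __NR_fchmodat2 requires sufficiently new kernel headers):

```c
/* Sketch: chmod a path relative to the current directory without
 * following symlinks, using the raw fchmodat2 syscall number.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* "some-file" is a placeholder path for illustration */
	int ret = syscall(__NR_fchmodat2, AT_FDCWD, "some-file", 0600,
			  AT_SYMLINK_NOFOLLOW);

	if (ret < 0)
		fprintf(stderr, "fchmodat2: %s\n",
			errno == ENOSYS ? "not supported by this kernel"
					: "failed");
	return ret < 0 ? 1 : 0;
}
```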
diff --git a/tools/testing/selftests/filelock/Makefile b/tools/testing/selftests/filelock/Makefile
new file mode 100644
index 000000000000..478e82f8b464
--- /dev/null
+++ b/tools/testing/selftests/filelock/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TEST_GEN_PROGS := ofdlocks
+
+include ../lib.mk
diff --git a/tools/testing/selftests/filelock/ofdlocks.c b/tools/testing/selftests/filelock/ofdlocks.c
new file mode 100644
index 000000000000..a55b79810ab2
--- /dev/null
+++ b/tools/testing/selftests/filelock/ofdlocks.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <assert.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include "../kselftest.h"
+
+static int lock_set(int fd, struct flock *fl)
+{
+ int ret;
+
+ fl->l_pid = 0; // needed for OFD locks
+ fl->l_whence = SEEK_SET;
+ ret = fcntl(fd, F_OFD_SETLK, fl);
+ if (ret)
+ perror("fcntl()");
+ return ret;
+}
+
+static int lock_get(int fd, struct flock *fl)
+{
+ int ret;
+
+ fl->l_pid = 0; // needed for OFD locks
+ fl->l_whence = SEEK_SET;
+ ret = fcntl(fd, F_OFD_GETLK, fl);
+ if (ret)
+ perror("fcntl()");
+ return ret;
+}
+
+int main(void)
+{
+ int rc;
+ struct flock fl, fl2;
+ int fd = open("/tmp/aa", O_RDWR | O_CREAT | O_EXCL, 0600);
+ int fd2 = open("/tmp/aa", O_RDONLY);
+
+ unlink("/tmp/aa");
+ assert(fd != -1);
+ assert(fd2 != -1);
+ ksft_print_msg("[INFO] opened fds %i %i\n", fd, fd2);
+
+ /* Set some read lock */
+ fl.l_type = F_RDLCK;
+ fl.l_start = 5;
+ fl.l_len = 3;
+ rc = lock_set(fd, &fl);
+ if (rc == 0) {
+ ksft_print_msg
+ ("[SUCCESS] set OFD read lock on first fd\n");
+ } else {
+ ksft_print_msg("[FAIL] to set OFD read lock on first fd\n");
+ return -1;
+ }
+ /* Make sure read locks do not conflict on different fds. */
+ fl.l_type = F_RDLCK;
+ fl.l_start = 5;
+ fl.l_len = 1;
+ rc = lock_get(fd2, &fl);
+ if (rc != 0)
+ return -1;
+ if (fl.l_type != F_UNLCK) {
+ ksft_print_msg("[FAIL] read locks conflicted\n");
+ return -1;
+ }
+ /* Make sure read/write locks do conflict on different fds. */
+ fl.l_type = F_WRLCK;
+ fl.l_start = 5;
+ fl.l_len = 1;
+ rc = lock_get(fd2, &fl);
+ if (rc != 0)
+ return -1;
+ if (fl.l_type != F_UNLCK) {
+ ksft_print_msg
+ ("[SUCCESS] read and write locks conflicted\n");
+ } else {
+ ksft_print_msg
+ ("[SUCCESS] read and write locks not conflicted\n");
+ return -1;
+ }
+ /* Get info about the lock on first fd. */
+ fl.l_type = F_UNLCK;
+ fl.l_start = 5;
+ fl.l_len = 1;
+ rc = lock_get(fd, &fl);
+ if (rc != 0) {
+ ksft_print_msg
+ ("[FAIL] F_OFD_GETLK with F_UNLCK not supported\n");
+ return -1;
+ }
+ if (fl.l_type != F_UNLCK) {
+ ksft_print_msg
+ ("[SUCCESS] F_UNLCK test returns: locked, type %i pid %i len %zi\n",
+ fl.l_type, fl.l_pid, fl.l_len);
+ } else {
+ ksft_print_msg
+ ("[FAIL] F_OFD_GETLK with F_UNLCK did not return lock info\n");
+ return -1;
+ }
+ /* Try the same but by locking everything by len==0. */
+ fl2.l_type = F_UNLCK;
+ fl2.l_start = 0;
+ fl2.l_len = 0;
+ rc = lock_get(fd, &fl2);
+ if (rc != 0) {
+ ksft_print_msg
+ ("[FAIL] F_OFD_GETLK with F_UNLCK not supported\n");
+ return -1;
+ }
+ if (memcmp(&fl, &fl2, sizeof(fl))) {
+ ksft_print_msg
+		    ("[FAIL] F_UNLCK with len==0 returned different lock info: type %i pid %i len %zi\n",
+ fl.l_type, fl.l_pid, fl.l_len);
+ return -1;
+ }
+ ksft_print_msg("[SUCCESS] F_UNLCK with len==0 returned the same\n");
+ /* Get info about the lock on second fd - no locks on it. */
+ fl.l_type = F_UNLCK;
+ fl.l_start = 0;
+ fl.l_len = 0;
+ lock_get(fd2, &fl);
+ if (fl.l_type != F_UNLCK) {
+ ksft_print_msg
+		    ("[FAIL] F_OFD_GETLK with F_UNLCK returned lock info from another fd\n");
+ return -1;
+ }
+ return 0;
+}
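
As exercised by ofdlocks.c above, open file description (OFD) locks use fcntl() with F_OFD_SETLK/F_OFD_GETLK, require l_pid to be zero on input, and conflict per open file description rather than per process. A small standalone sketch of the read-versus-write conflict the test asserts (not part of the selftest; the path name is arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd1 = open("/tmp/ofd-demo", O_RDWR | O_CREAT, 0600);
	int fd2 = open("/tmp/ofd-demo", O_RDONLY);
	struct flock fl = {
		.l_type = F_RDLCK,
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,	/* 0 == to end of file */
		.l_pid = 0,	/* must be 0 for OFD locks */
	};

	if (fd1 < 0 || fd2 < 0)
		return 1;

	/* Read lock via the first open file description. */
	if (fcntl(fd1, F_OFD_SETLK, &fl) != 0)
		return 1;

	/* Probe a write lock via the second description: the read lock
	 * above conflicts, so fl is rewritten with its details. */
	fl.l_type = F_WRLCK;
	fl.l_pid = 0;
	if (fcntl(fd2, F_OFD_GETLK, &fl) == 0)
		printf("conflict: %s\n",
		       fl.l_type == F_UNLCK ? "no" : "yes");

	unlink("/tmp/ofd-demo");
	return 0;
}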
diff --git a/tools/testing/selftests/filesystems/fat/run_fat_tests.sh b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
index 7f35dc3d15df..d61264d4795d 100755
--- a/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
+++ b/tools/testing/selftests/filesystems/fat/run_fat_tests.sh
@@ -12,7 +12,7 @@ set -u
set -o pipefail
BASE_DIR="$(dirname $0)"
-TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXX)"
+TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXXXX)"
IMG_PATH="${TMP_DIR}/fat.img"
MNT_PATH="${TMP_DIR}/mnt"
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc
new file mode 100644
index 000000000000..63b76cf2a360
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot1.tc
@@ -0,0 +1,31 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Snapshot and tracing_cpumask
+# requires: trace_marker tracing_cpumask snapshot
+# flags: instance
+
+# This test case is contrived to reproduce a problem where the CPU buffers
+# become unavailable, due to 'record_disabled' of array_buffer and
+# max_buffer being left in an inconsistent state.
+
+# Store the original cpumask
+ORIG_CPUMASK=`cat tracing_cpumask`
+
+# Stop tracing on all CPUs
+echo 0 > tracing_cpumask
+
+# Take a snapshot of the main buffer
+echo 1 > snapshot
+
+# Restore the original cpumask; note that some CPUs should now be traced
+echo ${ORIG_CPUMASK} > tracing_cpumask
+
+# Set tracing on
+echo 1 > tracing_on
+
+# Write a log into buffer
+echo "test input 1" > trace_marker
+
+# Ensure the log was written, i.e. the CPU buffers are still available
+grep -q "test input 1" trace
+exit 0
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index 3651ce17beeb..d183f878360b 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -24,6 +24,7 @@
static long timeout_ns = 100000; /* 100us default timeout */
static futex_t futex_pi;
+static pthread_barrier_t barrier;
void usage(char *prog)
{
@@ -48,6 +49,8 @@ void *get_pi_lock(void *arg)
if (ret != 0)
error("futex_lock_pi failed\n", ret);
+ pthread_barrier_wait(&barrier);
+
/* Blocks forever */
ret = futex_wait(&lock, 0, NULL, 0);
error("futex_wait failed\n", ret);
@@ -130,6 +133,7 @@ int main(int argc, char *argv[])
basename(argv[0]));
ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
+ pthread_barrier_init(&barrier, NULL, 2);
pthread_create(&thread, NULL, get_pi_lock, NULL);
/* initialize relative timeout */
@@ -163,6 +167,9 @@ int main(int argc, char *argv[])
res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+ /* Wait until the other thread calls futex_lock_pi() */
+ pthread_barrier_wait(&barrier);
+ pthread_barrier_destroy(&barrier);
/*
* FUTEX_LOCK_PI with CLOCK_REALTIME
* Due to historical reasons, FUTEX_LOCK_PI supports only realtime
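
The futex_wait_timeout change above replaces an implicit timing assumption with an explicit rendezvous: a pthread_barrier_t initialized for two parties makes pthread_barrier_wait() block until both the main thread and the helper have arrived, so the main thread only proceeds once get_pi_lock() really holds the PI futex. A reduced sketch of the same pattern (generic, not tied to futexes; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	/* ... acquire the resource the main thread depends on ... */
	pthread_barrier_wait(&barrier);	/* signal: resource is held */
	return arg;
}

int main(void)
{
	pthread_t thread;

	pthread_barrier_init(&barrier, NULL, 2);	/* two parties */
	pthread_create(&thread, NULL, worker, NULL);

	pthread_barrier_wait(&barrier);	/* returns only once worker arrived */
	pthread_join(thread, NULL);
	pthread_barrier_destroy(&barrier);
	printf("worker is guaranteed to have acquired the resource\n");
	return 0;
}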
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 5fd49ad0c696..e05ac8261046 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -938,7 +938,11 @@ void __wait_for_test(struct __test_metadata *t)
fprintf(TH_LOG_STREAM,
"# %s: Test terminated by timeout\n", t->name);
} else if (WIFEXITED(status)) {
- if (t->termsig != -1) {
+ if (WEXITSTATUS(status) == 255) {
+ /* SKIP */
+ t->passed = 1;
+ t->skip = 1;
+ } else if (t->termsig != -1) {
t->passed = 0;
fprintf(TH_LOG_STREAM,
"# %s: Test exited normally instead of by signal (code: %d)\n",
@@ -950,11 +954,6 @@ void __wait_for_test(struct __test_metadata *t)
case 0:
t->passed = 1;
break;
- /* SKIP */
- case 255:
- t->passed = 1;
- t->skip = 1;
- break;
/* Other failure, assume step report. */
default:
t->passed = 0;
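
The harness change above moves the exit-code-255 check ahead of the termsig handling, so a test that was expected to die by a signal but instead exits with the kselftest skip code is now classified as a skip rather than a failure. A simplified sketch of the resulting decision order (an approximation of the harness logic, not a drop-in replacement):

#include <stdbool.h>
#include <sys/wait.h>

/* Simplified classification: a normal exit with code 255 is a SKIP
 * regardless of whether a terminating signal was expected. */
static void classify(int status, int termsig, bool *passed, bool *skip)
{
	*passed = false;
	*skip = false;

	if (!WIFEXITED(status))
		return;			/* signal cases handled elsewhere */
	if (WEXITSTATUS(status) == 255) {
		*passed = true;		/* SKIP */
		*skip = true;
	} else if (termsig != -1) {
		*passed = false;	/* expected a signal, got an exit */
	} else {
		*passed = (WEXITSTATUS(status) == 0);
	}
}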
diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c
index 4adaad1b822f..20294553a5dd 100644
--- a/tools/testing/selftests/mm/hmm-tests.c
+++ b/tools/testing/selftests/mm/hmm-tests.c
@@ -57,9 +57,14 @@ enum {
#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))
/* Just the flags we need, copied from mm.h: */
+
+#ifndef FOLL_WRITE
#define FOLL_WRITE 0x01 /* check pte is writable */
-#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite */
+#endif
+#ifndef FOLL_LONGTERM
+#define FOLL_LONGTERM 0x100 /* mapping lifetime is indefinite */
+#endif
FIXTURE(hmm)
{
int fd;
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index cd3cc52c59b4..8da562a9ae87 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -51,3 +51,4 @@ CONFIG_AMT=m
CONFIG_VXLAN=m
CONFIG_IP_SCTP=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_CRYPTO_ARIA=y
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 4b63708c6a81..297d972558fb 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -30,12 +30,15 @@ static int fips_enabled;
struct tls_crypto_info_keys {
union {
+ struct tls_crypto_info crypto_info;
struct tls12_crypto_info_aes_gcm_128 aes128;
struct tls12_crypto_info_chacha20_poly1305 chacha20;
struct tls12_crypto_info_sm4_gcm sm4gcm;
struct tls12_crypto_info_sm4_ccm sm4ccm;
struct tls12_crypto_info_aes_ccm_128 aesccm128;
struct tls12_crypto_info_aes_gcm_256 aesgcm256;
+ struct tls12_crypto_info_aria_gcm_128 ariagcm128;
+ struct tls12_crypto_info_aria_gcm_256 ariagcm256;
};
size_t len;
};
@@ -76,6 +79,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
tls12->aesgcm256.info.version = tls_version;
tls12->aesgcm256.info.cipher_type = cipher_type;
break;
+ case TLS_CIPHER_ARIA_GCM_128:
+ tls12->len = sizeof(struct tls12_crypto_info_aria_gcm_128);
+ tls12->ariagcm128.info.version = tls_version;
+ tls12->ariagcm128.info.cipher_type = cipher_type;
+ break;
+ case TLS_CIPHER_ARIA_GCM_256:
+ tls12->len = sizeof(struct tls12_crypto_info_aria_gcm_256);
+ tls12->ariagcm256.info.version = tls_version;
+ tls12->ariagcm256.info.cipher_type = cipher_type;
+ break;
default:
break;
}
@@ -228,6 +241,31 @@ TEST_F(tls_basic, base_base)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
};
+TEST_F(tls_basic, bad_cipher)
+{
+ struct tls_crypto_info_keys tls12;
+
+ tls12.crypto_info.version = 200;
+ tls12.crypto_info.cipher_type = TLS_CIPHER_AES_GCM_128;
+ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);
+
+ tls12.crypto_info.version = TLS_1_2_VERSION;
+ tls12.crypto_info.cipher_type = 50;
+ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);
+
+ tls12.crypto_info.version = TLS_1_2_VERSION;
+ tls12.crypto_info.cipher_type = 59;
+ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);
+
+ tls12.crypto_info.version = TLS_1_2_VERSION;
+ tls12.crypto_info.cipher_type = 10;
+ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);
+
+ tls12.crypto_info.version = TLS_1_2_VERSION;
+ tls12.crypto_info.cipher_type = 70;
+ EXPECT_EQ(setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, sizeof(struct tls12_crypto_info_aes_gcm_128)), -1);
+}
+
FIXTURE(tls)
{
int fd, cfd;
@@ -312,6 +350,18 @@ FIXTURE_VARIANT_ADD(tls, 13_nopad)
.nopad = true,
};
+FIXTURE_VARIANT_ADD(tls, 12_aria_gcm)
+{
+ .tls_version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_ARIA_GCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 12_aria_gcm_256)
+{
+ .tls_version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_ARIA_GCM_256,
+};
+
FIXTURE_SETUP(tls)
{
struct tls_crypto_info_keys tls12;
@@ -1472,6 +1522,40 @@ TEST_F(tls, shutdown_reuse)
EXPECT_EQ(errno, EISCONN);
}
+TEST_F(tls, getsockopt)
+{
+ struct tls_crypto_info_keys expect, get;
+ socklen_t len;
+
+ /* get only the version/cipher */
+ len = sizeof(struct tls_crypto_info);
+ memrnd(&get, sizeof(get));
+ EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), 0);
+ EXPECT_EQ(len, sizeof(struct tls_crypto_info));
+ EXPECT_EQ(get.crypto_info.version, variant->tls_version);
+ EXPECT_EQ(get.crypto_info.cipher_type, variant->cipher_type);
+
+ /* get the full crypto_info */
+ tls_crypto_info_init(variant->tls_version, variant->cipher_type, &expect);
+ len = expect.len;
+ memrnd(&get, sizeof(get));
+ EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), 0);
+ EXPECT_EQ(len, expect.len);
+ EXPECT_EQ(get.crypto_info.version, variant->tls_version);
+ EXPECT_EQ(get.crypto_info.cipher_type, variant->cipher_type);
+ EXPECT_EQ(memcmp(&get, &expect, expect.len), 0);
+
+ /* short get should fail */
+ len = sizeof(struct tls_crypto_info) - 1;
+ EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ /* partial get of the cipher data should fail */
+ len = expect.len - 1;
+ EXPECT_EQ(getsockopt(self->fd, SOL_TLS, TLS_TX, &get, &len), -1);
+ EXPECT_EQ(errno, EINVAL);
+}
+
FIXTURE(tls_err)
{
int fd, cfd;
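
The new getsockopt test depends on the TLS ULP accepting two optlen values on TLS_TX: sizeof(struct tls_crypto_info) to fetch only the version and cipher type, and the full per-cipher structure size to fetch the key material as well. A minimal sketch of the short form, assuming sock is already a connected TCP socket with the "tls" ULP attached and TLS_TX configured (the SOL_TLS fallback value is taken from the kernel headers):

#include <linux/tls.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* from include/linux/socket.h */
#endif

static void print_tx_cipher(int sock)
{
	struct tls_crypto_info info;
	socklen_t len = sizeof(info);

	/* Short get: only version and cipher_type are returned. */
	if (getsockopt(sock, SOL_TLS, TLS_TX, &info, &len) == 0)
		printf("TLS version 0x%x, cipher %u\n",
		       info.version, info.cipher_type);
}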
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
index 1b7b3c82f8ad..dfe66776a331 100644
--- a/tools/testing/selftests/nolibc/Makefile
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -14,6 +14,31 @@ include $(srctree)/scripts/subarch.include
ARCH = $(SUBARCH)
endif
+# XARCH extends the kernel's ARCH with a few variants of the same
+# architecture that differ only in the configuration, the toolchain
+# and the QEMU program used. It is copied as-is into ARCH except for
+# a few specific values which are mapped like this:
+#
+# XARCH | ARCH | config
+# -------------|-----------|-------------------------
+# ppc | powerpc | 32 bits
+# ppc64 | powerpc | 64 bits big endian
+# ppc64le | powerpc | 64 bits little endian
+#
+# It is recommended to only use XARCH, though it does no harm if
+# ARCH is also set. For simplicity, ARCH alone is sufficient for all
+# architectures where the two are equal.
+
+# configure default variants for target kernel supported architectures
+XARCH_powerpc = ppc
+XARCH = $(or $(XARCH_$(ARCH)),$(ARCH))
+
+# map from user input variants to their kernel supported architectures
+ARCH_ppc = powerpc
+ARCH_ppc64 = powerpc
+ARCH_ppc64le = powerpc
+ARCH := $(or $(ARCH_$(XARCH)),$(XARCH))
+
# kernel image names by architecture
IMAGE_i386 = arch/x86/boot/bzImage
IMAGE_x86_64 = arch/x86/boot/bzImage
@@ -21,10 +46,13 @@ IMAGE_x86 = arch/x86/boot/bzImage
IMAGE_arm64 = arch/arm64/boot/Image
IMAGE_arm = arch/arm/boot/zImage
IMAGE_mips = vmlinuz
+IMAGE_ppc = vmlinux
+IMAGE_ppc64 = vmlinux
+IMAGE_ppc64le = arch/powerpc/boot/zImage
IMAGE_riscv = arch/riscv/boot/Image
IMAGE_s390 = arch/s390/boot/bzImage
IMAGE_loongarch = arch/loongarch/boot/vmlinuz.efi
-IMAGE = $(IMAGE_$(ARCH))
+IMAGE = $(IMAGE_$(XARCH))
IMAGE_NAME = $(notdir $(IMAGE))
# default kernel configurations that appear to be usable
@@ -34,10 +62,13 @@ DEFCONFIG_x86 = defconfig
DEFCONFIG_arm64 = defconfig
DEFCONFIG_arm = multi_v7_defconfig
DEFCONFIG_mips = malta_defconfig
+DEFCONFIG_ppc = pmac32_defconfig
+DEFCONFIG_ppc64 = powernv_be_defconfig
+DEFCONFIG_ppc64le = powernv_defconfig
DEFCONFIG_riscv = defconfig
DEFCONFIG_s390 = defconfig
DEFCONFIG_loongarch = defconfig
-DEFCONFIG = $(DEFCONFIG_$(ARCH))
+DEFCONFIG = $(DEFCONFIG_$(XARCH))
# optional tests to run (default = all)
TEST =
@@ -49,10 +80,13 @@ QEMU_ARCH_x86 = x86_64
QEMU_ARCH_arm64 = aarch64
QEMU_ARCH_arm = arm
QEMU_ARCH_mips = mipsel # works with malta_defconfig
+QEMU_ARCH_ppc = ppc
+QEMU_ARCH_ppc64 = ppc64
+QEMU_ARCH_ppc64le = ppc64le
QEMU_ARCH_riscv = riscv64
QEMU_ARCH_s390 = s390x
QEMU_ARCH_loongarch = loongarch64
-QEMU_ARCH = $(QEMU_ARCH_$(ARCH))
+QEMU_ARCH = $(QEMU_ARCH_$(XARCH))
# QEMU_ARGS : some arch-specific args to pass to qemu
QEMU_ARGS_i386 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
@@ -61,10 +95,13 @@ QEMU_ARGS_x86 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(
QEMU_ARGS_arm64 = -M virt -cpu cortex-a53 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_ppc = -M g3beige -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_ppc64 = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_ppc64le = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_riscv = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_s390 = -M s390-ccw-virtio -m 1G -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_loongarch = -M virt -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
-QEMU_ARGS = $(QEMU_ARGS_$(ARCH)) $(QEMU_ARGS_EXTRA)
+QEMU_ARGS = $(QEMU_ARGS_$(XARCH)) $(QEMU_ARGS_EXTRA)
# OUTPUT is only set when run from the main makefile, otherwise
# it defaults to this nolibc directory.
@@ -76,13 +113,21 @@ else
Q=@
endif
+CFLAGS_ppc = -m32 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
+CFLAGS_ppc64 = -m64 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
+CFLAGS_ppc64le = -m64 -mlittle-endian -mno-vsx $(call cc-option,-mabi=elfv2)
CFLAGS_s390 = -m64
CFLAGS_mips = -EL
CFLAGS_STACKPROTECTOR ?= $(call cc-option,-mstack-protector-guard=global $(call cc-option,-fstack-protector-all))
-CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 \
+CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
$(call cc-option,-fno-stack-protector) \
- $(CFLAGS_$(ARCH)) $(CFLAGS_STACKPROTECTOR)
-LDFLAGS := -s
+ $(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR)
+LDFLAGS :=
+
+REPORT ?= awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{if (!f) printf("\n"); f++; print;} /\[SKIPPED\][\r]*$$/{s++} \
+ END{ printf("\n%3d test(s): %3d passed, %3d skipped, %3d failed => status: ", p+s+f, p, s, f); \
+ if (f) printf("failure\n"); else if (s) printf("warning\n"); else printf("success\n");; \
+ printf("\nSee all results in %s\n", ARGV[1]); }'
help:
@echo "Supported targets under selftests/nolibc:"
@@ -91,24 +136,25 @@ help:
@echo " sysroot create the nolibc sysroot here (uses \$$ARCH)"
@echo " nolibc-test build the executable (uses \$$CC and \$$CROSS_COMPILE)"
@echo " libc-test build an executable using the compiler's default libc instead"
- @echo " run-user runs the executable under QEMU (uses \$$ARCH, \$$TEST)"
+ @echo " run-user runs the executable under QEMU (uses \$$XARCH, \$$TEST)"
@echo " initramfs prepare the initramfs with nolibc-test"
- @echo " defconfig create a fresh new default config (uses \$$ARCH)"
- @echo " kernel (re)build the kernel with the initramfs (uses \$$ARCH)"
- @echo " run runs the kernel in QEMU after building it (uses \$$ARCH, \$$TEST)"
- @echo " rerun runs a previously prebuilt kernel in QEMU (uses \$$ARCH, \$$TEST)"
+ @echo " defconfig create a fresh new default config (uses \$$XARCH)"
+ @echo " kernel (re)build the kernel with the initramfs (uses \$$XARCH)"
+ @echo " run runs the kernel in QEMU after building it (uses \$$XARCH, \$$TEST)"
+ @echo " rerun runs a previously prebuilt kernel in QEMU (uses \$$XARCH, \$$TEST)"
@echo " clean clean the sysroot, initramfs, build and output files"
@echo ""
@echo "The output file is \"run.out\". Test ranges may be passed using \$$TEST."
@echo ""
@echo "Currently using the following variables:"
@echo " ARCH = $(ARCH)"
+ @echo " XARCH = $(XARCH)"
@echo " CROSS_COMPILE = $(CROSS_COMPILE)"
@echo " CC = $(CC)"
@echo " OUTPUT = $(OUTPUT)"
@echo " TEST = $(TEST)"
- @echo " QEMU_ARCH = $(if $(QEMU_ARCH),$(QEMU_ARCH),UNKNOWN_ARCH) [determined from \$$ARCH]"
- @echo " IMAGE_NAME = $(if $(IMAGE_NAME),$(IMAGE_NAME),UNKNOWN_ARCH) [determined from \$$ARCH]"
+ @echo " QEMU_ARCH = $(if $(QEMU_ARCH),$(QEMU_ARCH),UNKNOWN_ARCH) [determined from \$$XARCH]"
+ @echo " IMAGE_NAME = $(if $(IMAGE_NAME),$(IMAGE_NAME),UNKNOWN_ARCH) [determined from \$$XARCH]"
@echo ""
all: run
@@ -121,20 +167,33 @@ sysroot/$(ARCH)/include:
$(Q)$(MAKE) -C ../../../include/nolibc ARCH=$(ARCH) OUTPUT=$(CURDIR)/sysroot/ headers_standalone
$(Q)mv sysroot/sysroot sysroot/$(ARCH)
+ifneq ($(NOLIBC_SYSROOT),0)
nolibc-test: nolibc-test.c sysroot/$(ARCH)/include
$(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
-nostdlib -static -Isysroot/$(ARCH)/include $< -lgcc
+else
+nolibc-test: nolibc-test.c
+ $(QUIET_CC)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ \
+ -nostdlib -static -include ../../../include/nolibc/nolibc.h $< -lgcc
+endif
libc-test: nolibc-test.c
- $(QUIET_CC)$(CC) -o $@ $<
+ $(QUIET_CC)$(HOSTCC) -o $@ $<
+
+# local libc-test
+run-libc-test: libc-test
+ $(Q)./libc-test > "$(CURDIR)/run.out" || :
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+# local nolibc-test
+run-nolibc-test: nolibc-test
+ $(Q)./nolibc-test > "$(CURDIR)/run.out" || :
+ $(Q)$(REPORT) $(CURDIR)/run.out
# qemu user-land test
run-user: nolibc-test
$(Q)qemu-$(QEMU_ARCH) ./nolibc-test > "$(CURDIR)/run.out" || :
- $(Q)awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{f++} /\[SKIPPED\][\r]*$$/{s++} \
- END{ printf("%d test(s) passed, %d skipped, %d failed.", p, s, f); \
- if (s+f > 0) printf(" See all results in %s\n", ARGV[1]); else print; }' \
- $(CURDIR)/run.out
+ $(Q)$(REPORT) $(CURDIR)/run.out
initramfs: nolibc-test
$(QUIET_MKDIR)mkdir -p initramfs
@@ -150,18 +209,16 @@ kernel: initramfs
# run the tests after building the kernel
run: kernel
$(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(srctree)/$(IMAGE)" -serial stdio $(QEMU_ARGS) > "$(CURDIR)/run.out"
- $(Q)awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{f++} /\[SKIPPED\][\r]*$$/{s++} \
- END{ printf("%d test(s) passed, %d skipped, %d failed.", p, s, f); \
- if (s+f > 0) printf(" See all results in %s\n", ARGV[1]); else print; }' \
- $(CURDIR)/run.out
+ $(Q)$(REPORT) $(CURDIR)/run.out
# re-run the tests from an existing kernel
rerun:
$(Q)qemu-system-$(QEMU_ARCH) -display none -no-reboot -kernel "$(srctree)/$(IMAGE)" -serial stdio $(QEMU_ARGS) > "$(CURDIR)/run.out"
- $(Q)awk '/\[OK\][\r]*$$/{p++} /\[FAIL\][\r]*$$/{f++} /\[SKIPPED\][\r]*$$/{s++} \
- END{ printf("%d test(s) passed, %d skipped, %d failed.", p, s, f); \
- if (s+f > 0) printf(" See all results in %s\n", ARGV[1]); else print; }' \
- $(CURDIR)/run.out
+ $(Q)$(REPORT) $(CURDIR)/run.out
+
+# report with existing test log
+report:
+ $(Q)$(REPORT) $(CURDIR)/run.out
clean:
$(call QUIET_CLEAN, sysroot)
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 486334981e60..fb3bf91462e2 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#define _GNU_SOURCE
+#define _LARGEFILE64_SOURCE
/* libc-specific include files
* The program may be built in 3 ways:
@@ -14,7 +15,7 @@
#include <string.h>
#ifndef _NOLIBC_STDIO_H
/* standard libcs need more includes */
-#include <linux/reboot.h>
+#include <sys/auxv.h>
#include <sys/io.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -40,8 +41,21 @@
#endif
#endif
-/* will be used by nolibc by getenv() */
-char **environ;
+/* for the type of int_fast16_t and int_fast32_t, musl differs from glibc and nolibc */
+#define SINT_MAX_OF_TYPE(type) (((type)1 << (sizeof(type) * 8 - 2)) - (type)1 + ((type)1 << (sizeof(type) * 8 - 2)))
+#define SINT_MIN_OF_TYPE(type) (-SINT_MAX_OF_TYPE(type) - 1)
+
+/* will be used to test initialization of environ */
+static char **test_envp;
+
+/* will be used to test initialization of argv */
+static char **test_argv;
+
+/* will be used to test initialization of argc */
+static int test_argc;
+
+/* will be used by some test cases as a readable file, please don't write to it */
+static const char *argv0;
/* definition of a series of tests */
struct test {
@@ -66,7 +80,7 @@ char *itoa(int i)
/* returns the error name (e.g. "ENOENT") for common errors, "SUCCESS" for 0,
* or the decimal value for less common ones.
*/
-const char *errorname(int err)
+static const char *errorname(int err)
{
switch (err) {
case 0: return "SUCCESS";
@@ -120,17 +134,26 @@ static void putcharn(char c, size_t n)
fputs(buf, stdout);
}
-static int pad_spc(int llen, int cnt, const char *fmt, ...)
-{
- va_list args;
- int ret;
-
- putcharn(' ', cnt - llen);
+enum RESULT {
+ OK,
+ FAIL,
+ SKIPPED,
+};
- va_start(args, fmt);
- ret = vfprintf(stdout, fmt, args);
- va_end(args);
- return ret < 0 ? ret : ret + cnt - llen;
+static void result(int llen, enum RESULT r)
+{
+ const char *msg;
+
+ if (r == OK)
+ msg = " [OK]";
+ else if (r == SKIPPED)
+ msg = "[SKIPPED]";
+ else
+ msg = "[FAIL]";
+
+ if (llen < 64)
+ putcharn(' ', 64 - llen);
+ puts(msg);
}
 /* The tests below are intended to be used by the macros, which evaluate
@@ -140,173 +163,185 @@ static int pad_spc(int llen, int cnt, const char *fmt, ...)
*/
#define EXPECT_ZR(cond, expr) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_zr(expr, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_zr(expr, llen); } while (0)
-static int expect_zr(int expr, int llen)
+static __attribute__((unused))
+int expect_zr(int expr, int llen)
{
int ret = !(expr == 0);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_NZ(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_nz(expr, llen; } while (0)
+	do { if (!(cond)) result(llen, SKIPPED); else ret += expect_nz(expr, llen); } while (0)
-static int expect_nz(int expr, int llen)
+static __attribute__((unused))
+int expect_nz(int expr, int llen)
{
int ret = !(expr != 0);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_EQ(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_eq(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_eq(expr, llen, val); } while (0)
-static int expect_eq(uint64_t expr, int llen, uint64_t val)
+static __attribute__((unused))
+int expect_eq(uint64_t expr, int llen, uint64_t val)
{
int ret = !(expr == val);
llen += printf(" = %lld ", (long long)expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_NE(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_ne(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ne(expr, llen, val); } while (0)
-static int expect_ne(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_ne(int expr, int llen, int val)
{
int ret = !(expr != val);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_GE(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_ge(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ge(expr, llen, val); } while (0)
-static int expect_ge(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_ge(int expr, int llen, int val)
{
int ret = !(expr >= val);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_GT(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_gt(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_gt(expr, llen, val); } while (0)
-static int expect_gt(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_gt(int expr, int llen, int val)
{
int ret = !(expr > val);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_LE(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_le(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_le(expr, llen, val); } while (0)
-static int expect_le(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_le(int expr, int llen, int val)
{
int ret = !(expr <= val);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_LT(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_lt(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_lt(expr, llen, val); } while (0)
-static int expect_lt(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_lt(int expr, int llen, int val)
{
int ret = !(expr < val);
llen += printf(" = %d ", expr);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
#define EXPECT_SYSZR(cond, expr) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_syszr(expr, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_syszr(expr, llen); } while (0)
-static int expect_syszr(int expr, int llen)
+static __attribute__((unused))
+int expect_syszr(int expr, int llen)
{
int ret = 0;
if (expr) {
ret = 1;
llen += printf(" = %d %s ", expr, errorname(errno));
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
llen += printf(" = %d ", expr);
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_SYSEQ(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_syseq(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_syseq(expr, llen, val); } while (0)
-static int expect_syseq(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_syseq(int expr, int llen, int val)
{
int ret = 0;
if (expr != val) {
ret = 1;
llen += printf(" = %d %s ", expr, errorname(errno));
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
llen += printf(" = %d ", expr);
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_SYSNE(cond, expr, val) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_sysne(expr, llen, val); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_sysne(expr, llen, val); } while (0)
-static int expect_sysne(int expr, int llen, int val)
+static __attribute__((unused))
+int expect_sysne(int expr, int llen, int val)
{
int ret = 0;
if (expr == val) {
ret = 1;
llen += printf(" = %d %s ", expr, errorname(errno));
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
llen += printf(" = %d ", expr);
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_SYSER2(cond, expr, expret, experr1, experr2) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_syserr2(expr, expret, experr1, experr2, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_syserr2(expr, expret, experr1, experr2, llen); } while (0)
#define EXPECT_SYSER(cond, expr, expret, experr) \
EXPECT_SYSER2(cond, expr, expret, experr, 0)
-static int expect_syserr2(int expr, int expret, int experr1, int experr2, int llen)
+static __attribute__((unused))
+int expect_syserr2(int expr, int expret, int experr1, int experr2, int llen)
{
int ret = 0;
int _errno = errno;
@@ -318,117 +353,238 @@ static int expect_syserr2(int expr, int expret, int experr1, int experr2, int ll
llen += printf(" != (%d %s) ", expret, errorname(experr1));
else
llen += printf(" != (%d %s %s) ", expret, errorname(experr1), errorname(experr2));
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_PTRZR(cond, expr) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_ptrzr(expr, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrzr(expr, llen); } while (0)
-static int expect_ptrzr(const void *expr, int llen)
+static __attribute__((unused))
+int expect_ptrzr(const void *expr, int llen)
{
int ret = 0;
llen += printf(" = <%p> ", expr);
if (expr) {
ret = 1;
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_PTRNZ(cond, expr) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_ptrnz(expr, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrnz(expr, llen); } while (0)
-static int expect_ptrnz(const void *expr, int llen)
+static __attribute__((unused))
+int expect_ptrnz(const void *expr, int llen)
{
int ret = 0;
llen += printf(" = <%p> ", expr);
if (!expr) {
ret = 1;
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
+ } else {
+ result(llen, OK);
+ }
+ return ret;
+}
+
+#define EXPECT_PTREQ(cond, expr, cmp) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptreq(expr, llen, cmp); } while (0)
+
+static __attribute__((unused))
+int expect_ptreq(const void *expr, int llen, const void *cmp)
+{
+ int ret = 0;
+
+ llen += printf(" = <%p> ", expr);
+ if (expr != cmp) {
+ ret = 1;
+ result(llen, FAIL);
+ } else {
+ result(llen, OK);
+ }
+ return ret;
+}
+
+#define EXPECT_PTRNE(cond, expr, cmp) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrne(expr, llen, cmp); } while (0)
+
+static __attribute__((unused))
+int expect_ptrne(const void *expr, int llen, const void *cmp)
+{
+ int ret = 0;
+
+ llen += printf(" = <%p> ", expr);
+ if (expr == cmp) {
+ ret = 1;
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
+#define EXPECT_PTRGE(cond, expr, cmp) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrge(expr, llen, cmp); } while (0)
+
+static __attribute__((unused))
+int expect_ptrge(const void *expr, int llen, const void *cmp)
+{
+ int ret = !(expr >= cmp);
+
+ llen += printf(" = <%p> ", expr);
+ result(llen, ret ? FAIL : OK);
+ return ret;
+}
+
+#define EXPECT_PTRGT(cond, expr, cmp) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrgt(expr, llen, cmp); } while (0)
+
+static __attribute__((unused))
+int expect_ptrgt(const void *expr, int llen, const void *cmp)
+{
+ int ret = !(expr > cmp);
+
+ llen += printf(" = <%p> ", expr);
+ result(llen, ret ? FAIL : OK);
+ return ret;
+}
+
+
+#define EXPECT_PTRLE(cond, expr, cmp) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrle(expr, llen, cmp); } while (0)
+
+static __attribute__((unused))
+int expect_ptrle(const void *expr, int llen, const void *cmp)
+{
+ int ret = !(expr <= cmp);
+
+ llen += printf(" = <%p> ", expr);
+ result(llen, ret ? FAIL : OK);
+ return ret;
+}
+
+
+#define EXPECT_PTRLT(cond, expr, cmp) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrlt(expr, llen, cmp); } while (0)
+
+static __attribute__((unused))
+int expect_ptrlt(const void *expr, int llen, const void *cmp)
+{
+ int ret = !(expr < cmp);
+
+ llen += printf(" = <%p> ", expr);
+ result(llen, ret ? FAIL : OK);
+ return ret;
+}
+
+#define EXPECT_PTRER2(cond, expr, expret, experr1, experr2) \
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_ptrerr2(expr, expret, experr1, experr2, llen); } while (0)
+
+#define EXPECT_PTRER(cond, expr, expret, experr) \
+ EXPECT_PTRER2(cond, expr, expret, experr, 0)
+
+static __attribute__((unused))
+int expect_ptrerr2(const void *expr, const void *expret, int experr1, int experr2, int llen)
+{
+ int ret = 0;
+ int _errno = errno;
+
+ llen += printf(" = <%p> %s ", expr, errorname(_errno));
+ if (expr != expret || (_errno != experr1 && _errno != experr2)) {
+ ret = 1;
+ if (experr2 == 0)
+ llen += printf(" != (<%p> %s) ", expret, errorname(experr1));
+ else
+ llen += printf(" != (<%p> %s %s) ", expret, errorname(experr1), errorname(experr2));
+ result(llen, FAIL);
+ } else {
+ result(llen, OK);
+ }
+ return ret;
+}
#define EXPECT_STRZR(cond, expr) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_strzr(expr, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strzr(expr, llen); } while (0)
-static int expect_strzr(const char *expr, int llen)
+static __attribute__((unused))
+int expect_strzr(const char *expr, int llen)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (expr) {
ret = 1;
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_STRNZ(cond, expr) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_strnz(expr, llen); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strnz(expr, llen); } while (0)
-static int expect_strnz(const char *expr, int llen)
+static __attribute__((unused))
+int expect_strnz(const char *expr, int llen)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (!expr) {
ret = 1;
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_STREQ(cond, expr, cmp) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_streq(expr, llen, cmp); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_streq(expr, llen, cmp); } while (0)
-static int expect_streq(const char *expr, int llen, const char *cmp)
+static __attribute__((unused))
+int expect_streq(const char *expr, int llen, const char *cmp)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (strcmp(expr, cmp) != 0) {
ret = 1;
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
#define EXPECT_STRNE(cond, expr, cmp) \
- do { if (!cond) pad_spc(llen, 64, "[SKIPPED]\n"); else ret += expect_strne(expr, llen, cmp); } while (0)
+ do { if (!(cond)) result(llen, SKIPPED); else ret += expect_strne(expr, llen, cmp); } while (0)
-static int expect_strne(const char *expr, int llen, const char *cmp)
+static __attribute__((unused))
+int expect_strne(const char *expr, int llen, const char *cmp)
{
int ret = 0;
llen += printf(" = <%s> ", expr);
if (strcmp(expr, cmp) == 0) {
ret = 1;
- llen += pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
} else {
- llen += pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
}
return ret;
}
@@ -438,6 +594,51 @@ static int expect_strne(const char *expr, int llen, const char *cmp)
#define CASE_TEST(name) \
case __LINE__: llen += printf("%d %s", test, #name);
+int run_startup(int min, int max)
+{
+ int test;
+ int ret = 0;
+ /* kernel at least passes HOME and TERM, shell passes more */
+ int env_total = 2;
+	/* checking argv/argv0, environ and _auxv against NULL is not enough, so compare with sbrk(0) or &end */
+ extern char end;
+ char *brk = sbrk(0) != (void *)-1 ? sbrk(0) : &end;
+	/* unlike nolibc, neither glibc nor musl provides a global _auxv */
+ const unsigned long *test_auxv = (void *)-1;
+#ifdef NOLIBC
+ test_auxv = _auxv;
+#endif
+
+ for (test = min; test >= 0 && test <= max; test++) {
+ int llen = 0; /* line length */
+
+		/* avoid leaving empty lines below, as this would insert holes
+		 * into the test numbers.
+		 */
+ switch (test + __LINE__ + 1) {
+ CASE_TEST(argc); EXPECT_GE(1, test_argc, 1); break;
+ CASE_TEST(argv_addr); EXPECT_PTRGT(1, test_argv, brk); break;
+ CASE_TEST(argv_environ); EXPECT_PTRLT(1, test_argv, environ); break;
+ CASE_TEST(argv_total); EXPECT_EQ(1, environ - test_argv - 1, test_argc ?: 1); break;
+ CASE_TEST(argv0_addr); EXPECT_PTRGT(1, argv0, brk); break;
+ CASE_TEST(argv0_str); EXPECT_STRNZ(1, argv0 > brk ? argv0 : NULL); break;
+ CASE_TEST(argv0_len); EXPECT_GE(1, argv0 > brk ? strlen(argv0) : 0, 1); break;
+ CASE_TEST(environ_addr); EXPECT_PTRGT(1, environ, brk); break;
+ CASE_TEST(environ_envp); EXPECT_PTREQ(1, environ, test_envp); break;
+ CASE_TEST(environ_auxv); EXPECT_PTRLT(test_auxv != (void *)-1, environ, test_auxv); break;
+ CASE_TEST(environ_total); EXPECT_GE(test_auxv != (void *)-1, (void *)test_auxv - (void *)environ - 1, env_total); break;
+ CASE_TEST(environ_HOME); EXPECT_PTRNZ(1, getenv("HOME")); break;
+ CASE_TEST(auxv_addr); EXPECT_PTRGT(test_auxv != (void *)-1, test_auxv, brk); break;
+ CASE_TEST(auxv_AT_UID); EXPECT_EQ(1, getauxval(AT_UID), getuid()); break;
+ CASE_TEST(auxv_AT_PAGESZ); EXPECT_GE(1, getauxval(AT_PAGESZ), 4096); break;
+ case __LINE__:
+ return ret; /* must be last */
+ /* note: do not set any defaults so as to permit holes above */
+ }
+ }
+ return ret;
+}
+
/* used by some syscall tests below */
int test_getdents64(const char *dir)
@@ -458,9 +659,9 @@ int test_getdents64(const char *dir)
return ret;
}
-static int test_getpagesize(void)
+int test_getpagesize(void)
{
- long x = getpagesize();
+ int x = getpagesize();
int c;
if (x < 0)
@@ -487,7 +688,7 @@ static int test_getpagesize(void)
return !c;
}
-static int test_fork(void)
+int test_fork(void)
{
int status;
pid_t pid;
@@ -512,14 +713,14 @@ static int test_fork(void)
}
}
-static int test_stat_timestamps(void)
+int test_stat_timestamps(void)
{
struct stat st;
if (sizeof(st.st_atim.tv_sec) != sizeof(st.st_atime))
return 1;
- if (stat("/proc/self/", &st))
+ if (stat("/proc/self/", &st) && stat(argv0, &st) && stat("/", &st))
return 1;
if (st.st_atim.tv_sec != st.st_atime || st.st_atim.tv_nsec > 1000000000)
@@ -534,6 +735,86 @@ static int test_stat_timestamps(void)
return 0;
}
+int test_mmap_munmap(void)
+{
+ int ret, fd, i, page_size;
+ void *mem;
+ size_t file_size, length;
+ off_t offset, pa_offset;
+ struct stat stat_buf;
+ const char * const files[] = {
+ "/dev/zero",
+ "/proc/1/exe", "/proc/self/exe",
+ argv0,
+ NULL
+ };
+
+ page_size = getpagesize();
+ if (page_size < 0)
+ return 1;
+
+	/* find a suitable file to mmap: one that exists and is accessible */
+ for (i = 0; files[i] != NULL; i++) {
+ ret = fd = open(files[i], O_RDONLY);
+ if (ret == -1)
+ continue;
+ else
+ break;
+ }
+ if (ret == -1)
+ return 1;
+
+ ret = stat(files[i], &stat_buf);
+ if (ret == -1)
+ goto end;
+
+	/* the special /dev/zero reports a file size of 0, so assign one manually */
+ if (i == 0)
+ file_size = 3*page_size;
+ else
+ file_size = stat_buf.st_size;
+
+ offset = file_size - 1;
+ if (offset < 0)
+ offset = 0;
+ length = file_size - offset;
+ pa_offset = offset & ~(page_size - 1);
+
+ mem = mmap(NULL, length + offset - pa_offset, PROT_READ, MAP_SHARED, fd, pa_offset);
+ if (mem == MAP_FAILED) {
+ ret = 1;
+ goto end;
+ }
+
+ ret = munmap(mem, length + offset - pa_offset);
+
+end:
+ close(fd);
+ return !!ret;
+}
+
+int test_pipe(void)
+{
+ const char *const msg = "hello, nolibc";
+ int pipefd[2];
+ char buf[32];
+ size_t len;
+
+ if (pipe(pipefd) == -1)
+ return 1;
+
+ write(pipefd[1], msg, strlen(msg));
+ close(pipefd[1]);
+ len = read(pipefd[0], buf, sizeof(buf));
+ close(pipefd[0]);
+
+ if (len != strlen(msg))
+ return 1;
+
+ return !!memcmp(buf, msg, len);
+}
+
+
/* Run syscall tests between IDs <min> and <max>.
* Return 0 on success, non-zero on failure.
*/
@@ -548,6 +829,7 @@ int run_syscall(int min, int max)
int tmp;
int ret = 0;
void *p1, *p2;
+ int has_gettid = 1;
/* <proc> indicates whether or not /proc is mounted */
proc = stat("/proc", &stat_buf) == 0;
@@ -555,6 +837,11 @@ int run_syscall(int min, int max)
/* this will be used to skip certain tests that can't be run unprivileged */
euid0 = geteuid() == 0;
+ /* from 2.30, glibc provides gettid() */
+#if defined(__GLIBC_MINOR__) && defined(__GLIBC__)
+ has_gettid = __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 30);
+#endif
+
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
@@ -564,25 +851,24 @@ int run_syscall(int min, int max)
switch (test + __LINE__ + 1) {
CASE_TEST(getpid); EXPECT_SYSNE(1, getpid(), -1); break;
CASE_TEST(getppid); EXPECT_SYSNE(1, getppid(), -1); break;
-#ifdef NOLIBC
- CASE_TEST(gettid); EXPECT_SYSNE(1, gettid(), -1); break;
-#endif
+ CASE_TEST(gettid); EXPECT_SYSNE(has_gettid, gettid(), -1); break;
CASE_TEST(getpgid_self); EXPECT_SYSNE(1, getpgid(0), -1); break;
CASE_TEST(getpgid_bad); EXPECT_SYSER(1, getpgid(-1), -1, ESRCH); break;
CASE_TEST(kill_0); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
CASE_TEST(kill_CONT); EXPECT_SYSZR(1, kill(getpid(), 0)); break;
CASE_TEST(kill_BADPID); EXPECT_SYSER(1, kill(INT_MAX, 0), -1, ESRCH); break;
+ CASE_TEST(sbrk_0); EXPECT_PTRNE(1, sbrk(0), (void *)-1); break;
CASE_TEST(sbrk); if ((p1 = p2 = sbrk(4096)) != (void *)-1) p2 = sbrk(-4096); EXPECT_SYSZR(1, (p2 == (void *)-1) || p2 == p1); break;
CASE_TEST(brk); EXPECT_SYSZR(1, brk(sbrk(0))); break;
- CASE_TEST(chdir_root); EXPECT_SYSZR(1, chdir("/")); break;
+ CASE_TEST(chdir_root); EXPECT_SYSZR(1, chdir("/")); chdir(getenv("PWD")); break;
CASE_TEST(chdir_dot); EXPECT_SYSZR(1, chdir(".")); break;
CASE_TEST(chdir_blah); EXPECT_SYSER(1, chdir("/blah"), -1, ENOENT); break;
- CASE_TEST(chmod_net); EXPECT_SYSZR(proc, chmod("/proc/self/net", 0555)); break;
+ CASE_TEST(chmod_argv0); EXPECT_SYSZR(1, chmod(argv0, 0555)); break;
CASE_TEST(chmod_self); EXPECT_SYSER(proc, chmod("/proc/self", 0555), -1, EPERM); break;
CASE_TEST(chown_self); EXPECT_SYSER(proc, chown("/proc/self", 0, 0), -1, EPERM); break;
CASE_TEST(chroot_root); EXPECT_SYSZR(euid0, chroot("/")); break;
CASE_TEST(chroot_blah); EXPECT_SYSER(1, chroot("/proc/self/blah"), -1, ENOENT); break;
- CASE_TEST(chroot_exe); EXPECT_SYSER(proc, chroot("/proc/self/exe"), -1, ENOTDIR); break;
+ CASE_TEST(chroot_exe); EXPECT_SYSER(1, chroot(argv0), -1, ENOTDIR); break;
CASE_TEST(close_m1); EXPECT_SYSER(1, close(-1), -1, EBADF); break;
CASE_TEST(close_dup); EXPECT_SYSZR(1, close(dup(0))); break;
CASE_TEST(dup_0); tmp = dup(0); EXPECT_SYSNE(1, tmp, -1); close(tmp); break;
@@ -603,23 +889,28 @@ int run_syscall(int min, int max)
CASE_TEST(link_root1); EXPECT_SYSER(1, link("/", "/"), -1, EEXIST); break;
CASE_TEST(link_blah); EXPECT_SYSER(1, link("/proc/self/blah", "/blah"), -1, ENOENT); break;
CASE_TEST(link_dir); EXPECT_SYSER(euid0, link("/", "/blah"), -1, EPERM); break;
- CASE_TEST(link_cross); EXPECT_SYSER(proc, link("/proc/self/net", "/blah"), -1, EXDEV); break;
+ CASE_TEST(link_cross); EXPECT_SYSER(proc, link("/proc/self/cmdline", "/blah"), -1, EXDEV); break;
CASE_TEST(lseek_m1); EXPECT_SYSER(1, lseek(-1, 0, SEEK_SET), -1, EBADF); break;
CASE_TEST(lseek_0); EXPECT_SYSER(1, lseek(0, 0, SEEK_SET), -1, ESPIPE); break;
CASE_TEST(mkdir_root); EXPECT_SYSER(1, mkdir("/", 0755), -1, EEXIST); break;
+ CASE_TEST(mmap_bad); EXPECT_PTRER(1, mmap(NULL, 0, PROT_READ, MAP_PRIVATE, 0, 0), MAP_FAILED, EINVAL); break;
+ CASE_TEST(munmap_bad); EXPECT_SYSER(1, munmap((void *)1, 0), -1, EINVAL); break;
+ CASE_TEST(mmap_munmap_good); EXPECT_SYSZR(1, test_mmap_munmap()); break;
CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", 0), -1); if (tmp != -1) close(tmp); break;
CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", 0), -1, ENOENT); if (tmp != -1) close(tmp); break;
+ CASE_TEST(pipe); EXPECT_SYSZR(1, test_pipe()); break;
CASE_TEST(poll_null); EXPECT_SYSZR(1, poll(NULL, 0, 0)); break;
CASE_TEST(poll_stdout); EXPECT_SYSNE(1, ({ struct pollfd fds = { 1, POLLOUT, 0}; poll(&fds, 1, 0); }), -1); break;
CASE_TEST(poll_fault); EXPECT_SYSER(1, poll((void *)1, 1, 0), -1, EFAULT); break;
CASE_TEST(prctl); EXPECT_SYSER(1, prctl(PR_SET_NAME, (unsigned long)NULL, 0, 0, 0), -1, EFAULT); break;
CASE_TEST(read_badf); EXPECT_SYSER(1, read(-1, &tmp, 1), -1, EBADF); break;
+ CASE_TEST(rmdir_blah); EXPECT_SYSER(1, rmdir("/blah"), -1, ENOENT); break;
CASE_TEST(sched_yield); EXPECT_SYSZR(1, sched_yield()); break;
CASE_TEST(select_null); EXPECT_SYSZR(1, ({ struct timeval tv = { 0 }; select(0, NULL, NULL, NULL, &tv); })); break;
CASE_TEST(select_stdout); EXPECT_SYSNE(1, ({ fd_set fds; FD_ZERO(&fds); FD_SET(1, &fds); select(2, NULL, &fds, NULL, NULL); }), -1); break;
CASE_TEST(select_fault); EXPECT_SYSER(1, select(1, (void *)1, NULL, NULL, 0), -1, EFAULT); break;
CASE_TEST(stat_blah); EXPECT_SYSER(1, stat("/proc/self/blah", &stat_buf), -1, ENOENT); break;
- CASE_TEST(stat_fault); EXPECT_SYSER(1, stat(NULL, &stat_buf), -1, EFAULT); break;
+ CASE_TEST(stat_fault); EXPECT_SYSER(1, stat((void *)1, &stat_buf), -1, EFAULT); break;
CASE_TEST(stat_timestamps); EXPECT_SYSZR(1, test_stat_timestamps()); break;
CASE_TEST(symlink_root); EXPECT_SYSER(1, symlink("/", "/"), -1, EEXIST); break;
CASE_TEST(unlink_root); EXPECT_SYSER(1, unlink("/"), -1, EISDIR); break;
@@ -642,9 +933,7 @@ int run_syscall(int min, int max)
int run_stdlib(int min, int max)
{
int test;
- int tmp;
int ret = 0;
- void *p1, *p2;
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
@@ -699,32 +988,23 @@ int run_stdlib(int min, int max)
CASE_TEST(limit_int_fast8_max); EXPECT_EQ(1, INT_FAST8_MAX, (int_fast8_t) 0x7f); break;
CASE_TEST(limit_int_fast8_min); EXPECT_EQ(1, INT_FAST8_MIN, (int_fast8_t) 0x80); break;
CASE_TEST(limit_uint_fast8_max); EXPECT_EQ(1, UINT_FAST8_MAX, (uint_fast8_t) 0xff); break;
- CASE_TEST(limit_int_fast16_min); EXPECT_EQ(1, INT_FAST16_MIN, (int_fast16_t) INTPTR_MIN); break;
- CASE_TEST(limit_int_fast16_max); EXPECT_EQ(1, INT_FAST16_MAX, (int_fast16_t) INTPTR_MAX); break;
+ CASE_TEST(limit_int_fast16_min); EXPECT_EQ(1, INT_FAST16_MIN, (int_fast16_t) SINT_MIN_OF_TYPE(int_fast16_t)); break;
+ CASE_TEST(limit_int_fast16_max); EXPECT_EQ(1, INT_FAST16_MAX, (int_fast16_t) SINT_MAX_OF_TYPE(int_fast16_t)); break;
CASE_TEST(limit_uint_fast16_max); EXPECT_EQ(1, UINT_FAST16_MAX, (uint_fast16_t) UINTPTR_MAX); break;
- CASE_TEST(limit_int_fast32_min); EXPECT_EQ(1, INT_FAST32_MIN, (int_fast32_t) INTPTR_MIN); break;
- CASE_TEST(limit_int_fast32_max); EXPECT_EQ(1, INT_FAST32_MAX, (int_fast32_t) INTPTR_MAX); break;
+ CASE_TEST(limit_int_fast32_min); EXPECT_EQ(1, INT_FAST32_MIN, (int_fast32_t) SINT_MIN_OF_TYPE(int_fast32_t)); break;
+ CASE_TEST(limit_int_fast32_max); EXPECT_EQ(1, INT_FAST32_MAX, (int_fast32_t) SINT_MAX_OF_TYPE(int_fast32_t)); break;
CASE_TEST(limit_uint_fast32_max); EXPECT_EQ(1, UINT_FAST32_MAX, (uint_fast32_t) UINTPTR_MAX); break;
CASE_TEST(limit_int_fast64_min); EXPECT_EQ(1, INT_FAST64_MIN, (int_fast64_t) INT64_MIN); break;
CASE_TEST(limit_int_fast64_max); EXPECT_EQ(1, INT_FAST64_MAX, (int_fast64_t) INT64_MAX); break;
CASE_TEST(limit_uint_fast64_max); EXPECT_EQ(1, UINT_FAST64_MAX, (uint_fast64_t) UINT64_MAX); break;
-#if __SIZEOF_LONG__ == 8
- CASE_TEST(limit_intptr_min); EXPECT_EQ(1, INTPTR_MIN, (intptr_t) 0x8000000000000000LL); break;
- CASE_TEST(limit_intptr_max); EXPECT_EQ(1, INTPTR_MAX, (intptr_t) 0x7fffffffffffffffLL); break;
- CASE_TEST(limit_uintptr_max); EXPECT_EQ(1, UINTPTR_MAX, (uintptr_t) 0xffffffffffffffffULL); break;
- CASE_TEST(limit_ptrdiff_min); EXPECT_EQ(1, PTRDIFF_MIN, (ptrdiff_t) 0x8000000000000000LL); break;
- CASE_TEST(limit_ptrdiff_max); EXPECT_EQ(1, PTRDIFF_MAX, (ptrdiff_t) 0x7fffffffffffffffLL); break;
- CASE_TEST(limit_size_max); EXPECT_EQ(1, SIZE_MAX, (size_t) 0xffffffffffffffffULL); break;
-#elif __SIZEOF_LONG__ == 4
- CASE_TEST(limit_intptr_min); EXPECT_EQ(1, INTPTR_MIN, (intptr_t) 0x80000000); break;
- CASE_TEST(limit_intptr_max); EXPECT_EQ(1, INTPTR_MAX, (intptr_t) 0x7fffffff); break;
- CASE_TEST(limit_uintptr_max); EXPECT_EQ(1, UINTPTR_MAX, (uintptr_t) 0xffffffffU); break;
- CASE_TEST(limit_ptrdiff_min); EXPECT_EQ(1, PTRDIFF_MIN, (ptrdiff_t) 0x80000000); break;
- CASE_TEST(limit_ptrdiff_max); EXPECT_EQ(1, PTRDIFF_MAX, (ptrdiff_t) 0x7fffffff); break;
- CASE_TEST(limit_size_max); EXPECT_EQ(1, SIZE_MAX, (size_t) 0xffffffffU); break;
-#else
-# warning "__SIZEOF_LONG__ is undefined"
-#endif /* __SIZEOF_LONG__ */
+ CASE_TEST(sizeof_long_sane); EXPECT_EQ(1, sizeof(long) == 8 || sizeof(long) == 4, 1); break;
+ CASE_TEST(limit_intptr_min); EXPECT_EQ(1, INTPTR_MIN, sizeof(long) == 8 ? (intptr_t) 0x8000000000000000LL : (intptr_t) 0x80000000); break;
+ CASE_TEST(limit_intptr_max); EXPECT_EQ(1, INTPTR_MAX, sizeof(long) == 8 ? (intptr_t) 0x7fffffffffffffffLL : (intptr_t) 0x7fffffff); break;
+ CASE_TEST(limit_uintptr_max); EXPECT_EQ(1, UINTPTR_MAX, sizeof(long) == 8 ? (uintptr_t) 0xffffffffffffffffULL : (uintptr_t) 0xffffffffU); break;
+ CASE_TEST(limit_ptrdiff_min); EXPECT_EQ(1, PTRDIFF_MIN, sizeof(long) == 8 ? (ptrdiff_t) 0x8000000000000000LL : (ptrdiff_t) 0x80000000); break;
+ CASE_TEST(limit_ptrdiff_max); EXPECT_EQ(1, PTRDIFF_MAX, sizeof(long) == 8 ? (ptrdiff_t) 0x7fffffffffffffffLL : (ptrdiff_t) 0x7fffffff); break;
+ CASE_TEST(limit_size_max); EXPECT_EQ(1, SIZE_MAX, sizeof(long) == 8 ? (size_t) 0xffffffffffffffffULL : (size_t) 0xffffffffU); break;
+
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
@@ -736,22 +1016,23 @@ int run_stdlib(int min, int max)
#define EXPECT_VFPRINTF(c, expected, fmt, ...) \
ret += expect_vfprintf(llen, c, expected, fmt, ##__VA_ARGS__)
-static int expect_vfprintf(int llen, size_t c, const char *expected, const char *fmt, ...)
+static int expect_vfprintf(int llen, int c, const char *expected, const char *fmt, ...)
{
- int ret, fd, w, r;
+ int ret, fd;
+ ssize_t w, r;
char buf[100];
FILE *memfile;
va_list args;
- fd = memfd_create("vfprintf", 0);
+ fd = open("/tmp", O_TMPFILE | O_EXCL | O_RDWR, 0600);
if (fd == -1) {
- pad_spc(llen, 64, "[FAIL]\n");
- return 1;
+ result(llen, SKIPPED);
+ return 0;
}
memfile = fdopen(fd, "w+");
if (!memfile) {
- pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
return 1;
}
@@ -760,8 +1041,8 @@ static int expect_vfprintf(int llen, size_t c, const char *expected, const char
va_end(args);
if (w != c) {
- llen += printf(" written(%d) != %d", w, (int) c);
- pad_spc(llen, 64, "[FAIL]\n");
+ llen += printf(" written(%d) != %d", (int)w, c);
+ result(llen, FAIL);
return 1;
}
@@ -769,29 +1050,27 @@ static int expect_vfprintf(int llen, size_t c, const char *expected, const char
lseek(fd, 0, SEEK_SET);
r = read(fd, buf, sizeof(buf) - 1);
- buf[r] = '\0';
fclose(memfile);
if (r != w) {
- llen += printf(" written(%d) != read(%d)", w, r);
- pad_spc(llen, 64, "[FAIL]\n");
+ llen += printf(" written(%d) != read(%d)", (int)w, (int)r);
+ result(llen, FAIL);
return 1;
}
+ buf[r] = '\0';
llen += printf(" \"%s\" = \"%s\"", expected, buf);
ret = strncmp(expected, buf, c);
- pad_spc(llen, 64, ret ? "[FAIL]\n" : " [OK]\n");
+ result(llen, ret ? FAIL : OK);
return ret;
}
static int run_vfprintf(int min, int max)
{
int test;
- int tmp;
int ret = 0;
- void *p1, *p2;
for (test = min; test >= 0 && test <= max; test++) {
int llen = 0; /* line length */
@@ -829,7 +1108,8 @@ static int smash_stack(void)
return 1;
}
-static int run_protection(int min, int max)
+static int run_protection(int min __attribute__((unused)),
+ int max __attribute__((unused)))
{
pid_t pid;
int llen = 0, status;
@@ -838,14 +1118,14 @@ static int run_protection(int min, int max)
#if !defined(_NOLIBC_STACKPROTECTOR)
llen += printf("not supported");
- pad_spc(llen, 64, "[SKIPPED]\n");
+ result(llen, SKIPPED);
return 0;
#endif
#if defined(_NOLIBC_STACKPROTECTOR)
if (!__stack_chk_guard) {
llen += printf("__stack_chk_guard not initialized");
- pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
return 1;
}
#endif
@@ -856,7 +1136,7 @@ static int run_protection(int min, int max)
switch (pid) {
case -1:
llen += printf("fork()");
- pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
return 1;
case 0:
@@ -872,10 +1152,10 @@ static int run_protection(int min, int max)
if (pid == -1 || !WIFSIGNALED(status) || WTERMSIG(status) != SIGABRT) {
llen += printf("waitpid()");
- pad_spc(llen, 64, "[FAIL]\n");
+ result(llen, FAIL);
return 1;
}
- pad_spc(llen, 64, " [OK]\n");
+ result(llen, OK);
return 0;
}
}
@@ -891,11 +1171,13 @@ int prepare(void)
*/
if (stat("/dev/.", &stat_buf) == 0 || mkdir("/dev", 0755) == 0) {
if (stat("/dev/console", &stat_buf) != 0 ||
- stat("/dev/null", &stat_buf) != 0) {
+ stat("/dev/null", &stat_buf) != 0 ||
+ stat("/dev/zero", &stat_buf) != 0) {
/* try devtmpfs first, otherwise fall back to manual creation */
if (mount("/dev", "/dev", "devtmpfs", 0, 0) != 0) {
mknod("/dev/console", 0600 | S_IFCHR, makedev(5, 1));
mknod("/dev/null", 0666 | S_IFCHR, makedev(1, 3));
+ mknod("/dev/zero", 0666 | S_IFCHR, makedev(1, 5));
}
}
}
@@ -922,16 +1204,23 @@ int prepare(void)
/* try to mount /proc if not mounted. Silently fail otherwise */
if (stat("/proc/.", &stat_buf) == 0 || mkdir("/proc", 0755) == 0) {
- if (stat("/proc/self", &stat_buf) != 0)
- mount("/proc", "/proc", "proc", 0, 0);
+ if (stat("/proc/self", &stat_buf) != 0) {
+ /* If not mountable, remove /proc completely to avoid misuse */
+ if (mount("none", "/proc", "proc", 0, 0) != 0)
+ rmdir("/proc");
+ }
}
+ /* some tests rely on a writable /tmp */
+ mkdir("/tmp", 0755);
+
return 0;
}
/* This is the definition of known test names, with their functions */
static const struct test test_names[] = {
/* add new tests here */
+ { .name = "startup", .func = run_startup },
{ .name = "syscall", .func = run_syscall },
{ .name = "stdlib", .func = run_stdlib },
{ .name = "vfprintf", .func = run_vfprintf },
@@ -939,6 +1228,35 @@ static const struct test test_names[] = {
{ 0 }
};
+static int is_setting_valid(char *test)
+{
+ int idx, len, test_len, valid = 0;
+ char delimiter;
+
+ if (!test)
+ return valid;
+
+ test_len = strlen(test);
+
+ for (idx = 0; test_names[idx].name; idx++) {
+ len = strlen(test_names[idx].name);
+ if (test_len < len)
+ continue;
+
+ if (strncmp(test, test_names[idx].name, len) != 0)
+ continue;
+
+ delimiter = test[len];
+ if (delimiter != ':' && delimiter != ',' && delimiter != '\0')
+ continue;
+
+ valid = 1;
+ break;
+ }
+
+ return valid;
+}
+
int main(int argc, char **argv, char **envp)
{
int min = 0;
@@ -948,7 +1266,10 @@ int main(int argc, char **argv, char **envp)
int idx;
char *test;
- environ = envp;
+ argv0 = argv[0];
+ test_argc = argc;
+ test_argv = argv;
+ test_envp = envp;
/* when called as init, it's possible that no console was opened, for
* example if no /dev file system was provided. We'll check that fd#1
@@ -964,10 +1285,10 @@ int main(int argc, char **argv, char **envp)
* syscall:5-15[:.*],stdlib:8-10
*/
test = argv[1];
- if (!test)
+ if (!is_setting_valid(test))
test = getenv("NOLIBC_TEST");
- if (test) {
+ if (is_setting_valid(test)) {
char *comma, *colon, *dash, *value;
do {
@@ -1045,17 +1366,13 @@ int main(int argc, char **argv, char **envp)
*/
printf("Leaving init with final status: %d\n", !!ret);
if (ret == 0)
- reboot(LINUX_REBOOT_CMD_POWER_OFF);
+ reboot(RB_POWER_OFF);
#if defined(__x86_64__)
/* QEMU started with "-device isa-debug-exit -no-reboot" will
* exit with status code 2N+1 when N is written to 0x501. We
* hard-code the syscall here as it's arch-dependent.
*/
-#if defined(_NOLIBC_SYS_H)
- else if (my_syscall3(__NR_ioperm, 0x501, 1, 1) == 0)
-#else
- else if (ioperm(0x501, 1, 1) == 0)
-#endif
+ else if (syscall(__NR_ioperm, 0x501, 1, 1) == 0)
__asm__ volatile ("outb %%al, %%dx" :: "d"(0x501), "a"(0));
/* if it does nothing, fall back to the regular panic */
#endif
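
One detail worth calling out in the nolibc-test additions: test_mmap_munmap() must respect the requirement that mmap's file offset be page-aligned, so it rounds the chosen offset down with pa_offset = offset & ~(page_size - 1) and maps length + offset - pa_offset bytes. A short worked example of that arithmetic (the values are illustrative only):

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	off_t offset = 5000;		/* arbitrary example offset */
	size_t length = 1;		/* bytes the caller wants to map */
	off_t pa_offset = offset & ~(page_size - 1);

	/* e.g. with a 4096-byte page: pa_offset == 4096, and the mapping
	 * must cover length + (offset - pa_offset) == 1 + 904 == 905 bytes. */
	printf("pa_offset=%ld map_len=%zu\n",
	       (long)pa_offset, length + (size_t)(offset - pa_offset));
	return 0;
}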
diff --git a/tools/testing/selftests/prctl/.gitignore b/tools/testing/selftests/prctl/.gitignore
index 7a657b25f686..05d5e31661df 100644
--- a/tools/testing/selftests/prctl/.gitignore
+++ b/tools/testing/selftests/prctl/.gitignore
@@ -3,3 +3,4 @@ disable-tsc-ctxt-sw-stress-test
disable-tsc-on-off-stress-test
disable-tsc-test
set-anon-vma-name-test
+set-process-name
diff --git a/tools/testing/selftests/prctl/Makefile b/tools/testing/selftests/prctl/Makefile
index c058b81eeb41..01dc90fbb509 100644
--- a/tools/testing/selftests/prctl/Makefile
+++ b/tools/testing/selftests/prctl/Makefile
@@ -5,12 +5,10 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
ifeq ($(ARCH),x86)
TEST_PROGS := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test \
- disable-tsc-test set-anon-vma-name-test
+ disable-tsc-test set-anon-vma-name-test set-process-name
all: $(TEST_PROGS)
include ../lib.mk
-clean:
- rm -fr $(TEST_PROGS)
endif
endif
diff --git a/tools/testing/selftests/prctl/set-process-name.c b/tools/testing/selftests/prctl/set-process-name.c
new file mode 100644
index 000000000000..3bc5e0e09eb9
--- /dev/null
+++ b/tools/testing/selftests/prctl/set-process-name.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This test covers the PR_SET_NAME functionality of prctl calls
+ */
+
+#include <errno.h>
+#include <sys/prctl.h>
+#include <string.h>
+
+#include "../kselftest_harness.h"
+
+#define CHANGE_NAME "changename"
+#define EMPTY_NAME ""
+#define TASK_COMM_LEN 16
+
+int set_name(char *name)
+{
+ int res;
+
+ res = prctl(PR_SET_NAME, name, NULL, NULL, NULL);
+
+ if (res < 0)
+ return -errno;
+ return res;
+}
+
+int check_is_name_correct(char *check_name)
+{
+ char name[TASK_COMM_LEN];
+ int res;
+
+ res = prctl(PR_GET_NAME, name, NULL, NULL, NULL);
+
+ if (res < 0)
+ return -errno;
+
+ return !strcmp(name, check_name);
+}
+
+int check_null_pointer(char *check_name)
+{
+ char *name = NULL;
+ int res;
+
+ res = prctl(PR_GET_NAME, name, NULL, NULL, NULL);
+
+ return res;
+}
+
+TEST(rename_process) {
+
+ EXPECT_GE(set_name(CHANGE_NAME), 0);
+ EXPECT_TRUE(check_is_name_correct(CHANGE_NAME));
+
+ EXPECT_GE(set_name(EMPTY_NAME), 0);
+ EXPECT_TRUE(check_is_name_correct(EMPTY_NAME));
+
+ EXPECT_GE(set_name(CHANGE_NAME), 0);
+ EXPECT_LT(check_null_pointer(CHANGE_NAME), 0);
+}
+
+TEST_HARNESS_MAIN
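
A small, assumed usage sketch for the new prctl test: built through the regular kselftest targets and run directly, it verifies that PR_SET_NAME/PR_GET_NAME round-trip a name (the kernel truncates it to TASK_COMM_LEN, 16 bytes including the terminating NUL) and that a NULL buffer is rejected. Paths assume a kernel source tree checkout.

  # Build and run the new test via the kselftest framework (x86 only, per the
  # Makefile guard above); the tree-relative path is the usual kselftest layout.
  make -C tools/testing/selftests TARGETS=prctl
  ./tools/testing/selftests/prctl/set-process-name
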
diff --git a/tools/testing/selftests/rcutorture/bin/configcheck.sh b/tools/testing/selftests/rcutorture/bin/configcheck.sh
index b92dfeb7fbbf..99162d18bad3 100755
--- a/tools/testing/selftests/rcutorture/bin/configcheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/configcheck.sh
@@ -3,6 +3,8 @@
#
# Usage: configcheck.sh .config .config-template
#
+# Non-empty output if errors detected.
+#
# Copyright (C) IBM Corporation, 2011
#
# Authors: Paul E. McKenney <paulmck@linux.ibm.com>
@@ -10,32 +12,35 @@
T="`mktemp -d ${TMPDIR-/tmp}/configcheck.sh.XXXXXX`"
trap 'rm -rf $T' 0
-sed -e 's/"//g' < $1 > $T/.config
+# function test_kconfig_enabled ( Kconfig-var=val )
+function test_kconfig_enabled () {
+ if ! grep -q "^$1$" $T/.config
+ then
+ echo :$1: improperly set
+ return 1
+ fi
+ return 0
+}
-sed -e 's/"//g' -e 's/\(.*\)=n/# \1 is not set/' -e 's/^#CHECK#//' < $2 |
-awk '
-{
- print "if grep -q \"" $0 "\" < '"$T/.config"'";
- print "then";
- print "\t:";
- print "else";
- if ($1 == "#") {
- print "\tif grep -q \"" $2 "\" < '"$T/.config"'";
- print "\tthen";
- print "\t\tif test \"$firsttime\" = \"\""
- print "\t\tthen"
- print "\t\t\tfirsttime=1"
- print "\t\tfi"
- print "\t\techo \":" $2 ": improperly set\"";
- print "\telse";
- print "\t\t:";
- print "\tfi";
- } else {
- print "\tif test \"$firsttime\" = \"\""
- print "\tthen"
- print "\t\tfirsttime=1"
- print "\tfi"
- print "\techo \":" $0 ": improperly set\"";
- }
- print "fi";
- }' | sh
+# function test_kconfig_disabled ( Kconfig-var )
+function test_kconfig_disabled () {
+ if grep -q "^$1=n$" $T/.config
+ then
+ return 0
+ fi
+ if grep -q "^$1=" $T/.config
+ then
+ echo :$1=n: improperly set
+ return 1
+ fi
+ return 0
+}
+
+sed -e 's/"//g' < $1 > $T/.config
+sed -e 's/^#CHECK#//' < $2 > $T/ConfigFragment
+grep '^CONFIG_.*=n$' $T/ConfigFragment |
+ sed -e 's/^/test_kconfig_disabled /' -e 's/=n$//' > $T/kconfig-n.sh
+. $T/kconfig-n.sh
+grep -v '^CONFIG_.*=n$' $T/ConfigFragment | grep '^CONFIG_' |
+ sed -e 's/^/test_kconfig_enabled /' > $T/kconfig-not-n.sh
+. $T/kconfig-not-n.sh
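
A minimal sketch of what the two helpers above enforce, using placeholder option names and a .config in the current directory: an ordinary CONFIG_FOO=y line from the fragment must appear verbatim, while a CONFIG_BAR=n line is satisfied either by an explicit "=n" or by the option being absent altogether.

  # Hand-rolled equivalents of test_kconfig_enabled / test_kconfig_disabled
  # (CONFIG_FOO and CONFIG_BAR are placeholders):
  grep -q '^CONFIG_FOO=y$' .config || echo ':CONFIG_FOO=y: improperly set'
  if grep -q '^CONFIG_BAR=' .config && ! grep -q '^CONFIG_BAR=n$' .config
  then
          echo ':CONFIG_BAR=n: improperly set'
  fi
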
diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh
index 48b9147e8c91..b8e2ea23cb3f 100644
--- a/tools/testing/selftests/rcutorture/bin/functions.sh
+++ b/tools/testing/selftests/rcutorture/bin/functions.sh
@@ -45,7 +45,7 @@ checkarg () {
configfrag_boot_params () {
if test -r "$2.boot"
then
- echo $1 `grep -v '^#' "$2.boot" | tr '\012' ' '`
+ echo `grep -v '^#' "$2.boot" | tr '\012' ' '` $1
else
echo $1
fi
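
The reordering above (and the matching per_version_boot_params changes further down) puts the caller-supplied parameters after the scenario's .boot defaults, so a duplicated kernel or module parameter is resolved in the caller's favor, since the later setting on the command line is normally the one that takes effect. A self-contained mimic with hypothetical values:

  # Hypothetical scenario default vs. caller override (e.g. kvm.sh --bootargs):
  bootfile_args="rcutorture.onoff_interval=200"   # pretend contents of TREE01.boot
  caller_args="rcutorture.onoff_interval=0"       # supplied by the caller
  echo $bootfile_args $caller_args                # new order: the caller's value wins
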
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh
index b582113178ac..f683e424ddd5 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh
@@ -40,6 +40,10 @@ awk '
sum += $5 / 1000.;
}
+/rcu_scale: Grace-period kthread CPU time/ {
+ cputime = $6;
+}
+
END {
newNR = asort(gptimes);
if (newNR <= 0) {
@@ -78,6 +82,8 @@ END {
print "90th percentile grace-period duration: " gptimes[pct90];
print "99th percentile grace-period duration: " gptimes[pct99];
print "Maximum grace-period duration: " gptimes[newNR];
- print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches;
+ if (cputime != "")
+ cpustr = " CPU: " cputime;
+ print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches cpustr;
print "Computed from rcuscale printk output.";
}'
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index 1df7e695edf7..5be670dd4009 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -16,6 +16,8 @@
T=/tmp/kvm-recheck.sh.$$
trap 'rm -f $T' 0 2
+configerrors=0
+
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
. functions.sh
for rd in "$@"
@@ -32,7 +34,7 @@ do
fi
TORTURE_SUITE="`cat $i/../torture_suite`" ; export TORTURE_SUITE
configfile=`echo $i | sed -e 's,^.*/,,'`
- rm -f $i/console.log.*.diags
+ rm -f $i/console.log.*.diags $i/ConfigFragment.diags
case "${TORTURE_SUITE}" in
X*)
;;
@@ -49,8 +51,21 @@ do
then
echo QEMU killed
fi
- configcheck.sh $i/.config $i/ConfigFragment > $T 2>&1
- cat $T
+ configcheck.sh $i/.config $i/ConfigFragment > $i/ConfigFragment.diags 2>&1
+ if grep -q '^CONFIG_KCSAN=y$' $i/ConfigFragment.input
+ then
+ # KCSAN forces a number of Kconfig options, so remove
+ # complaints about those Kconfig options in KCSAN runs.
+ mv $i/ConfigFragment.diags $i/ConfigFragment.diags.kcsan
+ grep -v -E 'CONFIG_PROVE_RCU|CONFIG_PREEMPT_COUNT' $i/ConfigFragment.diags.kcsan > $i/ConfigFragment.diags
+ fi
+ if test -s $i/ConfigFragment.diags
+ then
+ cat $i/ConfigFragment.diags
+ configerrors=$((configerrors+1))
+ else
+ rm $i/ConfigFragment.diags
+ fi
if test -r $i/Make.oldconfig.err
then
cat $i/Make.oldconfig.err
@@ -65,7 +80,14 @@ do
if test -f "$i/buildonly"
then
echo Build-only run, no boot/test
- configcheck.sh $i/.config $i/ConfigFragment
+ configcheck.sh $i/.config $i/ConfigFragment > $i/ConfigFragment.diags 2>&1
+ if test -s $i/ConfigFragment.diags
+ then
+ cat $i/ConfigFragment.diags
+ configerrors=$((configerrors+1))
+ else
+ rm $i/ConfigFragment.diags
+ fi
parse-build.sh $i/Make.out $configfile
elif test -f "$i/qemu-cmd"
then
@@ -79,10 +101,10 @@ do
done
if test -f "$rd/kcsan.sum"
then
- if ! test -f $T
+ if ! test -f $i/ConfigFragment.diags
then
:
- elif grep -q CONFIG_KCSAN=y $T
+ elif grep -q CONFIG_KCSAN=y $i/ConfigFragment.diags
then
echo "Compiler or architecture does not support KCSAN!"
echo Did you forget to switch your compiler with '--kmake-arg CC=<cc-that-supports-kcsan>'?
@@ -94,17 +116,23 @@ do
fi
fi
done
+
+if test "$configerrors" -gt 0
+then
+ echo $configerrors runs with .config errors.
+ ret=1
+fi
EDITOR=echo kvm-find-errors.sh "${@: -1}" > $T 2>&1
builderrors="`tr ' ' '\012' < $T | grep -c '/Make.out.diags'`"
if test "$builderrors" -gt 0
then
echo $builderrors runs with build errors.
- ret=1
+ ret=2
fi
runerrors="`tr ' ' '\012' < $T | grep -c '/console.log.diags'`"
if test "$runerrors" -gt 0
then
echo $runerrors runs with runtime errors.
- ret=2
+ ret=3
fi
exit $ret
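
With the changes above, kvm-recheck.sh both prints per-scenario ConfigFragment.diags contents and distinguishes failure classes by exit status: .config errors set it to 1, build errors to 2, and runtime errors to 3, the later checks overriding the earlier ones. A sketch of consuming that status, run from the top of a kernel tree with a placeholder result directory:

  tools/testing/selftests/rcutorture/bin/kvm-recheck.sh \
          tools/testing/selftests/rcutorture/res/2023.10.05-00.00.00
  case $? in
  0) echo "all scenarios clean" ;;
  1) echo ".config errors only (see the ConfigFragment.diags files)" ;;
  2) echo "build errors (possibly .config errors as well)" ;;
  3) echo "runtime errors (possibly build/.config errors as well)" ;;
  esac
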
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
index a2328163eba1..134cdef5a6e0 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-remote.sh
@@ -137,14 +137,20 @@ chmod +x $T/bin/kvm-remote-*.sh
# Check first to avoid the need for cleanup for system-name typos
for i in $systems
do
- ncpus="`ssh -o BatchMode=yes $i getconf _NPROCESSORS_ONLN 2> /dev/null`"
+ ssh -o BatchMode=yes $i getconf _NPROCESSORS_ONLN > $T/ssh.stdout 2> $T/ssh.stderr
ret=$?
if test "$ret" -ne 0
then
- echo System $i unreachable, giving up. | tee -a "$oldrun/remote-log"
+ echo "System $i unreachable ($ret), giving up." | tee -a "$oldrun/remote-log"
+ echo ' --- ssh stdout: vvv' | tee -a "$oldrun/remote-log"
+ cat $T/ssh.stdout | tee -a "$oldrun/remote-log"
+ echo ' --- ssh stdout: ^^^' | tee -a "$oldrun/remote-log"
+ echo ' --- ssh stderr: vvv' | tee -a "$oldrun/remote-log"
+ cat $T/ssh.stderr | tee -a "$oldrun/remote-log"
+ echo ' --- ssh stderr: ^^^' | tee -a "$oldrun/remote-log"
exit 4
fi
- echo $i: $ncpus CPUs " " `date` | tee -a "$oldrun/remote-log"
+ echo $i: `cat $T/ssh.stdout` CPUs " " `date` | tee -a "$oldrun/remote-log"
done
# Download and expand the tarball on all systems.
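
The probe above, in isolation: BatchMode keeps ssh from prompting for a password, and both output streams are captured so that an unreachable or misconfigured system leaves something useful in the remote log. A stripped-down sketch with a placeholder host name:

  ssh -o BatchMode=yes build-box getconf _NPROCESSORS_ONLN \
          > /tmp/ssh.stdout 2> /tmp/ssh.stderr
  ret=$?
  if test "$ret" -ne 0
  then
          echo "build-box unreachable ($ret), captured output follows:"
          cat /tmp/ssh.stdout /tmp/ssh.stderr
  fi
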
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index d2a3710a5f2a..b33cd8753689 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -9,9 +9,10 @@
#
# Usage: kvm-test-1-run.sh config resdir seconds qemu-args boot_args_in
#
-# qemu-args defaults to "-enable-kvm -nographic", along with arguments
-# specifying the number of CPUs and other options
-# generated from the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -display none -no-reboot", along
+# with arguments specifying the number of CPUs
+# and other options generated from the underlying
+# CPU architecture.
# boot_args_in defaults to value returned by the per_version_boot_params
# shell function.
#
@@ -57,7 +58,6 @@ config_override_param () {
cat $T/Kconfig_args >> $resdir/ConfigFragment.input
config_override.sh $T/$2 $T/Kconfig_args > $T/$2.tmp
mv $T/$2.tmp $T/$2
- # Note that "#CHECK#" is not permitted on commandline.
fi
}
@@ -140,7 +140,7 @@ then
fi
# Generate -smp qemu argument.
-qemu_args="-enable-kvm -nographic $qemu_args"
+qemu_args="-enable-kvm -display none -no-reboot $qemu_args"
cpu_count=`configNR_CPUS.sh $resdir/ConfigFragment`
cpu_count=`configfrag_boot_cpus "$boot_args_in" "$config_template" "$cpu_count"`
if test "$cpu_count" -gt "$TORTURE_ALLOTED_CPUS"
@@ -163,7 +163,7 @@ boot_args="`configfrag_boot_params "$boot_args_in" "$config_template"`"
boot_args="`per_version_boot_params "$boot_args" $resdir/.config $seconds`"
if test -n "$TORTURE_BOOT_GDB_ARG"
then
- boot_args="$boot_args $TORTURE_BOOT_GDB_ARG"
+ boot_args="$TORTURE_BOOT_GDB_ARG $boot_args"
fi
# Give bare-metal advice
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index d3cdc2d33d4b..b0f36a638a69 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -186,7 +186,7 @@ do
fi
;;
--kconfig|--kconfigs)
- checkarg --kconfig "(Kconfig options)" $# "$2" '^CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\( CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\)*$' '^error$'
+ checkarg --kconfig "(Kconfig options)" $# "$2" '^\(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\( \(#CHECK#\)\?CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\|"[^"]*"\)\)*$' '^error$'
TORTURE_KCONFIG_ARG="`echo "$TORTURE_KCONFIG_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`"
shift
;;
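
The relaxed regular expression above lets kvm.sh accept "#CHECK#"-prefixed Kconfig options on the command line, which the rcutasksflavors test added to torture.sh below relies on: per the configcheck.sh changes earlier, a "#CHECK#" option is verified in the resulting .config rather than being forced into it. A hypothetical invocation, run from the top of a kernel tree:

  tools/testing/selftests/rcutorture/bin/kvm.sh --buildonly --configs "TREE04" \
          --kconfig "CONFIG_FORCE_TASKS_RCU=y #CHECK#CONFIG_TASKS_RUDE_RCU=n" \
          --trust-make
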
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
index 71f0dfbb2a6d..212c52ca90b5 100755
--- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -10,7 +10,6 @@
D=tools/testing/selftests/rcutorture
# Prerequisite checks
-[ -z "$D" ] && echo >&2 "No argument supplied" && exit 1
if [ ! -d "$D" ]; then
echo >&2 "$D does not exist: Malformed kernel source tree?"
exit 1
@@ -34,12 +33,16 @@ cat > init.c << '___EOF___'
volatile unsigned long delaycount;
-int main(int argc, int argv[])
+int main(int argc, char *argv[])
{
int i;
struct timeval tv;
struct timeval tvb;
+ printf("Torture-test rudimentary init program started, command line:\n");
+ for (i = 0; i < argc; i++)
+ printf(" %s", argv[i]);
+ printf("\n");
for (;;) {
sleep(1);
/* Need some userspace time. */
@@ -64,15 +67,23 @@ ___EOF___
# build using nolibc on supported archs (smaller executable) and fall
# back to regular glibc on other ones.
if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
- "||__ARM_EABI__||__aarch64__||__s390x__\nyes\n#endif" \
+ "||__ARM_EABI__||__aarch64__||__s390x__||__loongarch__\nyes\n#endif" \
| ${CROSS_COMPILE}gcc -E -nostdlib -xc - \
| grep -q '^yes'; then
# architecture supported by nolibc
${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
-nostdlib -include ../../../../include/nolibc/nolibc.h \
-s -static -Os -o init init.c -lgcc
+ ret=$?
else
${CROSS_COMPILE}gcc -s -static -Os -o init init.c
+ ret=$?
+fi
+
+if [ "$ret" -ne 0 ]
+then
+ echo "Failed to create a statically linked C-language initrd"
+ exit "$ret"
fi
rm init.c
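
The compiler probe near the top of that hunk decides between the nolibc and glibc builds; the same one-liner can be run by hand to see which path a given toolchain will take (arch list trimmed here, CROSS_COMPILE may be empty):

  echo -e "#if __x86_64__||__aarch64__||__loongarch__\nyes\n#endif" \
          | ${CROSS_COMPILE}gcc -E -nostdlib -xc - | grep -q '^yes' \
          && echo "will build init with nolibc" \
          || echo "will fall back to glibc"
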
diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh
index 5a2ae2264403..12b50a4a881a 100755
--- a/tools/testing/selftests/rcutorture/bin/torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/torture.sh
@@ -55,6 +55,8 @@ do_kasan=yes
do_kcsan=no
do_clocksourcewd=yes
do_rt=yes
+do_rcutasksflavors=yes
+do_srcu_lockdep=yes
# doyesno - Helper function for yes/no arguments
function doyesno () {
@@ -73,18 +75,20 @@ usage () {
echo " --configs-locktorture \"config-file list w/ repeat factor (10*LOCK01)\""
echo " --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\""
echo " --do-all"
- echo " --do-allmodconfig / --do-no-allmodconfig"
- echo " --do-clocksourcewd / --do-no-clocksourcewd"
- echo " --do-kasan / --do-no-kasan"
- echo " --do-kcsan / --do-no-kcsan"
- echo " --do-kvfree / --do-no-kvfree"
- echo " --do-locktorture / --do-no-locktorture"
+ echo " --do-allmodconfig / --do-no-allmodconfig / --no-allmodconfig"
+ echo " --do-clocksourcewd / --do-no-clocksourcewd / --no-clocksourcewd"
+ echo " --do-kasan / --do-no-kasan / --no-kasan"
+ echo " --do-kcsan / --do-no-kcsan / --no-kcsan"
+ echo " --do-kvfree / --do-no-kvfree / --no-kvfree"
+ echo " --do-locktorture / --do-no-locktorture / --no-locktorture"
echo " --do-none"
- echo " --do-rcuscale / --do-no-rcuscale"
- echo " --do-rcutorture / --do-no-rcutorture"
- echo " --do-refscale / --do-no-refscale"
- echo " --do-rt / --do-no-rt"
- echo " --do-scftorture / --do-no-scftorture"
+ echo " --do-rcuscale / --do-no-rcuscale / --no-rcuscale"
+ echo " --do-rcutasksflavors / --do-no-rcutasksflavors / --no-rcutasksflavors"
+ echo " --do-rcutorture / --do-no-rcutorture / --no-rcutorture"
+ echo " --do-refscale / --do-no-refscale / --no-refscale"
+ echo " --do-rt / --do-no-rt / --no-rt"
+ echo " --do-scftorture / --do-no-scftorture / --no-scftorture"
+ echo " --do-srcu-lockdep / --do-no-srcu-lockdep / --no-srcu-lockdep"
echo " --duration [ <minutes> | <hours>h | <days>d ]"
echo " --kcsan-kmake-arg kernel-make-arguments"
exit 1
@@ -115,6 +119,7 @@ do
;;
--do-all|--doall)
do_allmodconfig=yes
+ do_rcutasksflavors=yes
do_rcutorture=yes
do_locktorture=yes
do_scftorture=yes
@@ -125,27 +130,29 @@ do
do_kasan=yes
do_kcsan=yes
do_clocksourcewd=yes
+ do_srcu_lockdep=yes
;;
- --do-allmodconfig|--do-no-allmodconfig)
+ --do-allmodconfig|--do-no-allmodconfig|--no-allmodconfig)
do_allmodconfig=`doyesno "$1" --do-allmodconfig`
;;
- --do-clocksourcewd|--do-no-clocksourcewd)
+ --do-clocksourcewd|--do-no-clocksourcewd|--no-clocksourcewd)
do_clocksourcewd=`doyesno "$1" --do-clocksourcewd`
;;
- --do-kasan|--do-no-kasan)
+ --do-kasan|--do-no-kasan|--no-kasan)
do_kasan=`doyesno "$1" --do-kasan`
;;
- --do-kcsan|--do-no-kcsan)
+ --do-kcsan|--do-no-kcsan|--no-kcsan)
do_kcsan=`doyesno "$1" --do-kcsan`
;;
- --do-kvfree|--do-no-kvfree)
+ --do-kvfree|--do-no-kvfree|--no-kvfree)
do_kvfree=`doyesno "$1" --do-kvfree`
;;
- --do-locktorture|--do-no-locktorture)
+ --do-locktorture|--do-no-locktorture|--no-locktorture)
do_locktorture=`doyesno "$1" --do-locktorture`
;;
--do-none|--donone)
do_allmodconfig=no
+ do_rcutasksflavors=no
do_rcutorture=no
do_locktorture=no
do_scftorture=no
@@ -156,22 +163,29 @@ do
do_kasan=no
do_kcsan=no
do_clocksourcewd=no
+ do_srcu_lockdep=no
;;
- --do-rcuscale|--do-no-rcuscale)
+ --do-rcuscale|--do-no-rcuscale|--no-rcuscale)
do_rcuscale=`doyesno "$1" --do-rcuscale`
;;
- --do-rcutorture|--do-no-rcutorture)
+ --do-rcutasksflavors|--do-no-rcutasksflavors|--no-rcutasksflavors)
+ do_rcutasksflavors=`doyesno "$1" --do-rcutasksflavors`
+ ;;
+ --do-rcutorture|--do-no-rcutorture|--no-rcutorture)
do_rcutorture=`doyesno "$1" --do-rcutorture`
;;
- --do-refscale|--do-no-refscale)
+ --do-refscale|--do-no-refscale|--no-refscale)
do_refscale=`doyesno "$1" --do-refscale`
;;
- --do-rt|--do-no-rt)
+ --do-rt|--do-no-rt|--no-rt)
do_rt=`doyesno "$1" --do-rt`
;;
- --do-scftorture|--do-no-scftorture)
+ --do-scftorture|--do-no-scftorture|--no-scftorture)
do_scftorture=`doyesno "$1" --do-scftorture`
;;
+ --do-srcu-lockdep|--do-no-srcu-lockdep|--no-srcu-lockdep)
+ do_srcu_lockdep=`doyesno "$1" --do-srcu-lockdep`
+ ;;
--duration)
checkarg --duration "(minutes)" $# "$2" '^[0-9][0-9]*\(m\|h\|d\|\)$' '^error'
mult=1
@@ -361,6 +375,40 @@ then
fi
fi
+# Test building RCU Tasks flavors in isolation, both SMP and !SMP
+if test "$do_rcutasksflavors" = "yes"
+then
+ echo " --- rcutasksflavors:" Start `date` | tee -a $T/log
+ rtfdir="tools/testing/selftests/rcutorture/res/$ds/results-rcutasksflavors"
+ mkdir -p "$rtfdir"
+ cat > $T/rcutasksflavors << __EOF__
+#CHECK#CONFIG_TASKS_RCU=n
+#CHECK#CONFIG_TASKS_RUDE_RCU=n
+#CHECK#CONFIG_TASKS_TRACE_RCU=n
+__EOF__
+ for flavor in CONFIG_TASKS_RCU CONFIG_TASKS_RUDE_RCU CONFIG_TASKS_TRACE_RCU
+ do
+ forceflavor="`echo $flavor | sed -e 's/^CONFIG/CONFIG_FORCE/'`"
+ deselectedflavors="`grep -v $flavor $T/rcutasksflavors | tr '\012' ' ' | tr -s ' ' | sed -e 's/ *$//'`"
+ echo " --- Running RCU Tasks flavor $flavor `date`" >> $rtfdir/log
+ tools/testing/selftests/rcutorture/bin/kvm.sh --datestamp "$ds/results-rcutasksflavors/$flavor" --buildonly --configs "TINY01 TREE04" --kconfig "CONFIG_RCU_EXPERT=y CONFIG_RCU_SCALE_TEST=y $forceflavor=y $deselectedflavors" --trust-make > $T/$flavor.out 2>&1
+ retcode=$?
+ if test "$retcode" -ne 0
+ then
+ break
+ fi
+ done
+ if test "$retcode" -eq 0
+ then
+ echo "rcutasksflavors($retcode)" $rtfdir >> $T/successes
+ echo Success >> $rtfdir/log
+ else
+ echo "rcutasksflavors($retcode)" $rtfdir >> $T/failures
+ echo " --- rcutasksflavors Test summary:" >> $rtfdir/log
+ echo " --- Summary: Exit code $retcode from $flavor, see Make.out" >> $rtfdir/log
+ fi
+fi
+
# --torture rcu
if test "$do_rcutorture" = "yes"
then
@@ -376,8 +424,10 @@ fi
if test "$do_scftorture" = "yes"
then
+ # Scale memory based on the number of CPUs.
+ scfmem=$((2+HALF_ALLOTED_CPUS/16))
torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot csdlock_debug=1"
- torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory 2G --trust-make
+ torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --memory ${scfmem}G --trust-make
fi
if test "$do_rt" = "yes"
@@ -391,6 +441,23 @@ then
torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
fi
+if test "$do_srcu_lockdep" = "yes"
+then
+ echo " --- do-srcu-lockdep:" Start `date` | tee -a $T/log
+ tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh --datestamp "$ds/results-srcu-lockdep" > $T/srcu_lockdep.sh.out 2>&1
+ retcode=$?
+ cp $T/srcu_lockdep.sh.out "tools/testing/selftests/rcutorture/res/$ds/results-srcu-lockdep/log"
+ if test "$retcode" -eq 0
+ then
+ echo "srcu_lockdep($retcode)" "tools/testing/selftests/rcutorture/res/$ds/results-srcu-lockdep" >> $T/successes
+ echo Success >> "tools/testing/selftests/rcutorture/res/$ds/results-srcu-lockdep/log"
+ else
+ echo "srcu_lockdep($retcode)" "tools/testing/selftests/rcutorture/res/$ds/results-srcu-lockdep" >> $T/failures
+ echo " --- srcu_lockdep Test Summary:" >> "tools/testing/selftests/rcutorture/res/$ds/results-srcu-lockdep/log"
+ echo " --- Summary: Exit code $retcode from srcu_lockdep.sh, see $ds/results-srcu-lockdep" >> "tools/testing/selftests/rcutorture/res/$ds/results-srcu-lockdep/log"
+ fi
+fi
+
if test "$do_refscale" = yes
then
primlist="`grep '\.name[ ]*=' kernel/rcu/refscale.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`"
@@ -541,11 +608,23 @@ then
fi
echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log
echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log
+tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
+find "$tdir" -name 'ConfigFragment.diags' -print > $T/configerrors
+find "$tdir" -name 'Make.out.diags' -print > $T/builderrors
+if test -s "$T/configerrors"
+then
+ echo " Scenarios with .config errors: `wc -l "$T/configerrors" | awk '{ print $1 }'`"
+ nonkcsanbug="yes"
+fi
+if test -s "$T/builderrors"
+then
+ echo " Scenarios with build errors: `wc -l "$T/builderrors" | awk '{ print $1 }'`"
+ nonkcsanbug="yes"
+fi
if test -z "$nonkcsanbug" && test -s "$T/failuresum"
then
echo " All bugs were KCSAN failures."
fi
-tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`"
if test -n "$tdir" && test $compress_concurrency -gt 0
then
# KASAN vmlinux files can approach 1GB in size, so compress them.
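
Pulling the new torture.sh pieces together: --do-rcutasksflavors builds each RCU Tasks flavor in isolation, --do-srcu-lockdep runs the srcu_lockdep.sh checks, the existing --do-no-* switches gain shorter --no-* spellings, and scftorture memory now scales as 2G plus 1G per 16 CPUs of the half-allotment. A hypothetical short run exercising the new switches:

  # Run only the two new test classes added above:
  tools/testing/selftests/rcutorture/bin/torture.sh --do-none \
          --do-rcutasksflavors --do-srcu-lockdep
  # The shorter negative forms also work now, for example:
  # torture.sh --do-all --no-kcsan --no-kvfree
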
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index d3e4b2971f92..e7bb32709d78 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -22,8 +22,9 @@ locktorture_param_onoff () {
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
- echo $1 `locktorture_param_onoff "$1" "$2"` \
+ echo `locktorture_param_onoff "$1" "$2"` \
locktorture.stat_interval=15 \
locktorture.shutdown_secs=$3 \
- locktorture.verbose=1
+ locktorture.verbose=1 \
+ $1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
index dea26c568678..2ef2fb69c360 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS03
@@ -6,6 +6,5 @@ CONFIG_PREEMPT=y
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=n
CONFIG_NO_HZ_FULL=y
-#CHECK#CONFIG_RCU_EXPERT=n
CONFIG_TASKS_RCU=y
CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 04831ef1f9b5..8ae41d5f81a3 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -15,4 +15,3 @@ CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_RCU_BOOST=n
CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
CONFIG_RCU_EXPERT=y
-CONFIG_BOOTPARAM_HOTPLUG_CPU0=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index e2bc99c785e7..c044df386876 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -46,10 +46,11 @@ rcutorture_param_stat_interval () {
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
- echo $1 `rcutorture_param_onoff "$1" "$2"` \
+ echo `rcutorture_param_onoff "$1" "$2"` \
`rcutorture_param_n_barrier_cbs "$1"` \
`rcutorture_param_stat_interval "$1"` \
rcutorture.shutdown_secs=$3 \
rcutorture.test_no_idle_hz=1 \
- rcutorture.verbose=1
+ rcutorture.verbose=1 \
+ $1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon b/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon
index 6a00157bee5b..b1ffd7c67604 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon
+++ b/tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon
@@ -2,5 +2,7 @@ CONFIG_RCU_SCALE_TEST=y
CONFIG_PRINTK_TIME=y
CONFIG_FORCE_TASKS_RCU=y
#CHECK#CONFIG_TASKS_RCU=y
+CONFIG_FORCE_TASKS_RUDE_RCU=y
+#CHECK#CONFIG_TASKS_RUDE_RCU=y
CONFIG_FORCE_TASKS_TRACE_RCU=y
#CHECK#CONFIG_TASKS_TRACE_RCU=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01 b/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01
index 227aba7783af..0059592c7408 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01
+++ b/tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01
@@ -2,6 +2,8 @@ CONFIG_SMP=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
+CONFIG_PREEMPT_DYNAMIC=n
+#CHECK#CONFIG_TREE_RCU=y
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh
index ffbe15109f0d..28070b43f017 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh
@@ -11,6 +11,7 @@
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
- echo $1 rcuscale.shutdown=1 \
- rcuscale.verbose=0
+ echo rcuscale.shutdown=1 \
+ rcuscale.verbose=0 \
+ $1
}
diff --git a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT
index ef2b501a6971..67f9d2998afd 100644
--- a/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT
+++ b/tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT
@@ -2,6 +2,7 @@ CONFIG_SMP=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
+CONFIG_PREEMPT_DYNAMIC=n
#CHECK#CONFIG_PREEMPT_RCU=n
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=y
diff --git a/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh
index f81fa2c541a6..748465627601 100644
--- a/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh
@@ -11,6 +11,7 @@
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
- echo $1 refscale.shutdown=1 \
- refscale.verbose=0
+ echo refscale.shutdown=1 \
+ refscale.verbose=0 \
+ $1
}
diff --git a/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT b/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT
index 3a59346b3de7..6133f54ce2a7 100644
--- a/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT
+++ b/tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT
@@ -2,6 +2,8 @@ CONFIG_SMP=y
CONFIG_PREEMPT_NONE=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
+CONFIG_PREEMPT_DYNAMIC=n
+#CHECK#CONFIG_PREEMPT_RCU=n
CONFIG_HZ_PERIODIC=n
CONFIG_NO_HZ_IDLE=n
CONFIG_NO_HZ_FULL=y
diff --git a/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh
index 2d949e58f5a5..7637f68ef0ce 100644
--- a/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh
@@ -22,8 +22,9 @@ scftorture_param_onoff () {
#
# Adds per-version torture-module parameters to kernels supporting them.
per_version_boot_params () {
- echo $1 `scftorture_param_onoff "$1" "$2"` \
+ echo `scftorture_param_onoff "$1" "$2"` \
scftorture.stat_interval=15 \
scftorture.shutdown_secs=$3 \
- scftorture.verbose=1
+ scftorture.verbose=1 \
+ $1
}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile
deleted file mode 100644
index 4bed0b678f8b..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-all: srcu.c store_buffering
-
-LINUX_SOURCE = ../../../../../..
-
-modified_srcu_input = $(LINUX_SOURCE)/include/linux/srcu.h \
- $(LINUX_SOURCE)/kernel/rcu/srcu.c
-
-modified_srcu_output = include/linux/srcu.h srcu.c
-
-include/linux/srcu.h: srcu.c
-
-srcu.c: modify_srcu.awk Makefile $(modified_srcu_input)
- awk -f modify_srcu.awk $(modified_srcu_input) $(modified_srcu_output)
-
-store_buffering:
- @cd tests/store_buffering; make
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/delay.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/export.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/mutex.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/percpu.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/preempt.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/rcupdate.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/sched.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/smp.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/linux/workqueue.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/empty_includes/uapi/linux/types.h
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore
deleted file mode 100644
index 57d296341304..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-srcu.h
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h
deleted file mode 100644
index f2860dd1b407..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/kconfig.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <LINUX_SOURCE/linux/kconfig.h>
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
deleted file mode 100644
index 8bc960e5e713..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header has been modifies to remove definitions of types that
- * are defined in standard userspace headers or are problematic for some
- * other reason.
- */
-
-#ifndef _LINUX_TYPES_H
-#define _LINUX_TYPES_H
-
-#define __EXPORTED_HEADERS__
-#include <uapi/linux/types.h>
-
-#ifndef __ASSEMBLY__
-
-#define DECLARE_BITMAP(name, bits) \
- unsigned long name[BITS_TO_LONGS(bits)]
-
-typedef __u32 __kernel_dev_t;
-
-/* bsd */
-typedef unsigned char u_char;
-typedef unsigned short u_short;
-typedef unsigned int u_int;
-typedef unsigned long u_long;
-
-/* sysv */
-typedef unsigned char unchar;
-typedef unsigned short ushort;
-typedef unsigned int uint;
-typedef unsigned long ulong;
-
-#ifndef __BIT_TYPES_DEFINED__
-#define __BIT_TYPES_DEFINED__
-
-typedef __u8 u_int8_t;
-typedef __s8 int8_t;
-typedef __u16 u_int16_t;
-typedef __s16 int16_t;
-typedef __u32 u_int32_t;
-typedef __s32 int32_t;
-
-#endif /* !(__BIT_TYPES_DEFINED__) */
-
-typedef __u8 uint8_t;
-typedef __u16 uint16_t;
-typedef __u32 uint32_t;
-
-/* this is a special 64bit data type that is 8-byte aligned */
-#define aligned_u64 __u64 __attribute__((aligned(8)))
-#define aligned_be64 __be64 __attribute__((aligned(8)))
-#define aligned_le64 __le64 __attribute__((aligned(8)))
-
-/**
- * The type used for indexing onto a disc or disc partition.
- *
- * Linux always considers sectors to be 512 bytes long independently
- * of the devices real block size.
- *
- * blkcnt_t is the type of the inode's block count.
- */
-typedef u64 sector_t;
-
-/*
- * The type of an index into the pagecache.
- */
-#define pgoff_t unsigned long
-
-/*
- * A dma_addr_t can hold any valid DMA address, i.e., any address returned
- * by the DMA API.
- *
- * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
- * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
- * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
- * so they don't care about the size of the actual bus addresses.
- */
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-typedef u64 dma_addr_t;
-#else
-typedef u32 dma_addr_t;
-#endif
-
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-typedef u64 phys_addr_t;
-#else
-typedef u32 phys_addr_t;
-#endif
-
-typedef phys_addr_t resource_size_t;
-
-/*
- * This type is the placeholder for a hardware interrupt number. It has to be
- * big enough to enclose whatever representation is used by a given platform.
- */
-typedef unsigned long irq_hw_number_t;
-
-typedef struct {
- int counter;
-} atomic_t;
-
-#ifdef CONFIG_64BIT
-typedef struct {
- long counter;
-} atomic64_t;
-#endif
-
-struct list_head {
- struct list_head *next, *prev;
-};
-
-struct hlist_head {
- struct hlist_node *first;
-};
-
-struct hlist_node {
- struct hlist_node *next, **pprev;
-};
-
-/**
- * struct callback_head - callback structure for use with RCU and task_work
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
- *
- * The struct is aligned to size of pointer. On most architectures it happens
- * naturally due ABI requirements, but some architectures (like CRIS) have
- * weird ABI and we need to ask it explicitly.
- *
- * The alignment is required to guarantee that bits 0 and 1 of @next will be
- * clear under normal conditions -- as long as we use call_rcu() or
- * call_srcu() to queue callback.
- *
- * This guarantee is important for few reasons:
- * - future call_rcu_lazy() will make use of lower bits in the pointer;
- * - the structure shares storage spacer in struct page with @compound_head,
- * which encode PageTail() in bit 0. The guarantee is needed to avoid
- * false-positive PageTail().
- */
-struct callback_head {
- struct callback_head *next;
- void (*func)(struct callback_head *head);
-} __attribute__((aligned(sizeof(void *))));
-#define rcu_head callback_head
-
-typedef void (*rcu_callback_t)(struct rcu_head *head);
-typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
-
-/* clocksource cycle base type */
-typedef u64 cycle_t;
-
-#endif /* __ASSEMBLY__ */
-#endif /* _LINUX_TYPES_H */
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
deleted file mode 100755
index e05182d3e47d..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/modify_srcu.awk
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/awk -f
-# SPDX-License-Identifier: GPL-2.0
-
-# Modify SRCU for formal verification. The first argument should be srcu.h and
-# the second should be srcu.c. Outputs modified srcu.h and srcu.c into the
-# current directory.
-
-BEGIN {
- if (ARGC != 5) {
- print "Usange: input.h input.c output.h output.c" > "/dev/stderr";
- exit 1;
- }
- h_output = ARGV[3];
- c_output = ARGV[4];
- ARGC = 3;
-
- # Tokenize using FS and not RS as FS supports regular expressions. Each
- # record is one line of source, except that backslashed lines are
- # combined. Comments are treated as field separators, as are quotes.
- quote_regexp="\"([^\\\\\"]|\\\\.)*\"";
- comment_regexp="\\/\\*([^*]|\\*+[^*/])*\\*\\/|\\/\\/.*(\n|$)";
- FS="([ \\\\\t\n\v\f;,.=(){}+*/<>&|^-]|\\[|\\]|" comment_regexp "|" quote_regexp ")+";
-
- inside_srcu_struct = 0;
- inside_srcu_init_def = 0;
- srcu_init_param_name = "";
- in_macro = 0;
- brace_nesting = 0;
- paren_nesting = 0;
-
- # Allow the manipulation of the last field separator after has been
- # seen.
- last_fs = "";
- # Whether the last field separator was intended to be output.
- last_fs_print = 0;
-
- # rcu_batches stores the initialization for each instance of struct
- # rcu_batch
-
- in_comment = 0;
-
- outputfile = "";
-}
-
-{
- prev_outputfile = outputfile;
- if (FILENAME ~ /\.h$/) {
- outputfile = h_output;
- if (FNR != NR) {
- print "Incorrect file order" > "/dev/stderr";
- exit 1;
- }
- }
- else
- outputfile = c_output;
-
- if (prev_outputfile && outputfile != prev_outputfile) {
- new_outputfile = outputfile;
- outputfile = prev_outputfile;
- update_fieldsep("", 0);
- outputfile = new_outputfile;
- }
-}
-
-# Combine the next line into $0.
-function combine_line() {
- ret = getline next_line;
- if (ret == 0) {
- # Don't allow two consecutive getlines at the end of the file
- if (eof_found) {
- print "Error: expected more input." > "/dev/stderr";
- exit 1;
- } else {
- eof_found = 1;
- }
- } else if (ret == -1) {
- print "Error reading next line of file" FILENAME > "/dev/stderr";
- exit 1;
- }
- $0 = $0 "\n" next_line;
-}
-
-# Combine backslashed lines and multiline comments.
-function combine_backslashes() {
- while (/\\$|\/\*([^*]|\*+[^*\/])*\**$/) {
- combine_line();
- }
-}
-
-function read_line() {
- combine_line();
- combine_backslashes();
-}
-
-# Print out field separators and update variables that depend on them. Only
-# print if p is true. Call with sep="" and p=0 to print out the last field
-# separator.
-function update_fieldsep(sep, p) {
- # Count braces
- sep_tmp = sep;
- gsub(quote_regexp "|" comment_regexp, "", sep_tmp);
- while (1)
- {
- if (sub("[^{}()]*\\{", "", sep_tmp)) {
- brace_nesting++;
- continue;
- }
- if (sub("[^{}()]*\\}", "", sep_tmp)) {
- brace_nesting--;
- if (brace_nesting < 0) {
- print "Unbalanced braces!" > "/dev/stderr";
- exit 1;
- }
- continue;
- }
- if (sub("[^{}()]*\\(", "", sep_tmp)) {
- paren_nesting++;
- continue;
- }
- if (sub("[^{}()]*\\)", "", sep_tmp)) {
- paren_nesting--;
- if (paren_nesting < 0) {
- print "Unbalanced parenthesis!" > "/dev/stderr";
- exit 1;
- }
- continue;
- }
-
- break;
- }
-
- if (last_fs_print)
- printf("%s", last_fs) > outputfile;
- last_fs = sep;
- last_fs_print = p;
-}
-
-# Shifts the fields down by n positions. Calls next if there are no more. If p
-# is true then print out field separators.
-function shift_fields(n, p) {
- do {
- if (match($0, FS) > 0) {
- update_fieldsep(substr($0, RSTART, RLENGTH), p);
- if (RSTART + RLENGTH <= length())
- $0 = substr($0, RSTART + RLENGTH);
- else
- $0 = "";
- } else {
- update_fieldsep("", 0);
- print "" > outputfile;
- next;
- }
- } while (--n > 0);
-}
-
-# Shifts and prints the first n fields.
-function print_fields(n) {
- do {
- update_fieldsep("", 0);
- printf("%s", $1) > outputfile;
- shift_fields(1, 1);
- } while (--n > 0);
-}
-
-{
- combine_backslashes();
-}
-
-# Print leading FS
-{
- if (match($0, "^(" FS ")+") > 0) {
- update_fieldsep(substr($0, RSTART, RLENGTH), 1);
- if (RSTART + RLENGTH <= length())
- $0 = substr($0, RSTART + RLENGTH);
- else
- $0 = "";
- }
-}
-
-# Parse the line.
-{
- while (NF > 0) {
- if ($1 == "struct" && NF < 3) {
- read_line();
- continue;
- }
-
- if (FILENAME ~ /\.h$/ && !inside_srcu_struct &&
- brace_nesting == 0 && paren_nesting == 0 &&
- $1 == "struct" && $2 == "srcu_struct" &&
- $0 ~ "^struct(" FS ")+srcu_struct(" FS ")+\\{") {
- inside_srcu_struct = 1;
- print_fields(2);
- continue;
- }
- if (inside_srcu_struct && brace_nesting == 0 &&
- paren_nesting == 0) {
- inside_srcu_struct = 0;
- update_fieldsep("", 0);
- for (name in rcu_batches)
- print "extern struct rcu_batch " name ";" > outputfile;
- }
-
- if (inside_srcu_struct && $1 == "struct" && $2 == "rcu_batch") {
- # Move rcu_batches outside of the struct.
- rcu_batches[$3] = "";
- shift_fields(3, 1);
- sub(/;[[:space:]]*$/, "", last_fs);
- continue;
- }
-
- if (FILENAME ~ /\.h$/ && !inside_srcu_init_def &&
- $1 == "#define" && $2 == "__SRCU_STRUCT_INIT") {
- inside_srcu_init_def = 1;
- srcu_init_param_name = $3;
- in_macro = 1;
- print_fields(3);
- continue;
- }
- if (inside_srcu_init_def && brace_nesting == 0 &&
- paren_nesting == 0) {
- inside_srcu_init_def = 0;
- in_macro = 0;
- continue;
- }
-
- if (inside_srcu_init_def && brace_nesting == 1 &&
- paren_nesting == 0 && last_fs ~ /\.[[:space:]]*$/ &&
- $1 ~ /^[[:alnum:]_]+$/) {
- name = $1;
- if (name in rcu_batches) {
- # Remove the dot.
- sub(/\.[[:space:]]*$/, "", last_fs);
-
- old_record = $0;
- do
- shift_fields(1, 0);
- while (last_fs !~ /,/ || paren_nesting > 0);
- end_loc = length(old_record) - length($0);
- end_loc += index(last_fs, ",") - length(last_fs);
-
- last_fs = substr(last_fs, index(last_fs, ",") + 1);
- last_fs_print = 1;
-
- match(old_record, "^"name"("FS")+=");
- start_loc = RSTART + RLENGTH;
-
- len = end_loc - start_loc;
- initializer = substr(old_record, start_loc, len);
- gsub(srcu_init_param_name "\\.", "", initializer);
- rcu_batches[name] = initializer;
- continue;
- }
- }
-
- # Don't include a nonexistent file
- if (!in_macro && $1 == "#include" && /^#include[[:space:]]+"rcu\.h"/) {
- update_fieldsep("", 0);
- next;
- }
-
- # Ignore most preprocessor stuff.
- if (!in_macro && $1 ~ /#/) {
- break;
- }
-
- if (brace_nesting > 0 && $1 ~ "^[[:alnum:]_]+$" && NF < 2) {
- read_line();
- continue;
- }
- if (brace_nesting > 0 &&
- $0 ~ "^[[:alnum:]_]+[[:space:]]*(\\.|->)[[:space:]]*[[:alnum:]_]+" &&
- $2 in rcu_batches) {
- # Make uses of rcu_batches global. Somewhat unreliable.
- shift_fields(1, 0);
- print_fields(1);
- continue;
- }
-
- if ($1 == "static" && NF < 3) {
- read_line();
- continue;
- }
- if ($1 == "static" && ($2 == "bool" && $3 == "try_check_zero" ||
- $2 == "void" && $3 == "srcu_flip")) {
- shift_fields(1, 1);
- print_fields(2);
- continue;
- }
-
- # Distinguish between read-side and write-side memory barriers.
- if ($1 == "smp_mb" && NF < 2) {
- read_line();
- continue;
- }
- if (match($0, /^smp_mb[[:space:]();\/*]*[[:alnum:]]/)) {
- barrier_letter = substr($0, RLENGTH, 1);
- if (barrier_letter ~ /A|D/)
- new_barrier_name = "sync_smp_mb";
- else if (barrier_letter ~ /B|C/)
- new_barrier_name = "rs_smp_mb";
- else {
- print "Unrecognized memory barrier." > "/dev/null";
- exit 1;
- }
-
- shift_fields(1, 1);
- printf("%s", new_barrier_name) > outputfile;
- continue;
- }
-
- # Skip definition of rcu_synchronize, since it is already
- # defined in misc.h. Only present in old versions of srcu.
- if (brace_nesting == 0 && paren_nesting == 0 &&
- $1 == "struct" && $2 == "rcu_synchronize" &&
- $0 ~ "^struct(" FS ")+rcu_synchronize(" FS ")+\\{") {
- shift_fields(2, 0);
- while (brace_nesting) {
- if (NF < 2)
- read_line();
- shift_fields(1, 0);
- }
- }
-
- # Skip definition of wakeme_after_rcu for the same reason
- if (brace_nesting == 0 && $1 == "static" && $2 == "void" &&
- $3 == "wakeme_after_rcu") {
- while (NF < 5)
- read_line();
- shift_fields(3, 0);
- do {
- while (NF < 3)
- read_line();
- shift_fields(1, 0);
- } while (paren_nesting || brace_nesting);
- }
-
- if ($1 ~ /^(unsigned|long)$/ && NF < 3) {
- read_line();
- continue;
- }
-
- # Give srcu_batches_completed the correct type for old SRCU.
- if (brace_nesting == 0 && $1 == "long" &&
- $2 == "srcu_batches_completed") {
- update_fieldsep("", 0);
- printf("unsigned ") > outputfile;
- print_fields(2);
- continue;
- }
- if (brace_nesting == 0 && $1 == "unsigned" && $2 == "long" &&
- $3 == "srcu_batches_completed") {
- print_fields(3);
- continue;
- }
-
- # Just print out the input code by default.
- print_fields(1);
- }
- update_fieldsep("", 0);
- print > outputfile;
- next;
-}
-
-END {
- update_fieldsep("", 0);
-
- if (brace_nesting != 0) {
- print "Unbalanced braces!" > "/dev/stderr";
- exit 1;
- }
-
- # Define the rcu_batches
- for (name in rcu_batches)
- print "struct rcu_batch " name " = " rcu_batches[name] ";" > c_output;
-}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h
deleted file mode 100644
index 570a49d9da7e..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/assume.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASSUME_H
-#define ASSUME_H
-
-/* Provide an assumption macro that can be disabled for gcc. */
-#ifdef RUN
-#define assume(x) \
- do { \
- /* Evaluate x to suppress warnings. */ \
- (void) (x); \
- } while (0)
-
-#else
-#define assume(x) __CPROVER_assume(x)
-#endif
-
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
deleted file mode 100644
index 3f95a768a03b..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BARRIERS_H
-#define BARRIERS_H
-
-#define barrier() __asm__ __volatile__("" : : : "memory")
-
-#ifdef RUN
-#define smp_mb() __sync_synchronize()
-#define smp_mb__after_unlock_lock() __sync_synchronize()
-#else
-/*
- * Copied from CBMC's implementation of __sync_synchronize(), which
- * seems to be disabled by default.
- */
-#define smp_mb() __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence", \
- "WWcumul", "RRcumul", "RWcumul", "WRcumul")
-#define smp_mb__after_unlock_lock() __CPROVER_fence("WWfence", "RRfence", "RWfence", "WRfence", \
- "WWcumul", "RRcumul", "RWcumul", "WRcumul")
-#endif
-
-/*
- * Allow memory barriers to be disabled in either the read or write side
- * of SRCU individually.
- */
-
-#ifndef NO_SYNC_SMP_MB
-#define sync_smp_mb() smp_mb()
-#else
-#define sync_smp_mb() do {} while (0)
-#endif
-
-#ifndef NO_READ_SIDE_SMP_MB
-#define rs_smp_mb() smp_mb()
-#else
-#define rs_smp_mb() do {} while (0)
-#endif
-
-#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
-#define WRITE_ONCE(x) ((*(volatile typeof(x) *) &(x)) = (val))
-
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h
deleted file mode 100644
index 5e7912c6a521..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/bug_on.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef BUG_ON_H
-#define BUG_ON_H
-
-#include <assert.h>
-
-#define BUG() assert(0)
-#define BUG_ON(x) assert(!(x))
-
-/* Does it make sense to treat warnings as errors? */
-#define WARN() BUG()
-#define WARN_ON(x) (BUG_ON(x), false)
-
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c
deleted file mode 100644
index e67ee5b3dd7c..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/combined_source.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <config.h>
-
-/* Include all source files. */
-
-#include "include_srcu.c"
-
-#include "preempt.c"
-#include "misc.c"
-
-/* Used by test.c files */
-#include <pthread.h>
-#include <stdlib.h>
-#include <linux/srcu.h>
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h
deleted file mode 100644
index 283d7103334f..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/config.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* "Cheater" definitions based on restricted Kconfig choices. */
-
-#undef CONFIG_TINY_RCU
-#undef __CHECKER__
-#undef CONFIG_DEBUG_LOCK_ALLOC
-#undef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-#undef CONFIG_HOTPLUG_CPU
-#undef CONFIG_MODULES
-#undef CONFIG_NO_HZ_FULL_SYSIDLE
-#undef CONFIG_PREEMPT_COUNT
-#undef CONFIG_PREEMPT_RCU
-#undef CONFIG_PROVE_RCU
-#undef CONFIG_RCU_NOCB_CPU
-#undef CONFIG_RCU_NOCB_CPU_ALL
-#undef CONFIG_RCU_STALL_COMMON
-#undef CONFIG_RCU_TRACE
-#undef CONFIG_RCU_USER_QS
-#undef CONFIG_TASKS_RCU
-#define CONFIG_TREE_RCU
-
-#define CONFIG_GENERIC_ATOMIC64
-
-#if NR_CPUS > 1
-#define CONFIG_SMP
-#else
-#undef CONFIG_SMP
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c
deleted file mode 100644
index e5202d4cff30..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/include_srcu.c
+++ /dev/null
@@ -1,32 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <config.h>
-
-#include <assert.h>
-#include <errno.h>
-#include <inttypes.h>
-#include <pthread.h>
-#include <stddef.h>
-#include <string.h>
-#include <sys/types.h>
-
-#include "int_typedefs.h"
-
-#include "barriers.h"
-#include "bug_on.h"
-#include "locks.h"
-#include "misc.h"
-#include "preempt.h"
-#include "percpu.h"
-#include "workqueues.h"
-
-#ifdef USE_SIMPLE_SYNC_SRCU
-#define synchronize_srcu(sp) synchronize_srcu_original(sp)
-#endif
-
-#include <srcu.c>
-
-#ifdef USE_SIMPLE_SYNC_SRCU
-#undef synchronize_srcu
-
-#include "simple_sync_srcu.c"
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h
deleted file mode 100644
index 0dd27aa517a7..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/int_typedefs.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef INT_TYPEDEFS_H
-#define INT_TYPEDEFS_H
-
-#include <inttypes.h>
-
-typedef int8_t s8;
-typedef uint8_t u8;
-typedef int16_t s16;
-typedef uint16_t u16;
-typedef int32_t s32;
-typedef uint32_t u32;
-typedef int64_t s64;
-typedef uint64_t u64;
-
-typedef int8_t __s8;
-typedef uint8_t __u8;
-typedef int16_t __s16;
-typedef uint16_t __u16;
-typedef int32_t __s32;
-typedef uint32_t __u32;
-typedef int64_t __s64;
-typedef uint64_t __u64;
-
-#define S8_C(x) INT8_C(x)
-#define U8_C(x) UINT8_C(x)
-#define S16_C(x) INT16_C(x)
-#define U16_C(x) UINT16_C(x)
-#define S32_C(x) INT32_C(x)
-#define U32_C(x) UINT32_C(x)
-#define S64_C(x) INT64_C(x)
-#define U64_C(x) UINT64_C(x)
-
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
deleted file mode 100644
index 1e24827f96f1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h
+++ /dev/null
@@ -1,221 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LOCKS_H
-#define LOCKS_H
-
-#include <limits.h>
-#include <pthread.h>
-#include <stdbool.h>
-
-#include "assume.h"
-#include "bug_on.h"
-#include "preempt.h"
-
-int nondet_int(void);
-
-#define __acquire(x)
-#define __acquires(x)
-#define __release(x)
-#define __releases(x)
-
-/* Only use one lock mechanism. Select which one. */
-#ifdef PTHREAD_LOCK
-struct lock_impl {
- pthread_mutex_t mutex;
-};
-
-static inline void lock_impl_lock(struct lock_impl *lock)
-{
- BUG_ON(pthread_mutex_lock(&lock->mutex));
-}
-
-static inline void lock_impl_unlock(struct lock_impl *lock)
-{
- BUG_ON(pthread_mutex_unlock(&lock->mutex));
-}
-
-static inline bool lock_impl_trylock(struct lock_impl *lock)
-{
- int err = pthread_mutex_trylock(&lock->mutex);
-
- if (!err)
- return true;
- else if (err == EBUSY)
- return false;
- BUG();
-}
-
-static inline void lock_impl_init(struct lock_impl *lock)
-{
- pthread_mutex_init(&lock->mutex, NULL);
-}
-
-#define LOCK_IMPL_INITIALIZER {.mutex = PTHREAD_MUTEX_INITIALIZER}
-
-#else /* !defined(PTHREAD_LOCK) */
-/* Spinlock that assumes that it always gets the lock immediately. */
-
-struct lock_impl {
- bool locked;
-};
-
-static inline bool lock_impl_trylock(struct lock_impl *lock)
-{
-#ifdef RUN
- /* TODO: Should this be a test and set? */
- return __sync_bool_compare_and_swap(&lock->locked, false, true);
-#else
- __CPROVER_atomic_begin();
- bool old_locked = lock->locked;
- lock->locked = true;
- __CPROVER_atomic_end();
-
- /* Minimal barrier to prevent accesses leaking out of lock. */
- __CPROVER_fence("RRfence", "RWfence");
-
- return !old_locked;
-#endif
-}
-
-static inline void lock_impl_lock(struct lock_impl *lock)
-{
- /*
- * CBMC doesn't support busy waiting, so just assume that the
- * lock is available.
- */
- assume(lock_impl_trylock(lock));
-
- /*
- * If the lock was already held by this thread then the assumption
- * is unsatisfiable (deadlock).
- */
-}
-
-static inline void lock_impl_unlock(struct lock_impl *lock)
-{
-#ifdef RUN
- BUG_ON(!__sync_bool_compare_and_swap(&lock->locked, true, false));
-#else
- /* Minimal barrier to prevent accesses leaking out of lock. */
- __CPROVER_fence("RWfence", "WWfence");
-
- __CPROVER_atomic_begin();
- bool old_locked = lock->locked;
- lock->locked = false;
- __CPROVER_atomic_end();
-
- BUG_ON(!old_locked);
-#endif
-}
-
-static inline void lock_impl_init(struct lock_impl *lock)
-{
- lock->locked = false;
-}
-
-#define LOCK_IMPL_INITIALIZER {.locked = false}
-
-#endif /* !defined(PTHREAD_LOCK) */
-
-/*
- * Implement spinlocks using the lock mechanism. Wrap the lock to prevent mixing
- * locks of different types.
- */
-typedef struct {
- struct lock_impl internal_lock;
-} spinlock_t;
-
-#define SPIN_LOCK_UNLOCKED {.internal_lock = LOCK_IMPL_INITIALIZER}
-#define __SPIN_LOCK_UNLOCKED(x) SPIN_LOCK_UNLOCKED
-#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
-
-static inline void spin_lock_init(spinlock_t *lock)
-{
- lock_impl_init(&lock->internal_lock);
-}
-
-static inline void spin_lock(spinlock_t *lock)
-{
- /*
- * Spin locks also need to be removed in order to eliminate all
- * memory barriers. They are only used by the write side anyway.
- */
-#ifndef NO_SYNC_SMP_MB
- preempt_disable();
- lock_impl_lock(&lock->internal_lock);
-#endif
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-#ifndef NO_SYNC_SMP_MB
- lock_impl_unlock(&lock->internal_lock);
- preempt_enable();
-#endif
-}
-
-/* Don't bother with interrupts */
-#define spin_lock_irq(lock) spin_lock(lock)
-#define spin_unlock_irq(lock) spin_unlock(lock)
-#define spin_lock_irqsave(lock, flags) spin_lock(lock)
-#define spin_unlock_irqrestore(lock, flags) spin_unlock(lock)
-
-/*
- * This is supposed to return an int, but I think that a bool should work as
- * well.
- */
-static inline bool spin_trylock(spinlock_t *lock)
-{
-#ifndef NO_SYNC_SMP_MB
- preempt_disable();
- return lock_impl_trylock(&lock->internal_lock);
-#else
- return true;
-#endif
-}
-
-struct completion {
- /* Hopefully this won't overflow. */
- unsigned int count;
-};
-
-#define COMPLETION_INITIALIZER(x) {.count = 0}
-#define DECLARE_COMPLETION(x) struct completion x = COMPLETION_INITIALIZER(x)
-#define DECLARE_COMPLETION_ONSTACK(x) DECLARE_COMPLETION(x)
-
-static inline void init_completion(struct completion *c)
-{
- c->count = 0;
-}
-
-static inline void wait_for_completion(struct completion *c)
-{
- unsigned int prev_count = __sync_fetch_and_sub(&c->count, 1);
-
- assume(prev_count);
-}
-
-static inline void complete(struct completion *c)
-{
- unsigned int prev_count = __sync_fetch_and_add(&c->count, 1);
-
- BUG_ON(prev_count == UINT_MAX);
-}
-
-/* This function probably isn't very useful for CBMC. */
-static inline bool try_wait_for_completion(struct completion *c)
-{
- BUG();
-}
-
-static inline bool completion_done(struct completion *c)
-{
- return c->count;
-}
-
-/* TODO: Implement complete_all */
-static inline void complete_all(struct completion *c)
-{
- BUG();
-}
-
-#endif
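
The deleted locks.h models spin_lock() by assume()-ing that a single trylock attempt succeeds, since CBMC cannot explore an unbounded busy-wait loop; the RUN build instead falls back to a real compare-and-swap. Below is a minimal, self-contained sketch of that RUN-mode trylock spinlock, not the selftest code itself (the spin loop and counter test are illustrative):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct lock_impl {
		bool locked;
	};

	/* Take the lock with a single atomic compare-and-swap attempt. */
	static bool lock_impl_trylock(struct lock_impl *lock)
	{
		return __sync_bool_compare_and_swap(&lock->locked, false, true);
	}

	static void lock_impl_lock(struct lock_impl *lock)
	{
		/* Real code spins; the CBMC model assume()s success instead. */
		while (!lock_impl_trylock(lock))
			;
	}

	static void lock_impl_unlock(struct lock_impl *lock)
	{
		__sync_bool_compare_and_swap(&lock->locked, true, false);
	}

	static struct lock_impl lock = { .locked = false };
	static int counter;

	static void *worker(void *arg)
	{
		for (int i = 0; i < 100000; i++) {
			lock_impl_lock(&lock);
			counter++;
			lock_impl_unlock(&lock);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, worker, NULL);
		pthread_create(&b, NULL, worker, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		printf("counter = %d (expect 200000)\n", counter);
		return 0;
	}

Replacing the while loop with assume(lock_impl_trylock(lock)) is what keeps the CBMC state space bounded: an execution in which the lock is busy is simply discarded rather than explored.
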
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c
deleted file mode 100644
index 9440cc39e3c6..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.c
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <config.h>
-
-#include "misc.h"
-#include "bug_on.h"
-
-struct rcu_head;
-
-void wakeme_after_rcu(struct rcu_head *head)
-{
- BUG();
-}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h
deleted file mode 100644
index aca50030f954..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/misc.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef MISC_H
-#define MISC_H
-
-#include "assume.h"
-#include "int_typedefs.h"
-#include "locks.h"
-
-#include <linux/types.h>
-
-/* Probably won't need to deal with bottom halves. */
-static inline void local_bh_disable(void) {}
-static inline void local_bh_enable(void) {}
-
-#define MODULE_ALIAS(X)
-#define module_param(...)
-#define EXPORT_SYMBOL_GPL(x)
-
-#define container_of(ptr, type, member) ({ \
- const typeof(((type *)0)->member) *__mptr = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); \
-})
-
-#ifndef USE_SIMPLE_SYNC_SRCU
-/* Abuse udelay to make sure that busy loops terminate. */
-#define udelay(x) assume(0)
-
-#else
-
-/* The simple custom synchronize_srcu is ok with try_check_zero failing. */
-#define udelay(x) do { } while (0)
-#endif
-
-#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
- do { } while (0)
-
-#define notrace
-
-/* Avoid including rcupdate.h */
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-void wakeme_after_rcu(struct rcu_head *head);
-
-#define rcu_lock_acquire(a) do { } while (0)
-#define rcu_lock_release(a) do { } while (0)
-#define rcu_lockdep_assert(c, s) do { } while (0)
-#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
-
-/* Let CBMC non-deterministically choose between normal and expedited. */
-bool rcu_gp_is_normal(void);
-bool rcu_gp_is_expedited(void);
-
-/* Do the same for old versions of rcu. */
-#define rcu_expedited (rcu_gp_is_expedited())
-
-#endif
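
misc.h carried its own copy of container_of(), which recovers a pointer to the enclosing structure from a pointer to one of its members, exactly how rcu_synchronize embeds its rcu_head. A small stand-alone illustration of the same macro, assuming nothing beyond GNU C and offsetof():

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) ({			\
		const typeof(((type *)0)->member) *__mptr = (ptr);	\
		(type *)((char *)__mptr - offsetof(type, member));	\
	})

	struct rcu_head {
		int dummy;
	};

	struct rcu_synchronize {
		struct rcu_head head;
		int completed;
	};

	static void callback(struct rcu_head *head)
	{
		/* Walk back from the member to the containing structure. */
		struct rcu_synchronize *rs =
			container_of(head, struct rcu_synchronize, head);

		rs->completed = 1;
	}

	int main(void)
	{
		struct rcu_synchronize rs = { .completed = 0 };

		callback(&rs.head);
		printf("completed = %d\n", rs.completed);
		return 0;
	}
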
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h
deleted file mode 100644
index 27e67a3f291f..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/percpu.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PERCPU_H
-#define PERCPU_H
-
-#include <stddef.h>
-#include "bug_on.h"
-#include "preempt.h"
-
-#define __percpu
-
-/* Maximum size of any percpu data. */
-#define PERCPU_OFFSET (4 * sizeof(long))
-
-/* Ignore alignment, as CBMC doesn't care about false sharing. */
-#define alloc_percpu(type) __alloc_percpu(sizeof(type), 1)
-
-static inline void *__alloc_percpu(size_t size, size_t align)
-{
- BUG();
- return NULL;
-}
-
-static inline void free_percpu(void *ptr)
-{
- BUG();
-}
-
-#define per_cpu_ptr(ptr, cpu) \
- ((typeof(ptr)) ((char *) (ptr) + PERCPU_OFFSET * cpu))
-
-#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
-#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
-#define __this_cpu_sub(pcp, n) __this_cpu_add(pcp, -(typeof(pcp)) (n))
-
-#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
-#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
-#define this_cpu_sub(pcp, n) this_cpu_add(pcp, -(typeof(pcp)) (n))
-
-/* Make CBMC use atomics to work around a CBMC bug. */
-#ifdef RUN
-#define THIS_CPU_ADD_HELPER(ptr, x) (*(ptr) += (x))
-#else
-/*
- * Split the atomic into a read and a write so that it has the least
- * possible ordering.
- */
-#define THIS_CPU_ADD_HELPER(ptr, x) \
- do { \
- typeof(ptr) this_cpu_add_helper_ptr = (ptr); \
- typeof(ptr) this_cpu_add_helper_x = (x); \
- typeof(*ptr) this_cpu_add_helper_temp; \
- __CPROVER_atomic_begin(); \
- this_cpu_add_helper_temp = *(this_cpu_add_helper_ptr); \
- __CPROVER_atomic_end(); \
- this_cpu_add_helper_temp += this_cpu_add_helper_x; \
- __CPROVER_atomic_begin(); \
- *(this_cpu_add_helper_ptr) = this_cpu_add_helper_temp; \
- __CPROVER_atomic_end(); \
- } while (0)
-#endif
-
-/*
- * For some reason CBMC needs an atomic operation even though this is percpu
- * data.
- */
-#define __this_cpu_add(pcp, n) \
- do { \
- BUG_ON(preemptible()); \
- THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), thread_cpu_id), \
- (typeof(pcp)) (n)); \
- } while (0)
-
-#define this_cpu_add(pcp, n) \
- do { \
- int this_cpu_add_impl_cpu = get_cpu(); \
- THIS_CPU_ADD_HELPER(per_cpu_ptr(&(pcp), this_cpu_add_impl_cpu), \
- (typeof(pcp)) (n)); \
- put_cpu(); \
- } while (0)
-
-/*
- * This will cause a compiler warning because of the cast from char[][] to
- * type*. This will cause a compile time error if type is too big.
- */
-#define DEFINE_PER_CPU(type, name) \
- char name[NR_CPUS][PERCPU_OFFSET]; \
- typedef char percpu_too_big_##name \
- [sizeof(type) > PERCPU_OFFSET ? -1 : 1]
-
-#define for_each_possible_cpu(cpu) \
- for ((cpu) = 0; (cpu) < NR_CPUS; ++(cpu))
-
-#endif
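
percpu.h's DEFINE_PER_CPU() rejects oversized types at compile time by declaring an array whose size becomes negative when sizeof(type) exceeds PERCPU_OFFSET. The same trick in isolation (illustrative only; new code would normally use _Static_assert instead):

	#include <stdio.h>

	#define PERCPU_OFFSET (4 * sizeof(long))
	#define NR_CPUS 2

	/*
	 * A negative array size is a hard compile error, so the typedef
	 * only compiles when the type fits into one per-CPU slot.
	 */
	#define DEFINE_PER_CPU(type, name)				\
		char name[NR_CPUS][PERCPU_OFFSET];			\
		typedef char percpu_too_big_##name			\
			[sizeof(type) > PERCPU_OFFSET ? -1 : 1]

	DEFINE_PER_CPU(long, ok_counter);	/* fits: compiles */
	/* DEFINE_PER_CPU(long double[10], too_big); would fail to build */

	int main(void)
	{
		printf("slot size: %zu bytes\n", (size_t)PERCPU_OFFSET);
		return 0;
	}
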
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c
deleted file mode 100644
index b4083ae348fb..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <config.h>
-
-#include "preempt.h"
-
-#include "assume.h"
-#include "locks.h"
-
-/* Support NR_CPUS of at most 64 */
-#define CPU_PREEMPTION_LOCKS_INIT0 LOCK_IMPL_INITIALIZER
-#define CPU_PREEMPTION_LOCKS_INIT1 \
- CPU_PREEMPTION_LOCKS_INIT0, CPU_PREEMPTION_LOCKS_INIT0
-#define CPU_PREEMPTION_LOCKS_INIT2 \
- CPU_PREEMPTION_LOCKS_INIT1, CPU_PREEMPTION_LOCKS_INIT1
-#define CPU_PREEMPTION_LOCKS_INIT3 \
- CPU_PREEMPTION_LOCKS_INIT2, CPU_PREEMPTION_LOCKS_INIT2
-#define CPU_PREEMPTION_LOCKS_INIT4 \
- CPU_PREEMPTION_LOCKS_INIT3, CPU_PREEMPTION_LOCKS_INIT3
-#define CPU_PREEMPTION_LOCKS_INIT5 \
- CPU_PREEMPTION_LOCKS_INIT4, CPU_PREEMPTION_LOCKS_INIT4
-
-/*
- * Simulate disabling preemption by locking a particular cpu. NR_CPUS
- * should be the actual number of cpus, not just the maximum.
- */
-struct lock_impl cpu_preemption_locks[NR_CPUS] = {
- CPU_PREEMPTION_LOCKS_INIT0
-#if (NR_CPUS - 1) & 1
- , CPU_PREEMPTION_LOCKS_INIT0
-#endif
-#if (NR_CPUS - 1) & 2
- , CPU_PREEMPTION_LOCKS_INIT1
-#endif
-#if (NR_CPUS - 1) & 4
- , CPU_PREEMPTION_LOCKS_INIT2
-#endif
-#if (NR_CPUS - 1) & 8
- , CPU_PREEMPTION_LOCKS_INIT3
-#endif
-#if (NR_CPUS - 1) & 16
- , CPU_PREEMPTION_LOCKS_INIT4
-#endif
-#if (NR_CPUS - 1) & 32
- , CPU_PREEMPTION_LOCKS_INIT5
-#endif
-};
-
-#undef CPU_PREEMPTION_LOCKS_INIT0
-#undef CPU_PREEMPTION_LOCKS_INIT1
-#undef CPU_PREEMPTION_LOCKS_INIT2
-#undef CPU_PREEMPTION_LOCKS_INIT3
-#undef CPU_PREEMPTION_LOCKS_INIT4
-#undef CPU_PREEMPTION_LOCKS_INIT5
-
-__thread int thread_cpu_id;
-__thread int preempt_disable_count;
-
-void preempt_disable(void)
-{
- BUG_ON(preempt_disable_count < 0 || preempt_disable_count == INT_MAX);
-
- if (preempt_disable_count++)
- return;
-
- thread_cpu_id = nondet_int();
- assume(thread_cpu_id >= 0);
- assume(thread_cpu_id < NR_CPUS);
- lock_impl_lock(&cpu_preemption_locks[thread_cpu_id]);
-}
-
-void preempt_enable(void)
-{
- BUG_ON(preempt_disable_count < 1);
-
- if (--preempt_disable_count)
- return;
-
- lock_impl_unlock(&cpu_preemption_locks[thread_cpu_id]);
-}
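
preempt.c models preemption with a per-thread nesting counter plus a per-CPU lock: only the outermost preempt_disable() picks a CPU and takes its lock, and only the matching outermost preempt_enable() releases it. The nesting bookkeeping on its own, as a hedged sketch using a __thread counter (the CPU-lock side is replaced here by prints):

	#include <stdio.h>

	static __thread int preempt_disable_count;

	static void preempt_disable(void)
	{
		/* Only the outermost call would pick a CPU and lock it. */
		if (preempt_disable_count++ == 0)
			printf("outermost disable: pin to a CPU\n");
	}

	static void preempt_enable(void)
	{
		if (--preempt_disable_count == 0)
			printf("outermost enable: release the CPU\n");
	}

	int main(void)
	{
		preempt_disable();	/* outermost */
		preempt_disable();	/* nested: no extra locking */
		preempt_enable();
		preempt_enable();	/* outermost: releases */
		return 0;
	}
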
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h
deleted file mode 100644
index f8b762cd214c..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/preempt.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PREEMPT_H
-#define PREEMPT_H
-
-#include <stdbool.h>
-
-#include "bug_on.h"
-
-/* This flag contains garbage if preempt_disable_count is 0. */
-extern __thread int thread_cpu_id;
-
-/* Support recursive preemption disabling. */
-extern __thread int preempt_disable_count;
-
-void preempt_disable(void);
-void preempt_enable(void);
-
-static inline void preempt_disable_notrace(void)
-{
- preempt_disable();
-}
-
-static inline void preempt_enable_no_resched(void)
-{
- preempt_enable();
-}
-
-static inline void preempt_enable_notrace(void)
-{
- preempt_enable();
-}
-
-static inline int preempt_count(void)
-{
- return preempt_disable_count;
-}
-
-static inline bool preemptible(void)
-{
- return !preempt_count();
-}
-
-static inline int get_cpu(void)
-{
- preempt_disable();
- return thread_cpu_id;
-}
-
-static inline void put_cpu(void)
-{
- preempt_enable();
-}
-
-static inline void might_sleep(void)
-{
- BUG_ON(preempt_disable_count);
-}
-
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c
deleted file mode 100644
index 97f592048e0b..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/simple_sync_srcu.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <config.h>
-
-#include <assert.h>
-#include <errno.h>
-#include <inttypes.h>
-#include <pthread.h>
-#include <stddef.h>
-#include <string.h>
-#include <sys/types.h>
-
-#include "int_typedefs.h"
-
-#include "barriers.h"
-#include "bug_on.h"
-#include "locks.h"
-#include "misc.h"
-#include "preempt.h"
-#include "percpu.h"
-#include "workqueues.h"
-
-#include <linux/srcu.h>
-
-/* Functions needed from modify_srcu.c */
-bool try_check_zero(struct srcu_struct *sp, int idx, int trycount);
-void srcu_flip(struct srcu_struct *sp);
-
-/* Simpler implementation of synchronize_srcu that ignores batching. */
-void synchronize_srcu(struct srcu_struct *sp)
-{
- int idx;
- /*
- * This code assumes that try_check_zero will succeed anyway,
- * so there is no point in multiple tries.
- */
- const int trycount = 1;
-
- might_sleep();
-
- /* Ignore the lock, as multiple writers aren't working yet anyway. */
-
- idx = 1 ^ (sp->completed & 1);
-
- /* For comments see srcu_advance_batches. */
-
- assume(try_check_zero(sp, idx, trycount));
-
- srcu_flip(sp);
-
- assume(try_check_zero(sp, idx^1, trycount));
-}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h
deleted file mode 100644
index 28b960300971..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef WORKQUEUES_H
-#define WORKQUEUES_H
-
-#include <stdbool.h>
-
-#include "barriers.h"
-#include "bug_on.h"
-#include "int_typedefs.h"
-
-#include <linux/types.h>
-
-/* Stub workqueue implementation. */
-
-struct work_struct;
-typedef void (*work_func_t)(struct work_struct *work);
-void delayed_work_timer_fn(unsigned long __data);
-
-struct work_struct {
-/* atomic_long_t data; */
- unsigned long data;
-
- struct list_head entry;
- work_func_t func;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map lockdep_map;
-#endif
-};
-
-struct timer_list {
- struct hlist_node entry;
- unsigned long expires;
- void (*function)(unsigned long);
- unsigned long data;
- u32 flags;
- int slack;
-};
-
-struct delayed_work {
- struct work_struct work;
- struct timer_list timer;
-
- /* target workqueue and CPU ->timer uses to queue ->work */
- struct workqueue_struct *wq;
- int cpu;
-};
-
-
-static inline bool schedule_work(struct work_struct *work)
-{
- BUG();
- return true;
-}
-
-static inline bool schedule_work_on(int cpu, struct work_struct *work)
-{
- BUG();
- return true;
-}
-
-static inline bool queue_work(struct workqueue_struct *wq,
- struct work_struct *work)
-{
- BUG();
- return true;
-}
-
-static inline bool queue_delayed_work(struct workqueue_struct *wq,
- struct delayed_work *dwork,
- unsigned long delay)
-{
- BUG();
- return true;
-}
-
-#define INIT_WORK(w, f) \
- do { \
- (w)->data = 0; \
- (w)->func = (f); \
- } while (0)
-
-#define INIT_DELAYED_WORK(w, f) INIT_WORK(&(w)->work, (f))
-
-#define __WORK_INITIALIZER(n, f) { \
- .data = 0, \
- .entry = { &(n).entry, &(n).entry }, \
- .func = f \
- }
-
-/* Don't bother initializing timer. */
-#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
- .work = __WORK_INITIALIZER((n).work, (f)), \
- }
-
-#define DECLARE_WORK(n, f) \
- struct workqueue_struct n = __WORK_INITIALIZER
-
-#define DECLARE_DELAYED_WORK(n, f) \
- struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
-
-#define system_power_efficient_wq ((struct workqueue_struct *) NULL)
-
-#endif
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore
deleted file mode 100644
index d65462d64816..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-*.out
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile
deleted file mode 100644
index ad21b925fbb4..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-CBMC_FLAGS = -I../.. -I../../src -I../../include -I../../empty_includes -32 -pointer-check -mm pso
-
-all:
- for i in ./*.pass; do \
- echo $$i ; \
- CBMC_FLAGS="$(CBMC_FLAGS)" sh ../test_script.sh --should-pass $$i > $$i.out 2>&1 ; \
- done
- for i in ./*.fail; do \
- echo $$i ; \
- CBMC_FLAGS="$(CBMC_FLAGS)" sh ../test_script.sh --should-fail $$i > $$i.out 2>&1 ; \
- done
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail
deleted file mode 100644
index 40c8075919d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/assert_end.fail
+++ /dev/null
@@ -1 +0,0 @@
-test_cbmc_options="-DASSERT_END"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail
deleted file mode 100644
index ada5baf0b60d..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force.fail
+++ /dev/null
@@ -1 +0,0 @@
-test_cbmc_options="-DFORCE_FAILURE"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail
deleted file mode 100644
index 8fe00c8db466..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force2.fail
+++ /dev/null
@@ -1 +0,0 @@
-test_cbmc_options="-DFORCE_FAILURE_2"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail
deleted file mode 100644
index 612ed6772844..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/force3.fail
+++ /dev/null
@@ -1 +0,0 @@
-test_cbmc_options="-DFORCE_FAILURE_3"
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/main.pass
+++ /dev/null
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c
deleted file mode 100644
index 2ce2016f7871..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/store_buffering/test.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <src/combined_source.c>
-
-int x;
-int y;
-
-int __unbuffered_tpr_x;
-int __unbuffered_tpr_y;
-
-DEFINE_SRCU(ss);
-
-void rcu_reader(void)
-{
- int idx;
-
-#ifndef FORCE_FAILURE_3
- idx = srcu_read_lock(&ss);
-#endif
- might_sleep();
-
- __unbuffered_tpr_y = READ_ONCE(y);
-#ifdef FORCE_FAILURE
- srcu_read_unlock(&ss, idx);
- idx = srcu_read_lock(&ss);
-#endif
- WRITE_ONCE(x, 1);
-
-#ifndef FORCE_FAILURE_3
- srcu_read_unlock(&ss, idx);
-#endif
- might_sleep();
-}
-
-void *thread_update(void *arg)
-{
- WRITE_ONCE(y, 1);
-#ifndef FORCE_FAILURE_2
- synchronize_srcu(&ss);
-#endif
- might_sleep();
- __unbuffered_tpr_x = READ_ONCE(x);
-
- return NULL;
-}
-
-void *thread_process_reader(void *arg)
-{
- rcu_reader();
-
- return NULL;
-}
-
-int main(int argc, char *argv[])
-{
- pthread_t tu;
- pthread_t tpr;
-
- if (pthread_create(&tu, NULL, thread_update, NULL))
- abort();
- if (pthread_create(&tpr, NULL, thread_process_reader, NULL))
- abort();
- if (pthread_join(tu, NULL))
- abort();
- if (pthread_join(tpr, NULL))
- abort();
- assert(__unbuffered_tpr_y != 0 || __unbuffered_tpr_x != 0);
-
-#ifdef ASSERT_END
- assert(0);
-#endif
-
- return 0;
-}
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh
deleted file mode 100755
index 2fe1f0339b4f..000000000000
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/tests/test_script.sh
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-# This script expects a mode (either --should-pass or --should-fail) followed by
-# an input file. The script uses the following environment variables. The test C
-# source file is expected to be named test.c in the directory containing the
-# input file.
-#
-# CBMC: The command to run CBMC. Default: cbmc
-# CBMC_FLAGS: Additional flags to pass to CBMC
-# NR_CPUS: Number of cpus to run tests with. Default specified by the test
-# SYNC_SRCU_MODE: Choose implementation of synchronize_srcu. Defaults to simple.
-# kernel: Version included in the linux kernel source.
-# simple: Use try_check_zero directly.
-#
-# The input file is a script that is sourced by this file. It can define any of
-# the following variables to configure the test.
-#
-# test_cbmc_options: Extra options to pass to CBMC.
-# min_cpus_fail: Minimum number of CPUs (NR_CPUS) for verification to fail.
-# The test is expected to pass if it is run with fewer. (Only
-# useful for .fail files)
-# default_cpus: Quantity of CPUs to use for the test, if not specified on the
-# command line. Default: Larger of 2 and MIN_CPUS_FAIL.
-
-set -e
-
-if test "$#" -ne 2; then
- echo "Expected one option followed by an input file" 1>&2
- exit 99
-fi
-
-if test "x$1" = "x--should-pass"; then
- should_pass="yes"
-elif test "x$1" = "x--should-fail"; then
- should_pass="no"
-else
- echo "Unrecognized argument '$1'" 1>&2
-
- # Exit code 99 indicates a hard error.
- exit 99
-fi
-
-CBMC=${CBMC:-cbmc}
-
-SYNC_SRCU_MODE=${SYNC_SRCU_MODE:-simple}
-
-case ${SYNC_SRCU_MODE} in
-kernel) sync_srcu_mode_flags="" ;;
-simple) sync_srcu_mode_flags="-DUSE_SIMPLE_SYNC_SRCU" ;;
-
-*)
- echo "Unrecognized argument '${SYNC_SRCU_MODE}'" 1>&2
- exit 99
- ;;
-esac
-
-min_cpus_fail=1
-
-c_file=`dirname "$2"`/test.c
-
-# Source the input file.
-. $2
-
-if test ${min_cpus_fail} -gt 2; then
- default_default_cpus=${min_cpus_fail}
-else
- default_default_cpus=2
-fi
-default_cpus=${default_cpus:-${default_default_cpus}}
-cpus=${NR_CPUS:-${default_cpus}}
-
-# Check if there are too few cpus to make the test fail.
-if test $cpus -lt ${min_cpus_fail:-0}; then
- should_pass="yes"
-fi
-
-cbmc_opts="-DNR_CPUS=${cpus} ${sync_srcu_mode_flags} ${test_cbmc_options} ${CBMC_FLAGS}"
-
-echo "Running CBMC: ${CBMC} ${cbmc_opts} ${c_file}"
-if ${CBMC} ${cbmc_opts} "${c_file}"; then
- # Verification successful. Make sure that it was supposed to verify.
- test "x${should_pass}" = xyes
-else
- cbmc_exit_status=$?
-
- # An exit status of 10 indicates a failed verification.
- # (see cbmc_parse_optionst::do_bmc in the CBMC source code)
- if test ${cbmc_exit_status} -eq 10 && test "x${should_pass}" = xno; then
- :
- else
- echo "CBMC returned ${cbmc_exit_status} exit status" 1>&2
-
- # Parse errors have exit status 6. Any other type of error
- # should be considered a hard error.
- if test ${cbmc_exit_status} -ne 6 && \
- test ${cbmc_exit_status} -ne 10; then
- exit 99
- else
- exit 1
- fi
- fi
-fi
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
index 73d53257df42..5073dbc96125 100644
--- a/tools/testing/selftests/resctrl/Makefile
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -7,4 +7,4 @@ TEST_GEN_PROGS := resctrl_tests
include ../lib.mk
-$(OUTPUT)/resctrl_tests: $(wildcard *.c)
+$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
index 8a4fe8693be6..d3cbb829ff6a 100644
--- a/tools/testing/selftests/resctrl/cache.c
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -87,21 +87,19 @@ static int reset_enable_llc_perf(pid_t pid, int cpu_no)
static int get_llc_perf(unsigned long *llc_perf_miss)
{
__u64 total_misses;
+ int ret;
/* Stop counters after one span to get miss rate */
ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
- if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) {
+ ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
+ if (ret == -1) {
perror("Could not get llc misses through perf");
-
return -1;
}
total_misses = rf_cqm.values[0].value;
-
- close(fd_lm);
-
*llc_perf_miss = total_misses;
return 0;
@@ -212,7 +210,7 @@ int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
*/
int cat_val(struct resctrl_val_param *param)
{
- int malloc_and_init_memory = 1, memflush = 1, operation = 0, ret = 0;
+ int memflush = 1, operation = 0, ret = 0;
char *resctrl_val = param->resctrl_val;
pid_t bm_pid;
@@ -232,40 +230,38 @@ int cat_val(struct resctrl_val_param *param)
if (ret)
return ret;
- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
- initialize_llc_perf();
+ initialize_llc_perf();
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
- ret = param->setup(1, param);
- if (ret == END_OF_TESTS) {
- ret = 0;
- break;
- }
- if (ret < 0)
- break;
- ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
- if (ret)
- break;
-
- if (run_fill_buf(param->span, malloc_and_init_memory,
- memflush, operation, resctrl_val)) {
- fprintf(stderr, "Error-running fill buffer\n");
- ret = -1;
- break;
- }
-
- sleep(1);
- ret = measure_cache_vals(param, bm_pid);
- if (ret)
- break;
- } else {
+ ret = param->setup(param);
+ if (ret == END_OF_TESTS) {
+ ret = 0;
+ break;
+ }
+ if (ret < 0)
+ break;
+ ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
+ if (ret)
break;
+
+ if (run_fill_buf(param->span, memflush, operation, true)) {
+ fprintf(stderr, "Error-running fill buffer\n");
+ ret = -1;
+ goto pe_close;
}
+
+ sleep(1);
+ ret = measure_cache_vals(param, bm_pid);
+ if (ret)
+ goto pe_close;
}
return ret;
+
+pe_close:
+ close(fd_lm);
+ return ret;
}
/*
@@ -282,7 +278,7 @@ int cat_val(struct resctrl_val_param *param)
* Return: 0 on success. non-zero on failure.
*/
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
- unsigned long cache_span, unsigned long max_diff,
+ size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt)
{
@@ -291,7 +287,7 @@ int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
long avg_diff = 0;
int ret;
- avg_llc_val = sum_llc_val / (num_of_runs - 1);
+ avg_llc_val = sum_llc_val / num_of_runs;
avg_diff = (long)abs(cache_span - avg_llc_val);
diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
@@ -304,7 +300,7 @@ int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
ksft_print_msg("Number of bits: %d\n", no_of_bits);
ksft_print_msg("Average LLC val: %lu\n", avg_llc_val);
- ksft_print_msg("Cache span (%s): %lu\n", cmt ? "bytes" : "lines",
+ ksft_print_msg("Cache span (%s): %zu\n", cmt ? "bytes" : "lines",
cache_span);
return ret;
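
The cat_val() rework routes its error paths through a single pe_close label so the perf file descriptor is closed exactly once on failure instead of inside the measurement helper. The general goto-cleanup idiom that this follows, in a self-contained sketch (the file name and error handling are illustrative, not taken from the selftest):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int process(const char *path)
	{
		char buf[64];
		ssize_t n;
		int ret = 0;
		int fd;

		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror("open");
			return -1;
		}

		n = read(fd, buf, sizeof(buf));
		if (n < 0) {
			perror("read");
			ret = -1;
			goto out_close;	/* single exit point owns the fd */
		}

		printf("read %zd bytes\n", n);

	out_close:
		close(fd);
		return ret;
	}

	int main(void)
	{
		return process("/proc/version") ? 1 : 0;
	}
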
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
index fb1443f888c4..3848dfb46aba 100644
--- a/tools/testing/selftests/resctrl/cat_test.c
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -17,27 +17,16 @@
#define MAX_DIFF_PERCENT 4
#define MAX_DIFF 1000000
-static int count_of_bits;
-static char cbm_mask[256];
-static unsigned long long_mask;
-static unsigned long cache_size;
-
/*
* Change schemata. Write schemata to specified
* con_mon grp, mon_grp in resctrl FS.
* Run 5 times in order to get average values.
*/
-static int cat_setup(int num, ...)
+static int cat_setup(struct resctrl_val_param *p)
{
- struct resctrl_val_param *p;
char schemata[64];
- va_list param;
int ret = 0;
- va_start(param, num);
- p = va_arg(param, struct resctrl_val_param *);
- va_end(param);
-
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
@@ -88,7 +77,7 @@ static int check_results(struct resctrl_val_param *param)
no_of_bits = count_bits(param->mask);
return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
- MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
+ MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
get_vendor() == ARCH_INTEL, false);
}
@@ -102,14 +91,12 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
{
unsigned long l_mask, l_mask_1;
int ret, pipefd[2], sibling_cpu_no;
+ unsigned long cache_size = 0;
+ unsigned long long_mask;
+ char cbm_mask[256];
+ int count_of_bits;
char pipe_message;
- cache_size = 0;
-
- ret = remount_resctrlfs(true);
- if (ret)
- return ret;
-
/* Get default cbm mask for L3/L2 cache */
ret = get_cbm_mask(cache_type, cbm_mask);
if (ret)
@@ -144,7 +131,6 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
struct resctrl_val_param param = {
.resctrl_val = CAT_STR,
.cpu_no = cpu_no,
- .mum_resctrlfs = false,
.setup = cat_setup,
};
@@ -227,8 +213,6 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
out:
cat_test_cleanup();
- if (bm_pid)
- umount_resctrlfs();
return ret;
}
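
The setup callbacks drop the variadic (int num, ...) signature: every test passed exactly one struct resctrl_val_param pointer through va_arg, so the indirection bought nothing and hid type errors from the compiler. A before/after sketch of the two calling conventions (struct and field names are simplified stand-ins):

	#include <stdarg.h>
	#include <stdio.h>

	struct val_param {
		int num_of_runs;
	};

	/* Old style: the pointer is smuggled through varargs. */
	static int setup_vararg(int num, ...)
	{
		struct val_param *p;
		va_list ap;

		va_start(ap, num);
		p = va_arg(ap, struct val_param *);
		va_end(ap);

		return p->num_of_runs++;
	}

	/* New style: the parameter is typed, so the compiler checks the call. */
	static int setup_direct(struct val_param *p)
	{
		return p->num_of_runs++;
	}

	int main(void)
	{
		struct val_param p = { .num_of_runs = 0 };

		setup_vararg(1, &p);
		setup_direct(&p);
		printf("runs = %d\n", p.num_of_runs);	/* 2 */
		return 0;
	}
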
diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
index af71b2141271..cb2197647c6c 100644
--- a/tools/testing/selftests/resctrl/cmt_test.c
+++ b/tools/testing/selftests/resctrl/cmt_test.c
@@ -16,20 +16,8 @@
#define MAX_DIFF 2000000
#define MAX_DIFF_PERCENT 15
-static int count_of_bits;
-static char cbm_mask[256];
-static unsigned long long_mask;
-static unsigned long cache_size;
-
-static int cmt_setup(int num, ...)
+static int cmt_setup(struct resctrl_val_param *p)
{
- struct resctrl_val_param *p;
- va_list param;
-
- va_start(param, num);
- p = va_arg(param, struct resctrl_val_param *);
- va_end(param);
-
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
@@ -71,7 +59,7 @@ static int check_results(struct resctrl_val_param *param, int no_of_bits)
fclose(fp);
return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
- MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
+ MAX_DIFF, MAX_DIFF_PERCENT, runs - 1,
true, true);
}
@@ -82,14 +70,12 @@ void cmt_test_cleanup(void)
int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
{
+ unsigned long cache_size = 0;
+ unsigned long long_mask;
+ char cbm_mask[256];
+ int count_of_bits;
int ret;
- cache_size = 0;
-
- ret = remount_resctrlfs(true);
- if (ret)
- return ret;
-
if (!validate_resctrl_feature_request(CMT_STR))
return -1;
@@ -117,7 +103,6 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
- .mum_resctrlfs = false,
.filename = RESULT_FILE_NAME,
.mask = ~(long_mask << n) & long_mask,
.span = cache_size * n / count_of_bits,
@@ -126,7 +111,7 @@ int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
};
if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
- sprintf(benchmark_cmd[1], "%lu", param.span);
+ sprintf(benchmark_cmd[1], "%zu", param.span);
remove(RESULT_FILE_NAME);
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
index 341cc93ca84c..0d425f26583a 100644
--- a/tools/testing/selftests/resctrl/fill_buf.c
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -22,8 +22,6 @@
#define PAGE_SIZE (4 * 1024)
#define MB (1024 * 1024)
-static unsigned char *startptr;
-
static void sb(void)
{
#if defined(__i386) || defined(__x86_64)
@@ -40,32 +38,32 @@ static void cl_flush(void *p)
#endif
}
-static void mem_flush(void *p, size_t s)
+static void mem_flush(unsigned char *buf, size_t buf_size)
{
- char *cp = (char *)p;
+ unsigned char *cp = buf;
size_t i = 0;
- s = s / CL_SIZE; /* mem size in cache llines */
+ buf_size = buf_size / CL_SIZE; /* mem size in cache lines */
- for (i = 0; i < s; i++)
+ for (i = 0; i < buf_size; i++)
cl_flush(&cp[i * CL_SIZE]);
sb();
}
-static void *malloc_and_init_memory(size_t s)
+static void *malloc_and_init_memory(size_t buf_size)
{
void *p = NULL;
uint64_t *p64;
size_t s64;
int ret;
- ret = posix_memalign(&p, PAGE_SIZE, s);
+ ret = posix_memalign(&p, PAGE_SIZE, buf_size);
if (ret < 0)
return NULL;
p64 = (uint64_t *)p;
- s64 = s / sizeof(uint64_t);
+ s64 = buf_size / sizeof(uint64_t);
while (s64 > 0) {
*p64 = (uint64_t)rand();
@@ -76,12 +74,13 @@ static void *malloc_and_init_memory(size_t s)
return p;
}
-static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
+static int fill_one_span_read(unsigned char *buf, size_t buf_size)
{
+ unsigned char *end_ptr = buf + buf_size;
unsigned char sum, *p;
sum = 0;
- p = start_ptr;
+ p = buf;
while (p < end_ptr) {
sum += *p;
p += (CL_SIZE / 2);
@@ -90,27 +89,26 @@ static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
return sum;
}
-static
-void fill_one_span_write(unsigned char *start_ptr, unsigned char *end_ptr)
+static void fill_one_span_write(unsigned char *buf, size_t buf_size)
{
+ unsigned char *end_ptr = buf + buf_size;
unsigned char *p;
- p = start_ptr;
+ p = buf;
while (p < end_ptr) {
*p = '1';
p += (CL_SIZE / 2);
}
}
-static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
- char *resctrl_val)
+static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
{
int ret = 0;
FILE *fp;
while (1) {
- ret = fill_one_span_read(start_ptr, end_ptr);
- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ ret = fill_one_span_read(buf, buf_size);
+ if (once)
break;
}
@@ -126,75 +124,52 @@ static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
return 0;
}
-static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
- char *resctrl_val)
+static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
{
while (1) {
- fill_one_span_write(start_ptr, end_ptr);
- if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ fill_one_span_write(buf, buf_size);
+ if (once)
break;
}
return 0;
}
-static int
-fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
- int op, char *resctrl_val)
+static int fill_cache(size_t buf_size, int memflush, int op, bool once)
{
- unsigned char *start_ptr, *end_ptr;
- unsigned long long i;
+ unsigned char *buf;
int ret;
- if (malloc_and_init)
- start_ptr = malloc_and_init_memory(buf_size);
- else
- start_ptr = malloc(buf_size);
-
- if (!start_ptr)
+ buf = malloc_and_init_memory(buf_size);
+ if (!buf)
return -1;
- startptr = start_ptr;
- end_ptr = start_ptr + buf_size;
-
- /*
- * It's better to touch the memory once to avoid any compiler
- * optimizations
- */
- if (!malloc_and_init) {
- for (i = 0; i < buf_size; i++)
- *start_ptr++ = (unsigned char)rand();
- }
-
- start_ptr = startptr;
-
/* Flush the memory before using to avoid "cache hot pages" effect */
if (memflush)
- mem_flush(start_ptr, buf_size);
+ mem_flush(buf, buf_size);
if (op == 0)
- ret = fill_cache_read(start_ptr, end_ptr, resctrl_val);
+ ret = fill_cache_read(buf, buf_size, once);
else
- ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
+ ret = fill_cache_write(buf, buf_size, once);
+
+ free(buf);
if (ret) {
printf("\n Error in fill cache read/write...\n");
return -1;
}
- free(startptr);
return 0;
}
-int run_fill_buf(unsigned long span, int malloc_and_init_memory,
- int memflush, int op, char *resctrl_val)
+int run_fill_buf(size_t span, int memflush, int op, bool once)
{
- unsigned long long cache_size = span;
+ size_t cache_size = span;
int ret;
- ret = fill_cache(cache_size, malloc_and_init_memory, memflush, op,
- resctrl_val);
+ ret = fill_cache(cache_size, memflush, op, once);
if (ret) {
printf("\n Error in fill cache\n");
return -1;
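
fill_buf.c now always allocates with malloc_and_init_memory(): a page-aligned buffer seeded with random data, then read or written at half-cache-line stride. A cut-down, hedged version of the read path (CL_SIZE, the span, and the checksum print are illustrative; the real benchmark also flushes the buffer and can loop until told to stop):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE (4 * 1024)
	#define CL_SIZE 64

	static unsigned char *alloc_and_init(size_t buf_size)
	{
		void *p = NULL;
		uint64_t *p64;
		size_t s64;

		if (posix_memalign(&p, PAGE_SIZE, buf_size))
			return NULL;

		/* Touch every word so the pages are really backed. */
		p64 = p;
		for (s64 = buf_size / sizeof(uint64_t); s64; s64--)
			*p64++ = (uint64_t)rand();

		return p;
	}

	static unsigned char read_span(unsigned char *buf, size_t buf_size)
	{
		unsigned char sum = 0;
		size_t i;

		/* Stride of half a cache line mirrors fill_one_span_read(). */
		for (i = 0; i < buf_size; i += CL_SIZE / 2)
			sum += buf[i];

		return sum;
	}

	int main(void)
	{
		size_t span = 16 * 1024 * 1024;
		unsigned char *buf = alloc_and_init(span);

		if (!buf)
			return 1;

		/* Printing the checksum keeps the read loop from being elided. */
		printf("checksum: %u\n", read_span(buf, span));
		free(buf);
		return 0;
	}
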
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
index cde3781a9ab0..4d2f145804b8 100644
--- a/tools/testing/selftests/resctrl/mba_test.c
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -22,18 +22,12 @@
* con_mon grp, mon_grp in resctrl FS.
* For each allocation, run 5 times in order to get average values.
*/
-static int mba_setup(int num, ...)
+static int mba_setup(struct resctrl_val_param *p)
{
static int runs_per_allocation, allocation = 100;
- struct resctrl_val_param *p;
char allocation_str[64];
- va_list param;
int ret;
- va_start(param, num);
- p = va_arg(param, struct resctrl_val_param *);
- va_end(param);
-
if (runs_per_allocation >= NUM_OF_RUNS)
runs_per_allocation = 0;
@@ -154,7 +148,6 @@ int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
.ctrlgrp = "c1",
.mongrp = "m1",
.cpu_no = cpu_no,
- .mum_resctrlfs = true,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mba_setup
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
index 538d35a6485a..c7de6f5977f6 100644
--- a/tools/testing/selftests/resctrl/mbm_test.c
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -15,7 +15,7 @@
#define NUM_OF_RUNS 5
static int
-show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
+show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, size_t span)
{
unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
@@ -40,14 +40,14 @@ show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
ksft_print_msg("%s Check MBM diff within %d%%\n",
ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT);
ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
- ksft_print_msg("Span (MB): %d\n", span);
+ ksft_print_msg("Span (MB): %zu\n", span / MB);
ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
return ret;
}
-static int check_results(int span)
+static int check_results(size_t span)
{
unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
char temp[1024], *token_array[8];
@@ -86,16 +86,10 @@ static int check_results(int span)
return ret;
}
-static int mbm_setup(int num, ...)
+static int mbm_setup(struct resctrl_val_param *p)
{
- struct resctrl_val_param *p;
- va_list param;
int ret = 0;
- va_start(param, num);
- p = va_arg(param, struct resctrl_val_param *);
- va_end(param);
-
/* Run NUM_OF_RUNS times */
if (p->num_of_runs >= NUM_OF_RUNS)
return END_OF_TESTS;
@@ -115,7 +109,7 @@ void mbm_test_cleanup(void)
remove(RESULT_FILE_NAME);
}
-int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
+int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd)
{
struct resctrl_val_param param = {
.resctrl_val = MBM_STR,
@@ -123,7 +117,6 @@ int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
.mongrp = "m1",
.span = span,
.cpu_no = cpu_no,
- .mum_resctrlfs = true,
.filename = RESULT_FILE_NAME,
.bw_report = bw_report,
.setup = mbm_setup
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
index 87e39456dee0..838d1a438f33 100644
--- a/tools/testing/selftests/resctrl/resctrl.h
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -3,7 +3,6 @@
#ifndef RESCTRL_H
#define RESCTRL_H
#include <stdio.h>
-#include <stdarg.h>
#include <math.h>
#include <errno.h>
#include <sched.h>
@@ -43,6 +42,7 @@
do { \
perror(err_msg); \
kill(ppid, SIGKILL); \
+ umount_resctrlfs(); \
exit(EXIT_FAILURE); \
} while (0)
@@ -53,7 +53,6 @@
* @mongrp: Name of the monitor group (mon grp)
* @cpu_no: CPU number to which the benchmark would be binded
* @span: Memory bytes accessed in each benchmark iteration
- * @mum_resctrlfs: Should the resctrl FS be remounted?
* @filename: Name of file to which the o/p should be written
* @bw_report: Bandwidth report type (reads vs writes)
* @setup: Call back function to setup test environment
@@ -63,13 +62,12 @@ struct resctrl_val_param {
char ctrlgrp[64];
char mongrp[64];
int cpu_no;
- unsigned long span;
- bool mum_resctrlfs;
+ size_t span;
char filename[64];
char *bw_report;
unsigned long mask;
int num_of_runs;
- int (*setup)(int num, ...);
+ int (*setup)(struct resctrl_val_param *param);
};
#define MBM_STR "mbm"
@@ -84,8 +82,8 @@ extern char llc_occup_path[1024];
int get_vendor(void);
bool check_resctrlfs_support(void);
int filter_dmesg(void);
-int remount_resctrlfs(bool mum_resctrlfs);
int get_resource_id(int cpu_no, int *resource_id);
+int mount_resctrlfs(void);
int umount_resctrlfs(void);
int validate_bw_report_request(char *bw_report);
bool validate_resctrl_feature_request(const char *resctrl_val);
@@ -98,10 +96,9 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
char *resctrl_val);
int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
int group_fd, unsigned long flags);
-int run_fill_buf(unsigned long span, int malloc_and_init_memory, int memflush,
- int op, char *resctrl_va);
+int run_fill_buf(size_t span, int memflush, int op, bool once);
int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
-int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd);
+int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd);
void tests_cleanup(void);
void mbm_test_cleanup(void);
int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
@@ -120,7 +117,7 @@ void cmt_test_cleanup(void);
int get_core_sibling(int cpu_no);
int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
- unsigned long cache_span, unsigned long max_diff,
+ size_t cache_span, unsigned long max_diff,
unsigned long max_diff_percent, unsigned long num_of_runs,
bool platform, bool cmt);
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
index 9b9751206e1c..d511daeb6851 100644
--- a/tools/testing/selftests/resctrl/resctrl_tests.c
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -70,60 +70,81 @@ void tests_cleanup(void)
cat_test_cleanup();
}
-static void run_mbm_test(bool has_ben, char **benchmark_cmd, int span,
+static void run_mbm_test(char **benchmark_cmd, size_t span,
int cpu_no, char *bw_report)
{
int res;
ksft_print_msg("Starting MBM BW change ...\n");
+ res = mount_resctrlfs();
+ if (res) {
+ ksft_exit_fail_msg("Failed to mount resctrl FS\n");
+ return;
+ }
+
if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
- return;
+ goto umount;
}
- if (!has_ben)
- sprintf(benchmark_cmd[5], "%s", MBA_STR);
res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBM: bw change\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+
+umount:
+ umount_resctrlfs();
}
-static void run_mba_test(bool has_ben, char **benchmark_cmd, int span,
- int cpu_no, char *bw_report)
+static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report)
{
int res;
ksft_print_msg("Starting MBA Schemata change ...\n");
+ res = mount_resctrlfs();
+ if (res) {
+ ksft_exit_fail_msg("Failed to mount resctrl FS\n");
+ return;
+ }
+
if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) {
ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
- return;
+ goto umount;
}
- if (!has_ben)
- sprintf(benchmark_cmd[1], "%d", span);
res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
ksft_test_result(!res, "MBA: schemata change\n");
+
+umount:
+ umount_resctrlfs();
}
-static void run_cmt_test(bool has_ben, char **benchmark_cmd, int cpu_no)
+static void run_cmt_test(char **benchmark_cmd, int cpu_no)
{
int res;
ksft_print_msg("Starting CMT test ...\n");
+
+ res = mount_resctrlfs();
+ if (res) {
+ ksft_exit_fail_msg("Failed to mount resctrl FS\n");
+ return;
+ }
+
if (!validate_resctrl_feature_request(CMT_STR)) {
ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
- return;
+ goto umount;
}
- if (!has_ben)
- sprintf(benchmark_cmd[5], "%s", CMT_STR);
res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
ksft_test_result(!res, "CMT: test\n");
if ((get_vendor() == ARCH_INTEL) && res)
ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+
+umount:
+ umount_resctrlfs();
}
static void run_cat_test(int cpu_no, int no_of_bits)
@@ -132,22 +153,32 @@ static void run_cat_test(int cpu_no, int no_of_bits)
ksft_print_msg("Starting CAT test ...\n");
+ res = mount_resctrlfs();
+ if (res) {
+ ksft_exit_fail_msg("Failed to mount resctrl FS\n");
+ return;
+ }
+
if (!validate_resctrl_feature_request(CAT_STR)) {
ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
- return;
+ goto umount;
}
res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
ksft_test_result(!res, "CAT: test\n");
+
+umount:
+ umount_resctrlfs();
}
int main(int argc, char **argv)
{
bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
- int c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 0;
char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
+ int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0;
int ben_ind, ben_count, tests = 0;
+ size_t span = 250 * MB;
bool cat_test = true;
for (i = 0; i < argc; i++) {
@@ -232,16 +263,15 @@ int main(int argc, char **argv)
benchmark_cmd[ben_count] = NULL;
} else {
/* If no benchmark is given by "-b" argument, use fill_buf. */
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 5; i++)
benchmark_cmd[i] = benchmark_cmd_area[i];
strcpy(benchmark_cmd[0], "fill_buf");
- sprintf(benchmark_cmd[1], "%d", span);
+ sprintf(benchmark_cmd[1], "%zu", span);
strcpy(benchmark_cmd[2], "1");
- strcpy(benchmark_cmd[3], "1");
- strcpy(benchmark_cmd[4], "0");
- strcpy(benchmark_cmd[5], "");
- benchmark_cmd[6] = NULL;
+ strcpy(benchmark_cmd[3], "0");
+ strcpy(benchmark_cmd[4], "false");
+ benchmark_cmd[5] = NULL;
}
sprintf(bw_report, "reads");
@@ -250,23 +280,24 @@ int main(int argc, char **argv)
if (!check_resctrlfs_support())
return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
+ if (umount_resctrlfs())
+ return ksft_exit_skip("resctrl FS unmount failed.\n");
+
filter_dmesg();
ksft_set_plan(tests ? : 4);
if (mbm_test)
- run_mbm_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
+ run_mbm_test(benchmark_cmd, span, cpu_no, bw_report);
if (mba_test)
- run_mba_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
+ run_mba_test(benchmark_cmd, cpu_no, bw_report);
if (cmt_test)
- run_cmt_test(has_ben, benchmark_cmd, cpu_no);
+ run_cmt_test(benchmark_cmd, cpu_no);
if (cat_test)
run_cat_test(cpu_no, no_of_bits);
- umount_resctrlfs();
-
ksft_finished();
}
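
With the resctrl_val string dropped from fill_buf, the default benchmark command shrinks from six arguments to five: name, span, memflush, operation, and a "true"/"false" once flag, followed by a NULL terminator. A hedged sketch of building and decoding that argv-style array (the array sizes are assumptions, not the selftest's constants):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define BENCHMARK_ARGS		64
	#define BENCHMARK_ARG_SIZE	64
	#define MB			(1024 * 1024)

	int main(void)
	{
		char area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
		char *cmd[BENCHMARK_ARGS];
		size_t span = 250 * MB;
		int i;

		/* Build the default command, mirroring resctrl_tests.c. */
		for (i = 0; i < 5; i++)
			cmd[i] = area[i];
		strcpy(cmd[0], "fill_buf");
		sprintf(cmd[1], "%zu", span);		/* span in bytes */
		strcpy(cmd[2], "1");			/* memflush */
		strcpy(cmd[3], "0");			/* operation: read */
		strcpy(cmd[4], "false");		/* once */
		cmd[5] = NULL;

		/* Decode it roughly the way run_benchmark() would. */
		span = strtoul(cmd[1], NULL, 10);
		int memflush = atoi(cmd[2]);
		int operation = atoi(cmd[3]);
		bool once = strcmp(cmd[4], "true") == 0;

		printf("span=%zu memflush=%d op=%d once=%d\n",
		       span, memflush, operation, once);
		return 0;
	}
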
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
index ab1eab1e7ff6..f0f6c5f6e98b 100644
--- a/tools/testing/selftests/resctrl/resctrl_val.c
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -648,10 +648,6 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
return ret;
}
- ret = remount_resctrlfs(param->mum_resctrlfs);
- if (ret)
- return ret;
-
/*
* If benchmark wasn't successfully started by child, then child should
* kill parent, so save parent's pid
@@ -763,7 +759,7 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
/* Test runs until the callback setup() tells the test to stop. */
while (1) {
- ret = param->setup(1, param);
+ ret = param->setup(param);
if (ret == END_OF_TESTS) {
ret = 0;
break;
@@ -788,7 +784,6 @@ unregister:
signal_handler_unregister();
out:
kill(bm_pid, SIGKILL);
- umount_resctrlfs();
return ret;
}
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
index fb00245dee92..bd36ee206602 100644
--- a/tools/testing/selftests/resctrl/resctrlfs.c
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -48,29 +48,20 @@ static int find_resctrl_mount(char *buffer)
}
/*
- * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
- * @mum_resctrlfs: Should the resctrl FS be remounted?
+ * mount_resctrlfs - Mount resctrl FS at /sys/fs/resctrl
*
- * If not mounted, mount it.
- * If mounted and mum_resctrlfs then remount resctrl FS.
- * If mounted and !mum_resctrlfs then noop
+ * Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid
+ * pre-existing settings interfering with the test results.
*
* Return: 0 on success, non-zero on failure
*/
-int remount_resctrlfs(bool mum_resctrlfs)
+int mount_resctrlfs(void)
{
- char mountpoint[256];
int ret;
- ret = find_resctrl_mount(mountpoint);
- if (ret)
- strcpy(mountpoint, RESCTRL_PATH);
-
- if (!ret && mum_resctrlfs && umount(mountpoint))
- ksft_print_msg("Fail: unmounting \"%s\"\n", mountpoint);
-
- if (!ret && !mum_resctrlfs)
- return 0;
+ ret = find_resctrl_mount(NULL);
+ if (ret != -ENOENT)
+ return -1;
ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
@@ -82,10 +73,16 @@ int remount_resctrlfs(bool mum_resctrlfs)
int umount_resctrlfs(void)
{
- if (find_resctrl_mount(NULL))
+ char mountpoint[256];
+ int ret;
+
+ ret = find_resctrl_mount(mountpoint);
+ if (ret == -ENOENT)
return 0;
+ if (ret)
+ return ret;
- if (umount(RESCTRL_PATH)) {
+ if (umount(mountpoint)) {
perror("# Unable to umount resctrl");
return errno;
@@ -305,10 +302,10 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no)
*/
void run_benchmark(int signum, siginfo_t *info, void *ucontext)
{
- int operation, ret, malloc_and_init_memory, memflush;
- unsigned long span, buffer_span;
+ int operation, ret, memflush;
char **benchmark_cmd;
- char resctrl_val[64];
+ size_t span;
+ bool once;
FILE *fp;
benchmark_cmd = info->si_ptr;
@@ -324,18 +321,16 @@ void run_benchmark(int signum, siginfo_t *info, void *ucontext)
if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
/* Execute default fill_buf benchmark */
span = strtoul(benchmark_cmd[1], NULL, 10);
- malloc_and_init_memory = atoi(benchmark_cmd[2]);
- memflush = atoi(benchmark_cmd[3]);
- operation = atoi(benchmark_cmd[4]);
- sprintf(resctrl_val, "%s", benchmark_cmd[5]);
-
- if (strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
- buffer_span = span * MB;
+ memflush = atoi(benchmark_cmd[2]);
+ operation = atoi(benchmark_cmd[3]);
+ if (!strcmp(benchmark_cmd[4], "true"))
+ once = true;
+ else if (!strcmp(benchmark_cmd[4], "false"))
+ once = false;
else
- buffer_span = span;
+ PARENT_EXIT("Invalid once parameter");
- if (run_fill_buf(buffer_span, malloc_and_init_memory, memflush,
- operation, resctrl_val))
+ if (run_fill_buf(span, memflush, operation, once))
fprintf(stderr, "Error in running fill buffer\n");
} else {
/* Execute specified benchmark */
@@ -611,7 +606,8 @@ char *fgrep(FILE *inf, const char *str)
* validate_resctrl_feature_request - Check if requested feature is valid.
* @resctrl_val: Requested feature
*
- * Return: True if the feature is supported, else false
+ * Return: True if the feature is supported, else false. False is also
+ * returned if resctrl FS is not mounted.
*/
bool validate_resctrl_feature_request(const char *resctrl_val)
{
@@ -619,11 +615,13 @@ bool validate_resctrl_feature_request(const char *resctrl_val)
bool found = false;
char *res;
FILE *inf;
+ int ret;
if (!resctrl_val)
return false;
- if (remount_resctrlfs(false))
+ ret = find_resctrl_mount(NULL);
+ if (ret)
return false;
if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
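
mount_resctrlfs() now refuses to mount if resctrl is already mounted, and umount_resctrlfs() unmounts whatever mount point find_resctrl_mount() reports. The selftest's own find_resctrl_mount() is not shown in this hunk; the sketch below is just one standard way to perform that kind of check with glibc's mntent API, not the selftest's implementation:

	#include <errno.h>
	#include <mntent.h>
	#include <stdio.h>
	#include <string.h>

	/*
	 * Look for a mounted filesystem of the given type in /proc/mounts.
	 * Returns 0 and copies the mount point if found, -ENOENT otherwise.
	 */
	static int find_fs_mount(const char *fstype, char *mountpoint, size_t len)
	{
		struct mntent *m;
		FILE *mounts;
		int ret = -ENOENT;

		mounts = setmntent("/proc/mounts", "r");
		if (!mounts)
			return -errno;

		while ((m = getmntent(mounts))) {
			if (strcmp(m->mnt_type, fstype) == 0) {
				if (mountpoint)
					snprintf(mountpoint, len, "%s", m->mnt_dir);
				ret = 0;
				break;
			}
		}
		endmntent(mounts);
		return ret;
	}

	int main(void)
	{
		char where[256];

		if (find_fs_mount("resctrl", where, sizeof(where)) == 0)
			printf("resctrl mounted at %s\n", where);
		else
			printf("resctrl not mounted\n");
		return 0;
	}
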
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index 7a957c7d459a..5a3432fceb58 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -33,7 +33,7 @@ $(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
$(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
$(OUTPUT)/basic_percpu_ops_mm_cid_test: basic_percpu_ops_test.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
- $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID_ID $< $(LDLIBS) -lrseq -o $@
+ $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID $< $(LDLIBS) -lrseq -o $@
$(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
rseq.h rseq-*.h
diff --git a/tools/testing/selftests/rseq/compiler.h b/tools/testing/selftests/rseq/compiler.h
index f47092bddeba..49d62fbd6dda 100644
--- a/tools/testing/selftests/rseq/compiler.h
+++ b/tools/testing/selftests/rseq/compiler.h
@@ -33,4 +33,30 @@
#define RSEQ_COMBINE_TOKENS(_tokena, _tokenb) \
RSEQ__COMBINE_TOKENS(_tokena, _tokenb)
+#ifdef __cplusplus
+#define rseq_unqual_scalar_typeof(x) \
+ std::remove_cv<std::remove_reference<decltype(x)>::type>::type
+#else
+#define rseq_scalar_type_to_expr(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+/*
+ * Use C11 _Generic to express unqualified type from expression. This removes
+ * volatile qualifier from expression type.
+ */
+#define rseq_unqual_scalar_typeof(x) \
+ __typeof__( \
+ _Generic((x), \
+ char: (char)0, \
+ rseq_scalar_type_to_expr(char), \
+ rseq_scalar_type_to_expr(short), \
+ rseq_scalar_type_to_expr(int), \
+ rseq_scalar_type_to_expr(long), \
+ rseq_scalar_type_to_expr(long long), \
+ default: (x) \
+ ) \
+ )
+#endif
+
#endif /* RSEQ_COMPILER_H_ */
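
rseq_unqual_scalar_typeof() uses C11 _Generic so the temporaries in the load-acquire/store-release macros get the plain scalar type of the expression rather than a volatile-qualified one, avoiding extra forced memory accesses. A trimmed-down re-creation of the idea, covering only int and long for brevity (an assumption of this sketch, not a limitation of the real macro):

	#include <stdio.h>

	/* Minimal version: _Generic selects an unqualified prvalue of the
	 * same scalar type, and __typeof__ then names that type. */
	#define unqual_scalar_typeof(x)			\
		__typeof__(_Generic((x),		\
			int: (int)0,			\
			long: (long)0,			\
			default: (x)))

	static volatile long counter = 41;

	int main(void)
	{
		/* snap has type "long", not "volatile long", so further
		 * uses of it are not forced back to memory. */
		unqual_scalar_typeof(counter) snap = counter;

		snap += 1;
		printf("%ld\n", snap);
		return 0;
	}
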
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
index 8414fc3eac15..d887b3bbe257 100644
--- a/tools/testing/selftests/rseq/rseq-arm.h
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -66,7 +66,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_smp_mb(); \
____p1; \
})
@@ -76,7 +76,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_smp_mb(); \
- RSEQ_WRITE_ONCE(*p, v); \
+ RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h
index 85b90977e7e6..21e1626a7235 100644
--- a/tools/testing/selftests/rseq/rseq-arm64.h
+++ b/tools/testing/selftests/rseq/rseq-arm64.h
@@ -27,59 +27,61 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*p) ____p1; \
- switch (sizeof(*p)) { \
+ union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u; \
+ switch (sizeof(*(p))) { \
case 1: \
- asm volatile ("ldarb %w0, %1" \
- : "=r" (*(__u8 *)p) \
- : "Q" (*p) : "memory"); \
+ __asm__ __volatile__ ("ldarb %w0, %1" \
+ : "=r" (*(__u8 *)__u.__c) \
+ : "Q" (*(p)) : "memory"); \
break; \
case 2: \
- asm volatile ("ldarh %w0, %1" \
- : "=r" (*(__u16 *)p) \
- : "Q" (*p) : "memory"); \
+ __asm__ __volatile__ ("ldarh %w0, %1" \
+ : "=r" (*(__u16 *)__u.__c) \
+ : "Q" (*(p)) : "memory"); \
break; \
case 4: \
- asm volatile ("ldar %w0, %1" \
- : "=r" (*(__u32 *)p) \
- : "Q" (*p) : "memory"); \
+ __asm__ __volatile__ ("ldar %w0, %1" \
+ : "=r" (*(__u32 *)__u.__c) \
+ : "Q" (*(p)) : "memory"); \
break; \
case 8: \
- asm volatile ("ldar %0, %1" \
- : "=r" (*(__u64 *)p) \
- : "Q" (*p) : "memory"); \
+ __asm__ __volatile__ ("ldar %0, %1" \
+ : "=r" (*(__u64 *)__u.__c) \
+ : "Q" (*(p)) : "memory"); \
break; \
} \
- ____p1; \
+ (rseq_unqual_scalar_typeof(*(p)))__u.__val; \
})
#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
#define rseq_smp_store_release(p, v) \
do { \
- switch (sizeof(*p)) { \
+ union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u = \
+ { .__val = (rseq_unqual_scalar_typeof(*(p))) (v) }; \
+ switch (sizeof(*(p))) { \
case 1: \
- asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) \
- : "r" ((__u8)v) \
+ __asm__ __volatile__ ("stlrb %w1, %0" \
+ : "=Q" (*(p)) \
+ : "r" (*(__u8 *)__u.__c) \
: "memory"); \
break; \
case 2: \
- asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) \
- : "r" ((__u16)v) \
+ __asm__ __volatile__ ("stlrh %w1, %0" \
+ : "=Q" (*(p)) \
+ : "r" (*(__u16 *)__u.__c) \
: "memory"); \
break; \
case 4: \
- asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) \
- : "r" ((__u32)v) \
+ __asm__ __volatile__ ("stlr %w1, %0" \
+ : "=Q" (*(p)) \
+ : "r" (*(__u32 *)__u.__c) \
: "memory"); \
break; \
case 8: \
- asm volatile ("stlr %1, %0" \
- : "=Q" (*p) \
- : "r" ((__u64)v) \
+ __asm__ __volatile__ ("stlr %1, %0" \
+ : "=Q" (*(p)) \
+ : "r" (*(__u64 *)__u.__c) \
: "memory"); \
break; \
} \
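
The reworked arm64 macros bounce the value through a union of the unqualified scalar type and a char array, so the asm operand has a well-defined, non-volatile type of the right size. As a portable illustration of the acquire/release semantics these macros provide, here is a sketch using the GCC/Clang __atomic builtins rather than the rseq inline asm itself:

	#include <pthread.h>
	#include <stdio.h>

	static int payload;
	static int ready;

	static void *producer(void *arg)
	{
		payload = 42;
		/* Release: the payload store cannot be reordered after this. */
		__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
		return NULL;
	}

	static void *consumer(void *arg)
	{
		/* Acquire: once ready reads as 1, payload is visible too. */
		while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))
			;
		printf("payload = %d\n", payload);
		return NULL;
	}

	int main(void)
	{
		pthread_t p, c;

		pthread_create(&c, NULL, consumer, NULL);
		pthread_create(&p, NULL, producer, NULL);
		pthread_join(p, NULL);
		pthread_join(c, NULL);
		return 0;
	}
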
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
index 50b950cf9585..42ef8e946693 100644
--- a/tools/testing/selftests/rseq/rseq-mips.h
+++ b/tools/testing/selftests/rseq/rseq-mips.h
@@ -45,7 +45,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_smp_mb(); \
____p1; \
})
@@ -55,7 +55,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_smp_mb(); \
- RSEQ_WRITE_ONCE(*p, v); \
+ RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#if _MIPS_SZLONG == 64
diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h
index dc9190facee9..57b160597189 100644
--- a/tools/testing/selftests/rseq/rseq-ppc.h
+++ b/tools/testing/selftests/rseq/rseq-ppc.h
@@ -23,7 +23,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_smp_lwsync(); \
____p1; \
})
@@ -33,7 +33,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_smp_lwsync(); \
- RSEQ_WRITE_ONCE(*p, v); \
+ RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
/*
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index 17932a79e066..37e598d0a365 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -36,8 +36,8 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
- RISCV_FENCE(r, rw) \
+ rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
+ RISCV_FENCE(r, rw); \
____p1; \
})
@@ -46,7 +46,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
RISCV_FENCE(rw, w); \
- RSEQ_WRITE_ONCE(*(p), v); \
+ RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
index 46c92598acc7..33baaa9f9997 100644
--- a/tools/testing/selftests/rseq/rseq-s390.h
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -15,7 +15,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_barrier(); \
____p1; \
})
@@ -25,7 +25,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_barrier(); \
- RSEQ_WRITE_ONCE(*p, v); \
+ RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#ifdef __s390x__
diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h
index fb65ef54b0fb..a2aa428ba151 100644
--- a/tools/testing/selftests/rseq/rseq-x86.h
+++ b/tools/testing/selftests/rseq/rseq-x86.h
@@ -42,7 +42,7 @@
#define rseq_smp_load_acquire(p) \
__extension__ ({ \
- __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \
rseq_barrier(); \
____p1; \
})
@@ -52,7 +52,7 @@ __extension__ ({ \
#define rseq_smp_store_release(p, v) \
do { \
rseq_barrier(); \
- RSEQ_WRITE_ONCE(*p, v); \
+ RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
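/*
 * Illustrative sketch, not part of the patch: every rseq-*.h hunk above
 * swaps __typeof(*p) for rseq_unqual_scalar_typeof(*(p)) so the local
 * temporary ____p1 does not inherit a volatile or const qualifier from
 * the pointed-to object. A simplified way to build such a macro on C11
 * _Generic (lvalue conversion of the controlling expression drops
 * qualifiers) might look as follows; the demo_* name is hypothetical and
 * this is not the actual rseq implementation:
 */
#define demo_unqual_scalar_typeof(x)				\
	__typeof__(_Generic((x),				\
		char: (char)0,					\
		signed char: (signed char)0,			\
		unsigned char: (unsigned char)0,		\
		short: (short)0,				\
		unsigned short: (unsigned short)0,		\
		int: (int)0,					\
		unsigned int: (unsigned int)0,			\
		long: (long)0,					\
		unsigned long: (unsigned long)0,		\
		long long: (long long)0,			\
		unsigned long long: (unsigned long long)0,	\
		default: (x)))

/* e.g. given volatile int *p, demo_unqual_scalar_typeof(*p) is plain int. */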
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 43ec36b179dc..38f651469968 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -2184,6 +2184,9 @@ FIXTURE_TEARDOWN(TRACE_syscall)
TEST(negative_ENOSYS)
{
+#if defined(__arm__)
+ SKIP(return, "arm32 does not support calling syscall -1");
+#endif
/*
* There should be no difference between an "internal" skip
* and userspace asking for syscall "-1".
@@ -3072,7 +3075,8 @@ TEST(syscall_restart)
timeout.tv_sec = 1;
errno = 0;
EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
- TH_LOG("Call to nanosleep() failed (errno %d)", errno);
+ TH_LOG("Call to nanosleep() failed (errno %d: %s)",
+ errno, strerror(errno));
}
/* Read final sync from parent. */
@@ -3908,6 +3912,9 @@ TEST(user_notification_filter_empty)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
+ if (__NR_clone3 < 0)
+ SKIP(return, "Test not built with clone3 support");
+
pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
@@ -3962,6 +3969,9 @@ TEST(user_notification_filter_empty_threaded)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
+ if (__NR_clone3 < 0)
+ SKIP(return, "Test not built with clone3 support");
+
pid = sys_clone3(&args, sizeof(args));
ASSERT_GE(pid, 0);
@@ -4255,6 +4265,61 @@ TEST(user_notification_addfd_rlimit)
close(memfd);
}
+#ifndef SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+#endif
+
+TEST(user_notification_sync)
+{
+ struct seccomp_notif req = {};
+ struct seccomp_notif_resp resp = {};
+ int status, listener;
+ pid_t pid;
+ long ret;
+
+ ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
+ ASSERT_EQ(0, ret) {
+ TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
+ }
+
+ listener = user_notif_syscall(__NR_getppid,
+ SECCOMP_FILTER_FLAG_NEW_LISTENER);
+ ASSERT_GE(listener, 0);
+
+ /* Try to set invalid flags. */
+ EXPECT_SYSCALL_RETURN(-EINVAL,
+ ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS, 0xffffffff, 0));
+
+ ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SET_FLAGS,
+ SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP, 0), 0);
+
+ pid = fork();
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ ret = syscall(__NR_getppid);
+ ASSERT_EQ(ret, USER_NOTIF_MAGIC) {
+ _exit(1);
+ }
+ _exit(0);
+ }
+
+ req.pid = 0;
+ ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
+
+ ASSERT_EQ(req.data.nr, __NR_getppid);
+
+ resp.id = req.id;
+ resp.error = 0;
+ resp.val = USER_NOTIF_MAGIC;
+ resp.flags = 0;
+ ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
+
+ ASSERT_EQ(waitpid(pid, &status, 0), pid);
+ ASSERT_EQ(status, 0);
+}
+
+
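+/*
+ * Note on user_notification_sync above (an assumption based on the flag
+ * name and the checks in the test, not taken from this diff):
+ * SECCOMP_IOCTL_NOTIF_SET_FLAGS with SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP
+ * appears to request synchronous wake-ups between the supervisor and the
+ * notified task to shorten the notification round trip, while the
+ * EXPECT_SYSCALL_RETURN(-EINVAL, ...) call verifies that unknown flag
+ * bits are rejected.
+ */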
/* Make sure PTRACE_O_SUSPEND_SECCOMP requires CAP_SYS_ADMIN. */
FIXTURE(O_SUSPEND_SECCOMP) {
pid_t pid;
diff --git a/tools/testing/selftests/user_events/Makefile b/tools/testing/selftests/user_events/Makefile
index 9e95bd41b0b4..10fcd0066203 100644
--- a/tools/testing/selftests/user_events/Makefile
+++ b/tools/testing/selftests/user_events/Makefile
@@ -2,14 +2,6 @@
CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
LDLIBS += -lrt -lpthread -lm
-# Note:
-# This test depends on <linux/user_events.h> exported in uapi
-# The following commit removed user_events.h out of uapi:
-# commit 5cfff569cab8bf544bab62c911c5d6efd5af5e05
-# tracing: Move user_events.h temporarily out of include/uapi
-# This test will not compile until user_events.h is added
-# back to uapi.
-
TEST_GEN_PROGS = ftrace_test dyn_test perf_test abi_test
TEST_FILES := settings